code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package chandu0101.scalajs.react.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js
import scalacss.Defaults._
import scalacss.ScalaCssReact._
object ReactSearchBox {

  /** Inline styles for the search-box container and its text input. */
  class Style extends StyleSheet.Inline {
    import dsl._
    val searchBox = style(marginBottom(10 px))
    val input = style(
      fontSize(13 px),
      fontWeight._300,
      padding(3 px),
      width(100.%%),
      backgroundColor.transparent,
      borderBottom :=! "1px solid #B2ADAD",
      &.focus(outline.none,
        borderBottom :=! "1.5px solid #03a9f4"
      )
    )
  }

  class Backend(t: BackendScope[Props, _]) {
    /** Forwards every key-up to the caller-supplied callback with the input's current text. */
    def onTextChange(P: Props)(e: ReactEventI) =
      e.preventDefaultCB >> P.onTextChange(e.target.value)

    def render(P: Props) =
      <.div(P.style.searchBox)(
        <.input(P.style.input, ^.placeholder := "Search ..", ^.onKeyUp ==> onTextChange(P))
      )
  }

  /** Style instance used when the caller does not supply one. */
  object DefaultStyle extends Style

  val component = ReactComponentB[Props]("ReactSearchBox")
    .stateless
    .renderBackend[Backend]
    .build

  /**
   * @param onTextChange invoked with the input's text on every key-up
   * @param style        stylesheet to apply (defaults to [[DefaultStyle]])
   */
  case class Props(onTextChange: String => Callback, style: Style)

  def apply(onTextChange: String => Callback,
            style: Style = DefaultStyle,
            ref: js.UndefOr[String] = "",
            key: js.Any = {}) =
    component.set(key, ref)(Props(onTextChange, style))
}
package com.thetestpeople.trt.webdriver
import org.junit.runner.RunWith
import com.thetestpeople.trt.model.impl.DummyData
import com.thetestpeople.trt.mother.{ IncomingFactory ⇒ F }
import com.thetestpeople.trt.tags.SlowTest
import org.scalatest.junit.JUnitRunner
import com.github.nscala_time.time.Imports._
@SlowTest
@RunWith(classOf[JUnitRunner])
class StaleTestsScreenTest extends AbstractBrowserTest {

  "A test that hasn't been run for a while" should "be listed in the stale tests screen" in {
    automate { site ⇒
      // Five tests whose most recent executions range from 1 hour to 5 weeks ago;
      // only the 5-week-old one is expected to be flagged as stale.
      val batch = F.batch(executions = List(
        F.execution(F.test(name = "test1"), executionTimeOpt = Some(1.hour.ago)),
        F.execution(F.test(name = "test2"), executionTimeOpt = Some(2.hours.ago)),
        F.execution(F.test(name = "test3"), executionTimeOpt = Some(3.hours.ago)),
        F.execution(F.test(name = "test4"), executionTimeOpt = Some(4.hours.ago)),
        F.execution(F.test(name = "test5"), executionTimeOpt = Some(5.weeks.ago))))
      site.restApi.addBatch(batch)
      val staleTestsScreen = site.launch().mainMenu.reports().staleTests()
      // Exactly one row should be shown, and it should be the old test.
      val Seq(row) = staleTestsScreen.testRows
      row.name should equal("test5")
    }
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.nio._
import java.nio.channels._
import java.nio.charset.{Charset, StandardCharsets}
import java.security.cert.X509Certificate
import java.util.{Collections, Properties}
import java.util.concurrent.{Callable, Executors, TimeUnit}
import javax.net.ssl.X509TrustManager
import kafka.api._
import kafka.cluster.{Broker, EndPoint}
import kafka.common.TopicAndPartition
import kafka.consumer.{ConsumerConfig, ConsumerTimeoutException, KafkaStream}
import kafka.log._
import kafka.message._
import kafka.producer._
import kafka.security.auth.{Acl, Authorizer, Resource}
import kafka.serializer.{DefaultEncoder, Encoder, StringEncoder}
import kafka.server._
import kafka.server.checkpoints.OffsetCheckpointFile
import Implicits._
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.zk.{AdminZkClient, BrokerIdsZNode, BrokerInfo, KafkaZkClient}
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.admin.{AdminClient, AlterConfigsResult, Config, ConfigEntry}
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer, OffsetAndMetadata, RangeAssignor}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.header.Header
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.network.{ListenerName, Mode}
import org.apache.kafka.common.record._
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, Serializer}
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.utils.Utils._
import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils}
import org.apache.zookeeper.ZooDefs._
import org.apache.zookeeper.data.ACL
import org.junit.Assert._
import scala.collection.JavaConverters._
import scala.collection.{Map, mutable}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
/**
* Utility functions to help with testing
*/
object TestUtils extends Logging {
// Shared random source for all test helpers (delegates to the Java-side TestUtils).
val random = JTestUtils.RANDOM
/* 0 gives a random port; you can then retrieve the assigned port from the Socket object. */
val RandomPort = 0
/** Port to use for unit tests that mock/don't require a real ZK server. */
val MockZkPort = 1
/** ZooKeeper connection string to use for unit tests that mock/don't require a real ZK server. */
val MockZkConnect = "127.0.0.1:" + MockZkPort
// CN in SSL certificates - this is used for endpoint validation when enabled
val SslCertificateCn = "localhost"
// Header key/values used by transactional tests to tag records with their outcome.
private val transactionStatusKey = "transactionStatus"
private val committedValue : Array[Byte] = "committed".getBytes(StandardCharsets.UTF_8)
private val abortedValue : Array[Byte] = "aborted".getBytes(StandardCharsets.UTF_8)
/**
 * Create a temporary directory
 */
def tempDir(): File = JTestUtils.tempDirectory()
/** Generate a pseudo-random topic name for tests. */
def tempTopic(): String = "testTopic" + random.nextInt(1000000)
/**
 * Create a temporary relative directory
 *
 * @param parent relative parent path; created (with any missing ancestors) if absent
 */
def tempRelativeDir(parent: String): File = {
  val parentFile = new File(parent)
  parentFile.mkdirs()
  JTestUtils.tempDirectory(parentFile.toPath, null)
}
/**
 * Create a random log directory in the format <string>-<int> used for Kafka partition logs.
 * It is the responsibility of the caller to set up a shutdown hook for deletion of the directory.
 */
def randomPartitionLogDir(parentDir: File): File = {
  // Retry a bounded number of times in case a randomly-named directory already exists.
  val attempts = 1000
  val f = Iterator.continually(new File(parentDir, "kafka-" + random.nextInt(1000000)))
    .take(attempts).find(_.mkdir())
    .getOrElse(sys.error(s"Failed to create directory after $attempts attempts"))
  f.deleteOnExit()
  f
}
/**
 * Create a temporary file
 */
def tempFile(): File = JTestUtils.tempFile()
/**
 * Create a temporary file and return an open file channel for this file
 */
def tempChannel(): FileChannel = new RandomAccessFile(tempFile(), "rw").getChannel()
/**
 * Create a kafka server instance with appropriate test settings
 * USING THIS IS A SIGN YOU ARE NOT WRITING A REAL UNIT TEST
 *
 * @param config The configuration of the server
 */
def createServer(config: KafkaConfig, time: Time = Time.SYSTEM): KafkaServer = {
  val server = new KafkaServer(config, time)
  server.startup()
  server
}
/** Return the port the server bound for the listener of the given security protocol. */
def boundPort(server: KafkaServer, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Int =
  server.boundPort(ListenerName.forSecurityProtocol(securityProtocol))
/** Build a Broker value with a single endpoint for the given security protocol. */
def createBroker(id: Int, host: String, port: Int, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Broker =
  new Broker(id, host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
/**
 * Create a test config for the provided parameters.
 *
 * Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
 *
 * @param numConfigs number of broker configs to generate (node ids 0 until numConfigs)
 * @return one Properties per broker, each delegating to [[createBrokerConfig]]
 */
def createBrokerConfigs(numConfigs: Int,
  zkConnect: String,
  enableControlledShutdown: Boolean = true,
  enableDeleteTopic: Boolean = false,
  interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
  trustStoreFile: Option[File] = None,
  saslProperties: Option[Properties] = None,
  enablePlaintext: Boolean = true,
  enableSsl: Boolean = false,
  enableSaslPlaintext: Boolean = false,
  enableSaslSsl: Boolean = false,
  rackInfo: Map[Int, String] = Map(),
  logDirCount: Int = 1,
  enableToken: Boolean = false): Seq[Properties] = {
  (0 until numConfigs).map { node =>
    createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, RandomPort,
      interBrokerSecurityProtocol, trustStoreFile, saslProperties, enablePlaintext = enablePlaintext, enableSsl = enableSsl,
      enableSaslPlaintext = enableSaslPlaintext, enableSaslSsl = enableSaslSsl, rack = rackInfo.get(node), logDirCount = logDirCount, enableToken = enableToken)
  }
}
/** Comma-separated host:port list for the advertised listeners matching the given security protocol. */
def getBrokerListStrFromServers(servers: Seq[KafkaServer], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): String = {
  servers.map { s =>
    val listener = s.config.advertisedListeners.find(_.securityProtocol == protocol).getOrElse(
      sys.error(s"Could not find listener with security protocol $protocol"))
    formatAddress(listener.host, boundPort(s, protocol))
  }.mkString(",")
}
/** Comma-separated host:port list for the advertised listeners matching the given listener name. */
def bootstrapServers(servers: Seq[KafkaServer], listenerName: ListenerName): String = {
  servers.map { s =>
    val listener = s.config.advertisedListeners.find(_.listenerName == listenerName).getOrElse(
      sys.error(s"Could not find listener with name ${listenerName.value}"))
    formatAddress(listener.host, s.boundPort(listenerName))
  }.mkString(",")
}
/**
 * Shutdown `servers` and delete their log directories.
 */
def shutdownServers(servers: Seq[KafkaServer]) {
  // Shut brokers down in parallel to keep test teardown fast.
  servers.par.foreach { s =>
    s.shutdown()
    CoreUtils.delete(s.config.logDirs)
  }
}
/**
 * Create a test config for the provided parameters.
 *
 * Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
 *
 * @param nodeId broker id; negative values leave `broker.id` unset so it is auto-generated
 * @param logDirCount when > 1, a mix of absolute and relative log dirs is configured to
 *                    exercise backward compatibility of both path styles
 * @param enableToken when true, configures a delegation-token master key
 */
def createBrokerConfig(nodeId: Int,
  zkConnect: String,
  enableControlledShutdown: Boolean = true,
  enableDeleteTopic: Boolean = false,
  port: Int = RandomPort,
  interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
  trustStoreFile: Option[File] = None,
  saslProperties: Option[Properties] = None,
  enablePlaintext: Boolean = true,
  enableSaslPlaintext: Boolean = false,
  saslPlaintextPort: Int = RandomPort,
  enableSsl: Boolean = false,
  sslPort: Int = RandomPort,
  enableSaslSsl: Boolean = false,
  saslSslPort: Int = RandomPort,
  rack: Option[String] = None,
  logDirCount: Int = 1,
  enableToken: Boolean = false): Properties = {
  // A listener is enabled either explicitly or because it is the inter-broker protocol.
  def shouldEnable(protocol: SecurityProtocol) = interBrokerSecurityProtocol.fold(false)(_ == protocol)
  val protocolAndPorts = ArrayBuffer[(SecurityProtocol, Int)]()
  if (enablePlaintext || shouldEnable(SecurityProtocol.PLAINTEXT))
    protocolAndPorts += SecurityProtocol.PLAINTEXT -> port
  if (enableSsl || shouldEnable(SecurityProtocol.SSL))
    protocolAndPorts += SecurityProtocol.SSL -> sslPort
  if (enableSaslPlaintext || shouldEnable(SecurityProtocol.SASL_PLAINTEXT))
    protocolAndPorts += SecurityProtocol.SASL_PLAINTEXT -> saslPlaintextPort
  if (enableSaslSsl || shouldEnable(SecurityProtocol.SASL_SSL))
    protocolAndPorts += SecurityProtocol.SASL_SSL -> saslSslPort
  val listeners = protocolAndPorts.map { case (protocol, port) =>
    s"${protocol.name}://localhost:$port"
  }.mkString(",")
  val props = new Properties
  if (nodeId >= 0) props.put(KafkaConfig.BrokerIdProp, nodeId.toString)
  props.put(KafkaConfig.ListenersProp, listeners)
  if (logDirCount > 1) {
    val logDirs = (1 to logDirCount).toList.map(i =>
      // We would like to allow user to specify both relative path and absolute path as log directory for backward-compatibility reason
      // We can verify this by using a mixture of relative path and absolute path as log directories in the test
      if (i % 2 == 0) TestUtils.tempDir().getAbsolutePath else TestUtils.tempRelativeDir("data")
    ).mkString(",")
    props.put(KafkaConfig.LogDirsProp, logDirs)
  } else {
    props.put(KafkaConfig.LogDirProp, TestUtils.tempDir().getAbsolutePath)
  }
  props.put(KafkaConfig.ZkConnectProp, zkConnect)
  props.put(KafkaConfig.ZkConnectionTimeoutMsProp, "10000")
  // Short timeouts so failure cases surface quickly in tests.
  props.put(KafkaConfig.ReplicaSocketTimeoutMsProp, "1500")
  props.put(KafkaConfig.ControllerSocketTimeoutMsProp, "1500")
  props.put(KafkaConfig.ControlledShutdownEnableProp, enableControlledShutdown.toString)
  props.put(KafkaConfig.DeleteTopicEnableProp, enableDeleteTopic.toString)
  props.put(KafkaConfig.LogDeleteDelayMsProp, "1000")
  props.put(KafkaConfig.ControlledShutdownRetryBackoffMsProp, "100")
  props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, "2097152")
  props.put(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString)
  props.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "1")
  if (!props.containsKey(KafkaConfig.OffsetsTopicPartitionsProp))
    props.put(KafkaConfig.OffsetsTopicPartitionsProp, "5")
  if (!props.containsKey(KafkaConfig.GroupInitialRebalanceDelayMsProp))
    props.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
  rack.foreach(props.put(KafkaConfig.RackProp, _))
  // SSL/SASL properties are only added when at least one enabled listener needs them.
  if (protocolAndPorts.exists { case (protocol, _) => usesSslTransportLayer(protocol) })
    props ++= sslConfigs(Mode.SERVER, false, trustStoreFile, s"server$nodeId")
  if (protocolAndPorts.exists { case (protocol, _) => usesSaslAuthentication(protocol) })
    props ++= JaasTestUtils.saslConfigs(saslProperties)
  interBrokerSecurityProtocol.foreach { protocol =>
    props.put(KafkaConfig.InterBrokerSecurityProtocolProp, protocol.name)
  }
  if (enableToken)
    props.put(KafkaConfig.DelegationTokenMasterKeyProp, "masterkey")
  props
}
/**
 * Create a topic in ZooKeeper.
 * Wait until the leader is elected and the metadata is propagated to all brokers.
 * Return the leader for each partition.
 */
def createTopic(zkClient: KafkaZkClient,
  topic: String,
  numPartitions: Int = 1,
  replicationFactor: Int = 1,
  servers: Seq[KafkaServer],
  topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] = {
  val adminZkClient = new AdminZkClient(zkClient)
  // create topic
  adminZkClient.createTopic(topic, numPartitions, replicationFactor, topicConfig)
  // wait until the update metadata request for new topic reaches all servers
  (0 until numPartitions).map { i =>
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
    i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
  }.toMap
}
/**
 * Create a topic in ZooKeeper using a customized replica assignment.
 * Wait until the leader is elected and the metadata is propagated to all brokers.
 * Return the leader for each partition.
 *
 * @param partitionReplicaAssignment map of partition id to the replica ids hosting it
 */
def createTopic(zkClient: KafkaZkClient, topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]],
  servers: Seq[KafkaServer]): scala.collection.immutable.Map[Int, Int] = {
  val adminZkClient = new AdminZkClient(zkClient)
  // create topic
  adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, partitionReplicaAssignment)
  // wait until the update metadata request for new topic reaches all servers
  partitionReplicaAssignment.keySet.map { case i =>
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
    i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
  }.toMap
}
/**
 * Create the consumer offsets/group metadata topic and wait until the leader is elected and metadata is propagated
 * to all brokers.
 */
def createOffsetsTopic(zkClient: KafkaZkClient, servers: Seq[KafkaServer]): Unit = {
  // Partition count / replication factor are taken from the first server's config;
  // all servers are assumed to share the same offsets-topic settings.
  val server = servers.head
  createTopic(zkClient, Topic.GROUP_METADATA_TOPIC_NAME,
    server.config.getInt(KafkaConfig.OffsetsTopicPartitionsProp),
    server.config.getShort(KafkaConfig.OffsetsTopicReplicationFactorProp).toInt,
    servers,
    server.groupCoordinator.offsetsTopicConfigs)
}
/**
 * Build the configuration for an old (ZooKeeper-based) consumer.
 *
 * @param zkConnect       ZooKeeper connection string
 * @param groupId         consumer group id
 * @param consumerId      id of this consumer within the group
 * @param consumerTimeout consumer timeout in ms; -1 (the default) means block forever
 */
def createConsumerProperties(zkConnect: String, groupId: String, consumerId: String,
  consumerTimeout: Long = -1): Properties = {
  val props = new Properties
  // Caller-supplied settings plus sensible fixed defaults for tests.
  val entries = Seq(
    "zookeeper.connect" -> zkConnect,
    "group.id" -> groupId,
    "consumer.id" -> consumerId,
    "consumer.timeout.ms" -> consumerTimeout.toString,
    "zookeeper.session.timeout.ms" -> "6000",
    "zookeeper.sync.time.ms" -> "200",
    "auto.commit.interval.ms" -> "1000",
    "rebalance.max.retries" -> "4",
    "auto.offset.reset" -> "smallest",
    "num.consumer.fetchers" -> "2")
  entries.foreach { case (key, value) => props.put(key, value) }
  props
}
/**
 * Fail a test case explicitly. Return Nothing so that we are not constrained by the return type.
 */
def fail(msg: String): Nothing = throw new AssertionError(msg)
/**
 * Wrap a single record log buffer.
 */
def singletonRecords(value: Array[Byte],
  key: Array[Byte] = null,
  codec: CompressionType = CompressionType.NONE,
  timestamp: Long = RecordBatch.NO_TIMESTAMP,
  magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
  records(Seq(new SimpleRecord(timestamp, key, value)), magicValue = magicValue, codec = codec)
}
/** Build a MemoryRecords batch containing one key-less record per provided value. */
def recordsWithValues(magicValue: Byte,
  codec: CompressionType,
  values: Array[Byte]*): MemoryRecords = {
  records(values.map(value => new SimpleRecord(value)), magicValue, codec)
}
/**
 * Build a MemoryRecords batch from the given records.
 *
 * Uses CREATE_TIME timestamps and the current wall-clock as the batch log-append time.
 */
def records(records: Iterable[SimpleRecord],
  magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE,
  codec: CompressionType = CompressionType.NONE,
  producerId: Long = RecordBatch.NO_PRODUCER_ID,
  producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH,
  sequence: Int = RecordBatch.NO_SEQUENCE,
  baseOffset: Long = 0L): MemoryRecords = {
  val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
  val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, baseOffset,
    System.currentTimeMillis, producerId, producerEpoch, sequence)
  records.foreach(builder.append)
  builder.build()
}
/**
 * Generate an array of random bytes
 *
 * @param numBytes The size of the array
 */
def randomBytes(numBytes: Int): Array[Byte] = JTestUtils.randomBytes(numBytes)
/**
 * Generate a random string of letters and digits of the given length
 *
 * @param len The length of the string
 * @return The random string
 */
def randomString(len: Int): String = JTestUtils.randomString(len)
/**
 * Check that the buffer content from buffer.position() to buffer.limit() is equal
 *
 * Neither buffer is modified: bytes are read by absolute index.
 */
def checkEquals(b1: ByteBuffer, b2: ByteBuffer) {
  assertEquals("Buffers should have equal length", b1.limit() - b1.position(), b2.limit() - b2.position())
  for(i <- 0 until b1.limit() - b1.position())
    assertEquals("byte " + i + " byte not equal.", b1.get(b1.position() + i), b2.get(b1.position() + i))
}
/**
 * Throw an exception if the two iterators are of differing lengths or contain
 * different messages on their Nth element. Both iterators are consumed.
 */
def checkEquals[T](expected: Iterator[T], actual: Iterator[T]) {
  var length = 0
  while (expected.hasNext && actual.hasNext) {
    length += 1
    assertEquals(expected.next, actual.next)
  }
  // The original drained the longer iterator manually and then called
  // assertFalse(msg, true) — an obscure way to fail unconditionally.
  // Iterator.size drains and counts the remainder in one step; failure is explicit.
  if (expected.hasNext) {
    val length1 = length + expected.size
    throw new AssertionError("Iterators have uneven length-- first has more: " + length1 + " > " + length)
  }
  if (actual.hasNext) {
    val length2 = length + actual.size
    throw new AssertionError("Iterators have uneven length-- second has more: " + length2 + " > " + length)
  }
}
/**
 * Throw an exception if an iterable has different length than expected.
 * The iterator is consumed by the check.
 */
def checkLength[T](s1: Iterator[T], expectedLength:Int) {
  var count = 0
  for (_ <- s1)
    count += 1
  assertEquals(expectedLength, count)
}
/**
 * Throw an exception if the two iterators are of differing lengths or contain
 * different messages on their Nth element. Both iterators are consumed.
 */
def checkEquals[T](s1: java.util.Iterator[T], s2: java.util.Iterator[T]) {
  while(s1.hasNext && s2.hasNext)
    assertEquals(s1.next, s2.next)
  assertFalse("Iterators have uneven length--first has more", s1.hasNext)
  assertFalse("Iterators have uneven length--second has more", s2.hasNext)
}
/**
 * Lazily concatenate several iterators into a single iterator that yields the
 * elements of each input in order.
 *
 * Replaces a hand-rolled, null-based state machine that contained dead code
 * after a `while (true)` loop and threw NullPointerException (instead of
 * NoSuchElementException) when `next()` was called on an exhausted iterator.
 */
def stackedIterator[T](s: Iterator[T]*): Iterator[T] = s.iterator.flatMap(identity)
/**
 * Create a hexadecimal string for the given bytes
 */
def hexString(bytes: Array[Byte]): String = hexString(ByteBuffer.wrap(bytes))
/**
 * Create a hexadecimal string for the bytes between the buffer's position and
 * limit. The buffer is not modified (bytes are read by absolute index).
 *
 * Fix: the original iterated `0 until buffer.limit()` while offsetting each
 * read by `buffer.position()`, so any buffer with a non-zero position read
 * past its limit and threw IndexOutOfBoundsException. Iterating over
 * `buffer.remaining()` covers exactly the position..limit window.
 *
 * Note: each byte is formatted with "%x" after widening to Int, so negative
 * bytes render sign-extended (e.g. -1 -> "ffffffff"), matching prior behavior.
 */
def hexString(buffer: ByteBuffer): String = {
  val builder = new StringBuilder("0x")
  for(i <- 0 until buffer.remaining())
    builder.append(String.format("%x", Integer.valueOf(buffer.get(buffer.position() + i))))
  builder.toString
}
/**
 * Create a producer with a few pre-configured properties.
 * If certain properties need to be overridden, they can be provided in producerProps.
 *
 * Note: the serializer/partitioner settings below always override anything in
 * `producerProps` because they are applied afterwards.
 */
@deprecated("This method has been deprecated and it will be removed in a future release.", "0.10.0.0")
def createProducer[K, V](brokerList: String,
  encoder: String = classOf[DefaultEncoder].getName,
  keyEncoder: String = classOf[DefaultEncoder].getName,
  partitioner: String = classOf[DefaultPartitioner].getName,
  producerProps: Properties = null): Producer[K, V] = {
  val props: Properties = getProducerConfig(brokerList)
  //override any explicitly specified properties
  if (producerProps != null)
    props ++= producerProps
  props.put("serializer.class", encoder)
  props.put("key.serializer.class", keyEncoder)
  props.put("partitioner.class", partitioner)
  new Producer[K, V](new kafka.producer.ProducerConfig(props))
}
/**
 * Build the security-related client/server properties for the given protocol:
 * SSL configs when the transport is encrypted, SASL configs when the protocol
 * authenticates via SASL, and always the security.protocol entry itself.
 */
def securityConfigs(mode: Mode,
  securityProtocol: SecurityProtocol,
  trustStoreFile: Option[File],
  certAlias: String,
  certCn: String,
  saslProperties: Option[Properties]): Properties = {
  val props = new Properties
  if (usesSslTransportLayer(securityProtocol))
    props ++= sslConfigs(mode, securityProtocol == SecurityProtocol.SSL, trustStoreFile, certAlias, certCn)
  if (usesSaslAuthentication(securityProtocol))
    props ++= JaasTestUtils.saslConfigs(saslProperties)
  props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol.name)
  props
}
/** Security configs for a client-mode producer (certificate alias "producer"). */
def producerSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
  securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "producer", SslCertificateCn, saslProperties)
/**
 * Create a (new) producer with a few pre-configured properties.
 *
 * Explicit arguments always override entries in `props`; the retry/backoff/linger
 * defaults are only applied when not already present.
 */
def createNewProducer[K, V](brokerList: String,
  acks: Int = -1,
  maxBlockMs: Long = 60 * 1000L,
  bufferSize: Long = 1024L * 1024L,
  retries: Int = 0,
  lingerMs: Long = 0,
  requestTimeoutMs: Long = 30 * 1000L,
  securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
  trustStoreFile: Option[File] = None,
  saslProperties: Option[Properties] = None,
  keySerializer: Serializer[K] = new ByteArraySerializer,
  valueSerializer: Serializer[V] = new ByteArraySerializer,
  props: Option[Properties] = None): KafkaProducer[K, V] = {
  val producerProps = props.getOrElse(new Properties)
  producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
  producerProps.put(ProducerConfig.ACKS_CONFIG, acks.toString)
  producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs.toString)
  producerProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferSize.toString)
  producerProps.put(ProducerConfig.RETRIES_CONFIG, retries.toString)
  producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs.toString)
  /* Only use these if not already set */
  val defaultProps = Map(
    ProducerConfig.RETRY_BACKOFF_MS_CONFIG -> "100",
    ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG -> "200",
    ProducerConfig.LINGER_MS_CONFIG -> lingerMs.toString
  )
  defaultProps.foreach { case (key, value) =>
    if (!producerProps.containsKey(key)) producerProps.put(key, value)
  }
  /*
   * It uses CommonClientConfigs.SECURITY_PROTOCOL_CONFIG to determine whether
   * securityConfigs has been invoked already. For example, we need to
   * invoke it before this call in IntegrationTestHarness, otherwise the
   * SSL client auth fails.
   */
  if (!producerProps.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG))
    producerProps ++= producerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
  new KafkaProducer[K, V](producerProps, keySerializer, valueSerializer)
}
/** True when the protocol encrypts the transport (SSL or SASL_SSL). */
def usesSslTransportLayer(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
  case SecurityProtocol.SSL | SecurityProtocol.SASL_SSL => true
  case _ => false
}
/** True when the protocol authenticates via SASL (SASL_PLAINTEXT or SASL_SSL). */
def usesSaslAuthentication(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
  case SecurityProtocol.SASL_PLAINTEXT | SecurityProtocol.SASL_SSL => true
  case _ => false
}
/** Security configs for a client-mode consumer (certificate alias "consumer"). */
def consumerSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
  securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "consumer", SslCertificateCn, saslProperties)
/** Security configs for an admin client (certificate alias "admin-client"). */
def adminClientSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
  securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "admin-client", SslCertificateCn, saslProperties)
/**
 * Create a new consumer with a few pre-configured properties.
 *
 * Explicit arguments always override entries in `props`; group/session/assignment
 * defaults are only applied when not already present.
 */
def createNewConsumer[K, V](brokerList: String,
  groupId: String = "group",
  autoOffsetReset: String = "earliest",
  partitionFetchSize: Long = 4096L,
  partitionAssignmentStrategy: String = classOf[RangeAssignor].getName,
  sessionTimeout: Int = 30000,
  securityProtocol: SecurityProtocol,
  trustStoreFile: Option[File] = None,
  saslProperties: Option[Properties] = None,
  keyDeserializer: Deserializer[K] = new ByteArrayDeserializer,
  valueDeserializer: Deserializer[V] =new ByteArrayDeserializer,
  props: Option[Properties] = None) : KafkaConsumer[K, V] = {
  import org.apache.kafka.clients.consumer.ConsumerConfig
  val consumerProps = props.getOrElse(new Properties())
  consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset)
  consumerProps.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, partitionFetchSize.toString)
  // Only applied when the caller hasn't set them already.
  val defaultProps = Map(
    ConsumerConfig.RETRY_BACKOFF_MS_CONFIG -> "100",
    ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG -> "200",
    ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG -> partitionAssignmentStrategy,
    ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG -> sessionTimeout.toString,
    ConsumerConfig.GROUP_ID_CONFIG -> groupId)
  defaultProps.foreach { case (key, value) =>
    if (!consumerProps.containsKey(key)) consumerProps.put(key, value)
  }
  /*
   * It uses CommonClientConfigs.SECURITY_PROTOCOL_CONFIG to determine whether
   * securityConfigs has been invoked already. For example, we need to
   * invoke it before this call in IntegrationTestHarness, otherwise the
   * SSL client auth fails.
   */
  if(!consumerProps.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG))
    consumerProps ++= consumerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
  new KafkaConsumer[K, V](consumerProps, keyDeserializer, valueDeserializer)
}
/**
 * Create a default producer config properties map with the given metadata broker list.
 * All values other than the broker list are fixed test defaults.
 */
def getProducerConfig(brokerList: String): Properties = {
  val props = new Properties()
  Seq(
    "metadata.broker.list" -> brokerList,
    "message.send.max.retries" -> "5",
    "retry.backoff.ms" -> "1000",
    "request.timeout.ms" -> "2000",
    "request.required.acks" -> "-1",
    "send.buffer.bytes" -> "65536"
  ).foreach { case (key, value) => props.put(key, value) }
  props
}
/** Config for the deprecated SyncProducer, targeting localhost at the given port. */
@deprecated("This method has been deprecated and will be removed in a future release", "0.11.0.0")
def getSyncProducerConfig(port: Int): Properties = {
  val props = new Properties()
  props.put("host", "localhost")
  props.put("port", port.toString)
  props.put("request.timeout.ms", "10000")
  props.put("request.required.acks", "1")
  props.put("serializer.class", classOf[StringEncoder].getName)
  props
}
/** Write the given offset to the ZooKeeper path, using a short-lived ZK connection. */
@deprecated("This method has been deprecated and will be removed in a future release.", "0.11.0.0")
def updateConsumerOffset(config : ConsumerConfig, path : String, offset : Long) = {
  val zkUtils = ZkUtils(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, false)
  zkUtils.updatePersistentPath(path, offset.toString)
  // Close the connection even though updatePersistentPath succeeded; no try/finally here,
  // so a ZK failure leaks the connection — acceptable in test code.
  zkUtils.close()
}
/** Adapt an iterator of MessageAndOffset into a lazy iterator over just the messages. */
def getMessageIterator(iter: Iterator[MessageAndOffset]): Iterator[Message] = {
  new IteratorTemplate[Message] {
    override def makeNext(): Message = {
      if (iter.hasNext)
        iter.next.message
      else
        allDone()
    }
  }
}
/** Register PLAINTEXT brokers (no rack) with the given ids in ZooKeeper. */
def createBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] =
  createBrokersInZk(ids.map(kafka.admin.BrokerMetadata(_, None)), zkClient)
/**
 * Register the given brokers in ZooKeeper.
 * All brokers use a PLAINTEXT endpoint on localhost:6667 and no JMX port.
 */
def createBrokersInZk(brokerMetadatas: Seq[kafka.admin.BrokerMetadata], zkClient: KafkaZkClient): Seq[Broker] = {
  zkClient.makeSurePersistentPathExists(BrokerIdsZNode.path)
  val brokers = brokerMetadatas.map { b =>
    val protocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(protocol)
    Broker(b.id, Seq(EndPoint("localhost", 6667, listenerName, protocol)), b.rack)
  }
  brokers.foreach(b => zkClient.registerBrokerInZk(BrokerInfo(Broker(b.id, b.endPoints, rack = b.rack),
    ApiVersion.latestVersion, jmxPort = -1)))
  brokers
}
/**
 * Remove the broker registration znodes for the given ids and return Broker values
 * describing what was deleted (localhost:6667, PLAINTEXT).
 */
def deleteBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] = {
  val brokers = ids.map(createBroker(_, "localhost", 6667, SecurityProtocol.PLAINTEXT))
  ids.foreach(b => zkClient.deletePath(BrokerIdsZNode.path + "/" + b))
  brokers
}
/** Generate n deterministic message strings: "msg0", "msg1", ..., "msg{n-1}". */
def getMsgStrings(n: Int): Seq[String] =
  (0 until n).map(i => "msg" + i)
/**
 * Create a wired format request based on simple basic information
 *
 * Convenience wrapper over [[produceRequestWithAcks]] for a single topic/partition.
 */
@deprecated("This method has been deprecated and it will be removed in a future release", "0.10.0.0")
def produceRequest(topic: String,
  partition: Int,
  message: ByteBufferMessageSet,
  acks: Int,
  timeout: Int,
  correlationId: Int = 0,
  clientId: String): ProducerRequest = {
  produceRequestWithAcks(Seq(topic), Seq(partition), message, acks, timeout, correlationId, clientId)
}
/**
 * Build a ProducerRequest sending the same message set to every (topic, partition)
 * combination in the cross product of `topics` and `partitions`.
 */
@deprecated("This method has been deprecated and it will be removed in a future release", "0.10.0.0")
def produceRequestWithAcks(topics: Seq[String],
  partitions: Seq[Int],
  message: ByteBufferMessageSet,
  acks: Int,
  timeout: Int,
  correlationId: Int = 0,
  clientId: String): ProducerRequest = {
  val data = topics.flatMap(topic =>
    partitions.map(partition => (TopicAndPartition(topic, partition), message))
  )
  new ProducerRequest(correlationId, clientId, acks.toShort, timeout, collection.mutable.Map(data:_*))
}
/**
 * Force the given leader for each partition of the topic by writing partition state
 * directly to ZooKeeper. If a partition has no existing state, a fresh LeaderAndIsr
 * with an ISR of just the new leader is created.
 */
def makeLeaderForPartition(zkClient: KafkaZkClient,
  topic: String,
  leaderPerPartitionMap: scala.collection.immutable.Map[Int, Int],
  controllerEpoch: Int) {
  val newLeaderIsrAndControllerEpochs = leaderPerPartitionMap.map { case (partition, leader) =>
    val topicPartition = new TopicPartition(topic, partition)
    val newLeaderAndIsr = zkClient.getTopicPartitionState(topicPartition)
      .map(_.leaderAndIsr.newLeader(leader))
      .getOrElse(LeaderAndIsr(leader, List(leader)))
    topicPartition -> LeaderIsrAndControllerEpoch(newLeaderAndIsr, controllerEpoch)
  }
  zkClient.setTopicPartitionStatesRaw(newLeaderIsrAndControllerEpochs)
}
  /**
   * If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected.
   * If oldLeaderOpt is defined, it waits until the new leader is different from the old leader.
   * If newLeaderOpt is defined, it waits until the new leader becomes the expected new leader.
   *
   * @return The new leader (note that negative values are used to indicate conditions like NoLeader and
   *         LeaderDuringDelete).
   * @throws AssertionError if the expected condition is not true within the timeout.
   */
  def waitUntilLeaderIsElectedOrChanged(zkClient: KafkaZkClient, topic: String, partition: Int, timeoutMs: Long = 30000L,
                                        oldLeaderOpt: Option[Int] = None, newLeaderOpt: Option[Int] = None): Int = {
    // The two conditions are mutually exclusive: wait either for a specific new leader
    // or for any leader different from the old one, never both at once.
    require(!(oldLeaderOpt.isDefined && newLeaderOpt.isDefined), "Can't define both the old and the new leader")
    val startTime = System.currentTimeMillis()
    val topicPartition = new TopicPartition(topic, partition)
    trace(s"Waiting for leader to be elected or changed for partition $topicPartition, old leader is $oldLeaderOpt, " +
      s"new leader is $newLeaderOpt")
    var leader: Option[Int] = None
    var electedOrChangedLeader: Option[Int] = None
    while (electedOrChangedLeader.isEmpty && System.currentTimeMillis() < startTime + timeoutMs) {
      // check if leader is elected
      leader = zkClient.getLeaderForPartition(topicPartition)
      leader match {
        case Some(l) => (newLeaderOpt, oldLeaderOpt) match {
          case (Some(newLeader), _) if newLeader == l =>
            trace(s"Expected new leader $l is elected for partition $topicPartition")
            electedOrChangedLeader = leader
          case (_, Some(oldLeader)) if oldLeader != l =>
            trace(s"Leader for partition $topicPartition is changed from $oldLeader to $l")
            electedOrChangedLeader = leader
          case (None, None) =>
            trace(s"Leader $l is elected for partition $topicPartition")
            electedOrChangedLeader = leader
          case _ =>
            trace(s"Current leader for partition $topicPartition is $l")
        }
        case None =>
          trace(s"Leader for partition $topicPartition is not elected yet")
      }
      // Poll at most every 100 ms (or the whole timeout, if it is smaller than that).
      Thread.sleep(math.min(timeoutMs, 100L))
    }
    // Timed out: build a message describing which of the three wait modes failed.
    electedOrChangedLeader.getOrElse {
      val errorMessage = (newLeaderOpt, oldLeaderOpt) match {
        case (Some(newLeader), _) =>
          s"Timing out after $timeoutMs ms since expected new leader $newLeader was not elected for partition $topicPartition, leader is $leader"
        case (_, Some(oldLeader)) =>
          s"Timing out after $timeoutMs ms since a new leader that is different from $oldLeader was not elected for partition $topicPartition, " +
            s"leader is $leader"
        case _ =>
          s"Timing out after $timeoutMs ms since a leader was not elected for partition $topicPartition"
      }
      fail(errorMessage)
    }
  }
/**
* Execute the given block. If it throws an assert error, retry. Repeat
* until no error is thrown or the time limit elapses
*/
def retry(maxWaitMs: Long)(block: => Unit) {
var wait = 1L
val startTime = System.currentTimeMillis()
while(true) {
try {
block
return
} catch {
case e: AssertionError =>
val elapsed = System.currentTimeMillis - startTime
if (elapsed > maxWaitMs) {
throw e
} else {
info("Attempt failed, sleeping for " + wait + ", and then retrying.")
Thread.sleep(wait)
wait += math.min(wait, 1000)
}
}
}
}
/**
* Wait until the given condition is true or throw an exception if the given wait time elapses.
*/
def waitUntilTrue(condition: () => Boolean, msg: => String,
waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L): Unit = {
val startTime = System.currentTimeMillis()
while (true) {
if (condition())
return
if (System.currentTimeMillis() > startTime + waitTime)
fail(msg)
Thread.sleep(waitTime.min(pause))
}
// should never hit here
throw new RuntimeException("unexpected error")
}
/**
* Invoke `compute` until `predicate` is true or `waitTime` elapses.
*
* Return the last `compute` result and a boolean indicating whether `predicate` succeeded for that value.
*
* This method is useful in cases where `waitUntilTrue` makes it awkward to provide good error messages.
*/
def computeUntilTrue[T](compute: => T, waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L)(
predicate: T => Boolean): (T, Boolean) = {
val startTime = System.currentTimeMillis()
while (true) {
val result = compute
if (predicate(result))
return result -> true
if (System.currentTimeMillis() > startTime + waitTime)
return result -> false
Thread.sleep(waitTime.min(pause))
}
// should never hit here
throw new RuntimeException("unexpected error")
}
  /** True iff the given broker currently hosts the leader replica of the partition. */
  def isLeaderLocalOnBroker(topic: String, partitionId: Int, server: KafkaServer): Boolean = {
    server.replicaManager.getPartition(new TopicPartition(topic, partitionId)).exists(_.leaderReplicaIfLocal.isDefined)
  }
  /**
   * Serialize a legacy request into a ByteBuffer: a 2-byte request id followed by the payload.
   * Assumes `request.requestId` is defined (throws otherwise).
   */
  def createRequestByteBuffer(request: RequestOrResponse): ByteBuffer = {
    // +2 reserves room for the short request id written before the body.
    val byteBuffer = ByteBuffer.allocate(request.sizeInBytes + 2)
    byteBuffer.putShort(request.requestId.get)
    request.writeTo(byteBuffer)
    byteBuffer.rewind()
    byteBuffer
  }
  /**
   * Wait until all brokers know about each other.
   *
   * @param servers The Kafka broker servers.
   * @param timeout The amount of time waiting on this condition before assert to fail
   */
  def waitUntilBrokerMetadataIsPropagated(servers: Seq[KafkaServer],
                                          timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    val expectedBrokerIds = servers.map(_.config.brokerId).toSet
    // Every broker's metadata cache must contain exactly the full broker id set.
    TestUtils.waitUntilTrue(() => servers.forall(server =>
      expectedBrokerIds == server.apis.metadataCache.getAliveBrokers.map(_.id).toSet
    ), "Timed out waiting for broker metadata to propagate to all servers", timeout)
  }
  /**
   * Wait until a valid leader is propagated to the metadata cache in each broker.
   * It assumes that the leader propagated to each broker is the same.
   *
   * @param servers The list of servers that the metadata should reach to
   * @param topic The topic name
   * @param partition The partition Id
   * @param timeout The amount of time waiting on this condition before assert to fail
   * @return The leader of the partition.
   */
  def waitUntilMetadataIsPropagated(servers: Seq[KafkaServer], topic: String, partition: Int,
                                    timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    // Captured from the last server inspected; all servers are assumed to agree.
    var leader: Int = -1
    TestUtils.waitUntilTrue(() =>
      // NOTE: foldLeft does not short-circuit, so every server is queried each poll.
      servers.foldLeft(true) {
        (result, server) =>
          val partitionStateOpt = server.apis.metadataCache.getPartitionInfo(topic, partition)
          partitionStateOpt match {
            case None => false
            case Some(partitionState) =>
              leader = partitionState.basePartitionState.leader
              result && Request.isValidBrokerId(leader)
          }
      },
      "Partition [%s,%d] metadata not propagated after %d ms".format(topic, partition, timeout),
      waitTime = timeout)
    leader
  }
  /** Wait until some broker has been elected controller in ZooKeeper and return its id. */
  def waitUntilControllerElected(zkClient: KafkaZkClient, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    val (controllerId, _) = TestUtils.computeUntilTrue(zkClient.getControllerId, waitTime = timeout)(_.isDefined)
    controllerId.getOrElse(fail(s"Controller not elected after $timeout ms"))
  }
  /** Wait until at least one of the given brokers hosts the leader replica of the partition. */
  def waitUntilLeaderIsKnown(servers: Seq[KafkaServer], topic: String, partition: Int,
                             timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    val tp = new TopicPartition(topic, partition)
    TestUtils.waitUntilTrue(() =>
      servers.exists { server =>
        server.replicaManager.getPartition(tp).exists(_.leaderReplicaIfLocal.isDefined)
      }, s"Partition $tp leaders not made yet after $timeout ms", waitTime = timeout
    )
  }
def writeNonsenseToFile(fileName: File, position: Long, size: Int) {
val file = new RandomAccessFile(fileName, "rw")
file.seek(position)
for (_ <- 0 until size)
file.writeByte(random.nextInt(255))
file.close()
}
def appendNonsenseToFile(fileName: File, size: Int) {
val file = new FileOutputStream(fileName, true)
for (_ <- 0 until size)
file.write(random.nextInt(255))
file.close()
}
  /**
   * Assert that the partition's ISR in ZooKeeper contains no replica outside the
   * newly assigned replica set. Assumes the partition state exists in ZK (`.get`).
   */
  def checkForPhantomInSyncReplicas(zkClient: KafkaZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int]) {
    val inSyncReplicas = zkClient.getInSyncReplicasForPartition(new TopicPartition(topic, partitionToBeReassigned))
    // in sync replicas should not have any replica that is not in the new assigned replicas
    val phantomInSyncReplicas = inSyncReplicas.get.toSet -- assignedReplicas.toSet
    assertTrue("All in sync replicas %s must be in the assigned replica list %s".format(inSyncReplicas, assignedReplicas),
      phantomInSyncReplicas.isEmpty)
  }
  /**
   * After a reassignment, wait until the partition is fully replicated, has a leader,
   * and that leader reports no under-replicated partitions.
   */
  def ensureNoUnderReplicatedPartitions(zkClient: KafkaZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int],
                                        servers: Seq[KafkaServer]) {
    val topicPartition = new TopicPartition(topic, partitionToBeReassigned)
    // 1) ISR size must match the assigned replica count.
    TestUtils.waitUntilTrue(() => {
      val inSyncReplicas = zkClient.getInSyncReplicasForPartition(topicPartition)
      inSyncReplicas.get.size == assignedReplicas.size
    },
      "Reassigned partition [%s,%d] is under replicated".format(topic, partitionToBeReassigned))
    // 2) A leader must exist for the partition.
    var leader: Option[Int] = None
    TestUtils.waitUntilTrue(() => {
      leader = zkClient.getLeaderForPartition(topicPartition)
      leader.isDefined
    },
      "Reassigned partition [%s,%d] is unavailable".format(topic, partitionToBeReassigned))
    // 3) The leader broker must report zero under-replicated partitions.
    TestUtils.waitUntilTrue(() => {
      val leaderBroker = servers.filter(s => s.config.brokerId == leader.get).head
      leaderBroker.replicaManager.underReplicatedPartitionCount == 0
    },
      "Reassigned partition [%s,%d] is under-replicated as reported by the leader %d".format(topic, partitionToBeReassigned, leader.get))
  }
def verifyNonDaemonThreadsStatus(threadNamePrefix: String) {
val threadCount = Thread.getAllStackTraces.keySet.asScala.count { t =>
!t.isDaemon && t.isAlive && t.getName.startsWith(threadNamePrefix)
}
assertEquals(0, threadCount)
}
  /**
   * Create new LogManager instance with default configuration for testing
   *
   * @param logDirs directories the manager should own (offline dirs start empty)
   * @param defaultConfig per-topic defaults applied when no override exists
   * @param cleanerConfig cleaner settings; the cleaner is disabled by default for tests
   * @param time mock clock whose scheduler drives the manager's background work
   */
  def createLogManager(logDirs: Seq[File] = Seq.empty[File],
                       defaultConfig: LogConfig = LogConfig(),
                       cleanerConfig: CleanerConfig = CleanerConfig(enableCleaner = false),
                       time: MockTime = new MockTime()): LogManager = {
    new LogManager(logDirs = logDirs,
                   initialOfflineDirs = Array.empty[File],
                   topicConfigs = Map(),
                   initialDefaultConfig = defaultConfig,
                   cleanerConfig = cleanerConfig,
                   recoveryThreadsPerDataDir = 4,
                   flushCheckMs = 1000L,
                   flushRecoveryOffsetCheckpointMs = 10000L,
                   flushStartOffsetCheckpointMs = 10000L,
                   retentionCheckMs = 1000L,
                   maxPidExpirationMs = 60 * 60 * 1000,
                   scheduler = time.scheduler,
                   time = time,
                   brokerState = BrokerState(),
                   brokerTopicStats = new BrokerTopicStats,
                   logDirFailureChannel = new LogDirFailureChannel(logDirs.size))
  }
  /**
   * Send `numMessages` string messages of the form "test-<partition>-<i>" to `topic`
   * using the legacy producer API.
   * If `partition` >= 0 every message is routed to that partition; otherwise the topic
   * name is used as the message key and the default partitioner chooses the partition.
   * @return the payloads that were sent, in order.
   */
  @deprecated("This method has been deprecated and it will be removed in a future release.", "0.10.0.0")
  def sendMessages(servers: Seq[KafkaServer],
                   topic: String,
                   numMessages: Int,
                   partition: Int = -1,
                   compression: CompressionCodec = NoCompressionCodec): List[String] = {
    val header = "test-%d".format(partition)
    val props = new Properties()
    props.put("compression.codec", compression.codec.toString)
    val ms = 0.until(numMessages).map(x => header + "-" + x)
    // Specific Partition
    if (partition >= 0) {
      // FixedValuePartitioner routes by the integer key, which is set to `partition` below.
      val producer: Producer[Int, String] =
        createProducer(TestUtils.getBrokerListStrFromServers(servers),
          encoder = classOf[StringEncoder].getName,
          keyEncoder = classOf[IntEncoder].getName,
          partitioner = classOf[FixedValuePartitioner].getName,
          producerProps = props)
      producer.send(ms.map(m => new KeyedMessage[Int, String](topic, partition, m)): _*)
      debug("Sent %d messages for partition [%s,%d]".format(ms.size, topic, partition))
      producer.close()
      ms.toList
    } else {
      // Use topic as the key to determine partition
      val producer: Producer[String, String] = createProducer(
        TestUtils.getBrokerListStrFromServers(servers),
        encoder = classOf[StringEncoder].getName,
        keyEncoder = classOf[StringEncoder].getName,
        partitioner = classOf[DefaultPartitioner].getName,
        producerProps = props)
      producer.send(ms.map(m => new KeyedMessage[String, String](topic, topic, m)): _*)
      producer.close()
      debug("Sent %d messages for topic [%s]".format(ms.size, topic))
      ms.toList
    }
  }
  /**
   * Produce `numMessages` records to `topic` with the new producer and wait for all acks.
   * Values are "test-<i>" strings unless `valueBytes` >= 0, in which case each value is a
   * zero-filled array of that length.
   * @return the record values that were sent.
   */
  def produceMessages(servers: Seq[KafkaServer],
                      topic: String,
                      numMessages: Int,
                      acks: Int = -1,
                      valueBytes: Int = -1): Seq[Array[Byte]] = {
    val producer = createNewProducer(
      TestUtils.getBrokerListStrFromServers(servers),
      retries = 5,
      acks = acks
    )
    val values = try {
      val curValues = (0 until numMessages).map(x => valueBytes match {
        case -1 => s"test-$x".getBytes
        case _ => new Array[Byte](valueBytes)
      })
      val futures = curValues.map { value =>
        producer.send(new ProducerRecord(topic, value))
      }
      // Block until every send has been acknowledged (or failed).
      futures.foreach(_.get)
      curValues
    } finally {
      // Always release the producer, even if a send failed.
      producer.close()
    }
    debug(s"Sent ${values.size} messages for topic [$topic]")
    values
  }
def produceMessage(servers: Seq[KafkaServer], topic: String, message: String) {
val producer = createNewProducer(
TestUtils.getBrokerListStrFromServers(servers),
retries = 5
)
producer.send(new ProducerRecord(topic, topic.getBytes, message.getBytes)).get
producer.close()
}
  /**
   * Consume all messages (or a specific number of messages)
   *
   * @param topicMessageStreams the Topic Message Streams
   * @param nMessagesPerThread an optional field to specify the exact number of messages to be returned.
   *                           ConsumerTimeoutException will be thrown if there are no messages to be consumed.
   *                           If not specified, then all available messages will be consumed, and no exception is thrown.
   * @return the list of messages consumed.
   */
  @deprecated("This method has been deprecated and will be removed in a future release.", "0.11.0.0")
  def getMessages(topicMessageStreams: Map[String, List[KafkaStream[String, String]]],
                  nMessagesPerThread: Int = -1): List[String] = {
    var messages: List[String] = Nil
    val shouldGetAllMessages = nMessagesPerThread < 0
    for (messageStreams <- topicMessageStreams.values) {
      for (messageStream <- messageStreams) {
        val iterator = messageStream.iterator()
        try {
          var i = 0
          // In "all messages" mode we rely on the consumer timeout to end the loop;
          // in fixed-count mode we read exactly nMessagesPerThread messages per stream.
          while ((shouldGetAllMessages && iterator.hasNext()) || (i < nMessagesPerThread)) {
            assertTrue(iterator.hasNext)
            val message = iterator.next().message // will throw a timeout exception if the message isn't there
            // Prepend for O(1) insertion; order is restored by the final reverse.
            messages ::= message
            debug("received message: " + message)
            i += 1
          }
        } catch {
          case e: ConsumerTimeoutException =>
            if (shouldGetAllMessages) {
              // swallow the exception
              debug("consumer timed out after receiving " + messages.length + " message(s).")
            } else {
              throw e
            }
        }
      }
    }
    messages.reverse
  }
  /**
   * Verify that a deleted topic has been fully purged: ZK paths removed, replica managers
   * emptied, logs deleted, cleaner checkpoints cleared, and partition directories
   * (including ".deleted" rename targets) removed from every broker's log dirs.
   */
  def verifyTopicDeletion(zkClient: KafkaZkClient, topic: String, numPartitions: Int, servers: Seq[KafkaServer]) {
    val topicPartitions = (0 until numPartitions).map(new TopicPartition(topic, _))
    // wait until admin path for delete topic is deleted, signaling completion of topic deletion
    TestUtils.waitUntilTrue(() => !zkClient.isTopicMarkedForDeletion(topic),
      "Admin path /admin/delete_topic/%s path not deleted even after a replica is restarted".format(topic))
    TestUtils.waitUntilTrue(() => !zkClient.topicExists(topic),
      "Topic path /brokers/topics/%s not deleted after /admin/delete_topic/%s path is deleted".format(topic, topic))
    // ensure that the topic-partition has been deleted from all brokers' replica managers
    TestUtils.waitUntilTrue(() =>
      servers.forall(server => topicPartitions.forall(tp => server.replicaManager.getPartition(tp).isEmpty)),
      "Replica manager's should have deleted all of this topic's partitions")
    // ensure that logs from all replicas are deleted if delete topic is marked successful in ZooKeeper
    assertTrue("Replica logs not deleted after delete topic is complete",
      servers.forall(server => topicPartitions.forall(tp => server.getLogManager.getLog(tp).isEmpty)))
    // ensure that topic is removed from all cleaner offsets
    TestUtils.waitUntilTrue(() => servers.forall(server => topicPartitions.forall { tp =>
      val checkpoints = server.getLogManager.liveLogDirs.map { logDir =>
        new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint")).read()
      }
      checkpoints.forall(checkpointsPerLogDir => !checkpointsPerLogDir.contains(tp))
    }), "Cleaner offset for deleted partition should have been removed")
    import scala.collection.JavaConverters._
    // The live "<topic>-<partition>" directories must be gone from every configured log dir.
    TestUtils.waitUntilTrue(() => servers.forall(server =>
      server.config.logDirs.forall { logDir =>
        topicPartitions.forall { tp =>
          !new File(logDir, tp.topic + "-" + tp.partition).exists()
        }
      }
    ), "Failed to soft-delete the data to a delete directory")
    // The renamed "...-delete" directories must also have been cleaned up asynchronously.
    TestUtils.waitUntilTrue(() => servers.forall(server =>
      server.config.logDirs.forall { logDir =>
        topicPartitions.forall { tp =>
          !java.util.Arrays.asList(new File(logDir).list()).asScala.exists { partitionDirectoryName =>
            partitionDirectoryName.startsWith(tp.topic + "-" + tp.partition) &&
              partitionDirectoryName.endsWith(Log.DeleteDirSuffix)
          }
        }
      }
    ), "Failed to hard-delete the delete directory")
  }
/**
* Translate the given buffer into a string
*
* @param buffer The buffer to translate
* @param encoding The encoding to use in translating bytes to characters
*/
def readString(buffer: ByteBuffer, encoding: String = Charset.defaultCharset.toString): String = {
val bytes = new Array[Byte](buffer.remaining)
buffer.get(bytes)
new String(bytes, encoding)
}
def copyOf(props: Properties): Properties = {
val copy = new Properties()
copy ++= props
copy
}
  /**
   * Build SSL client/server properties from the given trust store.
   * @throws Exception if `trustStoreFile` is empty, since SSL cannot be configured without it.
   */
  def sslConfigs(mode: Mode, clientCert: Boolean, trustStoreFile: Option[File], certAlias: String,
                 certCn: String = SslCertificateCn): Properties = {
    val trustStore = trustStoreFile.getOrElse {
      throw new Exception("SSL enabled but no trustStoreFile provided")
    }
    val sslConfigs = TestSslUtils.createSslConfig(clientCert, true, mode, trustStore, certAlias, certCn)
    // Copy the generated config map into a Properties instance expected by callers.
    val sslProps = new Properties()
    sslConfigs.asScala.foreach { case (k, v) => sslProps.put(k, v) }
    sslProps
  }
// a X509TrustManager to trust self-signed certs for unit tests.
def trustAllCerts: X509TrustManager = {
val trustManager = new X509TrustManager() {
override def getAcceptedIssuers: Array[X509Certificate] = {
null
}
override def checkClientTrusted(certs: Array[X509Certificate], authType: String) {
}
override def checkServerTrusted(certs: Array[X509Certificate], authType: String) {
}
}
trustManager
}
  /** Wait until the authorizer reports exactly the expected ACL set for the resource. */
  def waitAndVerifyAcls(expected: Set[Acl], authorizer: Authorizer, resource: Resource) = {
    TestUtils.waitUntilTrue(() => authorizer.getAcls(resource) == expected,
      s"expected acls $expected but got ${authorizer.getAcls(resource)}", waitTime = JTestUtils.DEFAULT_MAX_WAIT_MS)
  }
  /**
   * Verifies that this ACL is the secure one.
   */
  def isAclSecure(acl: ACL, sensitive: Boolean): Boolean = {
    debug(s"ACL $acl")
    acl.getPerms match {
      // World-readable is only acceptable on non-sensitive paths.
      case Perms.READ => !sensitive && acl.getId.getScheme == "world"
      // Full access must be restricted to SASL-authenticated principals.
      case Perms.ALL => acl.getId.getScheme == "sasl"
      case _ => false
    }
  }
  /**
   * Verifies that the ACL corresponds to the unsecure one that
   * provides ALL access to everyone (world).
   */
  def isAclUnsecure(acl: ACL): Boolean = {
    debug(s"ACL $acl")
    acl.getPerms match {
      case Perms.ALL => acl.getId.getScheme == "world"
      case _ => false
    }
  }
  /**
   * Recursively collect every existing ZK path under the secure and sensitive root paths.
   */
  private def secureZkPaths(zkUtils: ZkUtils): Seq[String] = {
    def subPaths(path: String): Seq[String] = {
      if (zkUtils.pathExists(path))
        path +: zkUtils.getChildren(path).map(c => path + "/" + c).flatMap(subPaths)
      else
        Seq.empty
    }
    val topLevelPaths = ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths
    topLevelPaths.flatMap(subPaths)
  }
/**
* Verifies that all secure paths in ZK are created with the expected ACL.
*/
def verifySecureZkAcls(zkUtils: ZkUtils, usersWithAccess: Int) {
secureZkPaths(zkUtils).foreach(path => {
if (zkUtils.pathExists(path)) {
val sensitive = ZkUtils.sensitivePath(path)
// usersWithAccess have ALL access to path. For paths that are
// not sensitive, world has READ access.
val aclCount = if (sensitive) usersWithAccess else usersWithAccess + 1
val acls = zkUtils.zkConnection.getAcl(path).getKey
assertEquals(s"Invalid ACLs for $path $acls", aclCount, acls.size)
acls.asScala.foreach(acl => isAclSecure(acl, sensitive))
}
})
}
/**
* Verifies that secure paths in ZK have no access control. This is
* the case when zookeeper.set.acl=false and no ACLs have been configured.
*/
def verifyUnsecureZkAcls(zkUtils: ZkUtils) {
secureZkPaths(zkUtils).foreach(path => {
if (zkUtils.pathExists(path)) {
val acls = zkUtils.zkConnection.getAcl(path).getKey
assertEquals(s"Invalid ACLs for $path $acls", 1, acls.size)
acls.asScala.foreach(isAclUnsecure)
}
})
}
  /**
   * To use this you pass in a sequence of functions that are your arrange/act/assert test on the SUT.
   * They all run at the same time in the assertConcurrent method; the chances of triggering a multithreading code error,
   * and thereby failing some assertion are greatly increased.
   */
  def assertConcurrent(message: String, functions: Seq[() => Any], timeoutMs: Int) {
    def failWithTimeout() {
      fail(s"$message. Timed out, the concurrent functions took more than $timeoutMs milliseconds")
    }
    // One thread per function so they all genuinely run concurrently.
    val numThreads = functions.size
    val threadPool = Executors.newFixedThreadPool(numThreads)
    val exceptions = ArrayBuffer[Throwable]()
    try {
      val runnables = functions.map { function =>
        new Callable[Unit] {
          override def call(): Unit = function()
        }
      }.asJava
      // invokeAll blocks until all tasks finish or the timeout expires; timed-out
      // tasks come back cancelled.
      val futures = threadPool.invokeAll(runnables, timeoutMs, TimeUnit.MILLISECONDS).asScala
      futures.foreach { future =>
        if (future.isCancelled)
          failWithTimeout()
        else
          try future.get()
          catch { case e: Exception =>
            // Collect instead of rethrowing so every function's failure is reported.
            exceptions += e
          }
      }
    } catch {
      case _: InterruptedException => failWithTimeout()
      case e: Throwable => exceptions += e
    } finally {
      threadPool.shutdownNow()
    }
    assertTrue(s"$message failed with exception(s) $exceptions", exceptions.isEmpty)
  }
  /**
   * Create a throwaway consumer subscribed to `topic`, read exactly `numMessages` records
   * and close the consumer (even on failure).
   */
  def consumeTopicRecords[K, V](servers: Seq[KafkaServer],
                                topic: String,
                                numMessages: Int,
                                securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
                                trustStoreFile: Option[File] = None,
                                waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[Array[Byte], Array[Byte]]] = {
    val consumer = createNewConsumer(TestUtils.getBrokerListStrFromServers(servers, securityProtocol),
      securityProtocol = securityProtocol, trustStoreFile = trustStoreFile)
    try {
      consumer.subscribe(Collections.singleton(topic))
      consumeRecords(consumer, numMessages, waitTime)
    } finally consumer.close()
  }
  /**
   * Poll the given consumer until exactly `numMessages` records have been received;
   * fails both when too few arrive within `waitTime` and when more than expected arrive.
   */
  def consumeRecords[K, V](consumer: KafkaConsumer[K, V], numMessages: Int,
                           waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[K, V]] = {
    val records = new ArrayBuffer[ConsumerRecord[K, V]]()
    waitUntilTrue(() => {
      records ++= consumer.poll(50).asScala
      records.size >= numMessages
    }, s"Consumed ${records.size} records until timeout instead of the expected $numMessages records", waitTime)
    assertEquals("Consumed more records than expected", numMessages, records.size)
    records
  }
  /**
   * Will consume all the records for the given consumer for the specified duration. If you want to drain all the
   * remaining messages in the partitions the consumer is subscribed to, the duration should be set high enough so
   * that the consumer has enough time to poll everything. This would be based on the number of expected messages left
   * in the topic, and should not be too large (ie. more than a second) in our tests.
   *
   * @return All the records consumed by the consumer within the specified duration.
   */
  def consumeRecordsFor[K, V](consumer: KafkaConsumer[K, V], duration: Long): Seq[ConsumerRecord[K, V]] = {
    val startTime = System.currentTimeMillis()
    val records = new ArrayBuffer[ConsumerRecord[K, V]]()
    // waitUntilTrue is (ab)used as a poll loop here: the "condition" becomes true once
    // `duration` ms have elapsed, so `duration` must stay below waitUntilTrue's default
    // wait time or the call fails with the message below.
    waitUntilTrue(() => {
      records ++= consumer.poll(50).asScala
      System.currentTimeMillis() - startTime > duration
    }, s"The timeout $duration was greater than the maximum wait time.")
    records
  }
  /**
   * Create an idempotent, transactional producer with effectively unlimited retries,
   * configured with the given transactional id and transaction timeout.
   */
  def createTransactionalProducer(transactionalId: String, servers: Seq[KafkaServer], batchSize: Int = 16384,
                                  transactionTimeoutMs: Long = 60000) = {
    val props = new Properties()
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId)
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5")
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize.toString)
    props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, transactionTimeoutMs.toString)
    TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers), retries = Integer.MAX_VALUE, acks = -1, props = Some(props))
  }
  // Seeds the given topic with records with keys and values in the range [0..numRecords)
  // Each record i has key == value == i.toString (UTF-8). The producer is always closed.
  def seedTopicWithNumberedRecords(topic: String, numRecords: Int, servers: Seq[KafkaServer]): Unit = {
    val props = new Properties()
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
    val producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers),
      retries = Integer.MAX_VALUE, acks = -1, props = Some(props))
    try {
      for (i <- 0 until numRecords) {
        producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, asBytes(i.toString), asBytes(i.toString)))
      }
      producer.flush()
    } finally {
      producer.close()
    }
  }
  // Decode a UTF-8 byte array into a String.
  private def asString(bytes: Array[Byte]) = new String(bytes, StandardCharsets.UTF_8)
  // Encode a String as UTF-8 bytes.
  private def asBytes(string: String) = string.getBytes(StandardCharsets.UTF_8)
  // Verifies that the record was intended to be committed by checking the headers for an expected transaction status
  // If true, this will return the value as a string. It is expected that the record in question should have been created
  // by the `producerRecordWithExpectedTransactionStatus` method.
  def assertCommittedAndGetValue(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
    // Only the first header with the transaction-status key is inspected.
    record.headers.headers(transactionStatusKey).asScala.headOption match {
      case Some(header) =>
        assertEquals(s"Got ${asString(header.value)} but expected the value to indicate " +
          s"committed status.", asString(committedValue), asString(header.value))
      case None =>
        fail("expected the record header to include an expected transaction status, but received nothing.")
    }
    recordValueAsString(record)
  }
  // Decode the record's value as a UTF-8 string.
  def recordValueAsString(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
    asString(record.value)
  }
  // Build a record carrying a header that marks whether the surrounding transaction is
  // expected to commit or abort; checked later by `assertCommittedAndGetValue`.
  def producerRecordWithExpectedTransactionStatus(topic: String, key: Array[Byte], value: Array[Byte],
                                                  willBeCommitted: Boolean) : ProducerRecord[Array[Byte], Array[Byte]] = {
    val header = new Header {override def key() = transactionStatusKey
      override def value() = if (willBeCommitted)
        committedValue
      else
        abortedValue
    }
    new ProducerRecord[Array[Byte], Array[Byte]](topic, null, key, value, Collections.singleton(header))
  }
  // String-key/value convenience overload of the above.
  def producerRecordWithExpectedTransactionStatus(topic: String, key: String, value: String,
                                                  willBeCommitted: Boolean) : ProducerRecord[Array[Byte], Array[Byte]] = {
    producerRecordWithExpectedTransactionStatus(topic, asBytes(key), asBytes(value), willBeCommitted)
  }
// Collect the current positions for all partition in the consumers current assignment.
def consumerPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]) : Map[TopicPartition, OffsetAndMetadata] = {
val offsetsToCommit = new mutable.HashMap[TopicPartition, OffsetAndMetadata]()
consumer.assignment.asScala.foreach { topicPartition =>
offsetsToCommit.put(topicPartition, new OffsetAndMetadata(consumer.position(topicPartition)))
}
offsetsToCommit.toMap
}
  /**
   * Keep polling until at least `numRecords` records have accumulated (unlike
   * `consumeRecords`, more than `numRecords` is acceptable here).
   */
  def pollUntilAtLeastNumRecords(consumer: KafkaConsumer[Array[Byte], Array[Byte]], numRecords: Int): Seq[ConsumerRecord[Array[Byte], Array[Byte]]] = {
    val records = new ArrayBuffer[ConsumerRecord[Array[Byte], Array[Byte]]]()
    TestUtils.waitUntilTrue(() => {
      records ++= consumer.poll(50).asScala
      records.size >= numRecords
    }, s"Consumed ${records.size} records until timeout, but expected $numRecords records.")
    records
  }
  /**
   * Rewind every assigned partition to its last committed offset, or to the beginning
   * of the partition when nothing has been committed yet.
   */
  def resetToCommittedPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]) = {
    consumer.assignment.asScala.foreach { case(topicPartition) =>
      val offset = consumer.committed(topicPartition)
      if (offset != null)
        consumer.seek(topicPartition, offset.offset)
      else
        consumer.seekToBeginning(Collections.singletonList(topicPartition))
    }
  }
  /**
   * Submit a dynamic broker-config change via the admin client: either one change per
   * broker id (`perBrokerConfig = true`) or a single cluster-default change.
   */
  def alterConfigs(servers: Seq[KafkaServer], adminClient: AdminClient, props: Properties,
                   perBrokerConfig: Boolean): AlterConfigsResult = {
    val configEntries = props.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava
    val newConfig = new Config(configEntries)
    val configs = if (perBrokerConfig) {
      servers.map { server =>
        val resource = new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString)
        (resource, newConfig)
      }.toMap.asJava
    } else {
      // An empty resource name addresses the cluster-wide default broker config.
      Map(new ConfigResource(ConfigResource.Type.BROKER, "") -> newConfig).asJava
    }
    adminClient.alterConfigs(configs)
  }
/**
* Capture the console output during the execution of the provided function.
*/
def grabConsoleOutput(f: => Unit) : String = {
val out = new ByteArrayOutputStream
try scala.Console.withOut(out)(f)
finally scala.Console.out.flush()
out.toString
}
/**
* Capture the console error during the execution of the provided function.
*/
def grabConsoleError(f: => Unit) : String = {
val err = new ByteArrayOutputStream
try scala.Console.withErr(err)(f)
finally scala.Console.err.flush()
err.toString
}
/**
* Capture both the console output and console error during the execution of the provided function.
*/
def grabConsoleOutputAndError(f: => Unit) : (String, String) = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
try scala.Console.withOut(out)(scala.Console.withErr(err)(f))
finally {
scala.Console.out.flush()
scala.Console.err.flush()
}
(out.toString, err.toString)
}
}
/** Legacy-producer encoder serializing an Int as the bytes of its decimal string form. */
class IntEncoder(props: VerifiableProperties = null) extends Encoder[Int] {
  override def toBytes(n: Int) = n.toString.getBytes
}
/** Legacy partitioner routing by string length modulo the partition count. */
@deprecated("This class is deprecated and it will be removed in a future release.", "0.10.0.0")
class StaticPartitioner(props: VerifiableProperties = null) extends Partitioner {
  def partition(data: Any, numPartitions: Int): Int = {
    data.asInstanceOf[String].length % numPartitions
  }
}
/**
 * Legacy partitioner that treats the key itself as the target partition number.
 * Assumes keys are Ints within [0, numPartitions) — callers must guarantee this.
 */
@deprecated("This class has been deprecated and it will be removed in a future release.", "0.10.0.0")
class FixedValuePartitioner(props: VerifiableProperties = null) extends Partitioner {
  def partition(data: Any, numPartitions: Int): Int = data.asInstanceOf[Int]
}
| sebadiaz/kafka | core/src/test/scala/unit/kafka/utils/TestUtils.scala | Scala | apache-2.0 | 67,082 |
// Distributed under the MIT license, see: http://www.opensource.org/licenses/MIT
package org.orbeon.errorified
// Exceptions utilities
// Uses reflection to find nested causes when exceptions don't support Java's getCause
object Exceptions {
  // Returns the exception directly nested
  // This first tries reflection and then falls back to the standard getCause
  def getNestedThrowable(t: Throwable): Option[Throwable] = {
    // Create a map of all classes and interfaces implemented by the throwable
    val throwableClasses = {
      def superIterator = Iterator.iterate[Class[_]](t.getClass)(_.getSuperclass) takeWhile (_ ne null)
      def interfaceIterator = t.getClass.getInterfaces.iterator
      // NOTE: toMap's type parameters are needed with 2.9.2, but not with 2.10
      (superIterator ++ interfaceIterator map (c ⇒ c.getName → c)).toMap[String, Class[_]]
    }
    // Invoke the given getter on t
    // Any reflection failure (missing method, access error, ...) is deliberately
    // swallowed and treated as "no nested throwable via this getter".
    def invokeGetter(clazz: Class[_], getter: String): Option[Throwable] =
      try {
        val method = clazz.getMethod(getter)
        val result = method.invoke(t)
        Option(result.asInstanceOf[Throwable])
      } catch {
        case _: Throwable ⇒ None
      }
    // Try to find a match
    // `find` returns the FIRST matching entry, so the ordering of Getters matters:
    // specific wrapper types are listed before the generic java.lang.Throwable fallback.
    Getters find
      { case (clazz, _) ⇒ throwableClasses.contains(clazz) } flatMap
      { case (clazz, getter) ⇒ invokeGetter(throwableClasses(clazz), getter) }
  }
  // Typically for Java callers
  def getNestedThrowableOrNull(t: Throwable) = getNestedThrowable(t) orNull
  // Iterator down a throwable's causes
  def causesIterator(t: Throwable): Iterator[Throwable] =
    Iterator.iterate(t)(getNestedThrowableOrNull).takeWhile(_ ne null)
  // Get the root cause of the throwable
  // NOTE(review): a cyclic cause chain would make this loop forever — assumed not to occur.
  def getRootThrowable(t: Throwable): Throwable =
    causesIterator(t).toList.last
  // Ordered (class name -> getter name) pairs used to unwrap well-known wrapper
  // exceptions that predate or bypass Java's standard getCause chaining.
  val Getters = Seq(
    "javax.xml.transform.TransformerException" → "getException",
    "org.xml.sax.SAXException" → "getException",
    "java.lang.reflect.InvocationTargetException" → "getTargetException",
    "javax.servlet.ServletException" → "getRootCause",
    "org.apache.batik.transcoder.TranscoderException" → "getException",
    "orbeon.apache.xml.utils.WrappedRuntimeException" → "getException",
    "org.iso_relax.verifier.VerifierConfigurationException" → "getCauseException",
    "com.drew.lang.CompoundException" → "getInnerException",
    "com.lowagie.text.ExceptionConverter" → "getException",
    "java.lang.Throwable" → "getCause"
  )
}
| orbeon/errorified | src/main/scala/org/orbeon/errorified/Exceptions.scala | Scala | mit | 2,840 |
package com.mattrjacobs.rxscala.slides
import rx.lang.scala.Observable
trait Slide91 extends App {
  // Abstract members to be supplied by the concrete slide implementation.
  type SomeData
  val a: Observable[SomeData]
  val b: Observable[String]
  // zip pairs the n-th element of `a` with the n-th element of `b`;
  // the observer prints each pair, any error, and the completion signal.
  a.zip(b).subscribe(
    pair => println("a: " + pair._1 + " b: " + pair._2),
    ex => println("error occurred: " + ex.getMessage),
    () => println("completed"))
}
| mattrjacobs/RxScalaDemo | src/main/scala/slides/Slide91.scala | Scala | apache-2.0 | 345 |
package mesosphere.marathon.core.matcher.base
import mesosphere.marathon.core.launcher.TaskOp
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state.{ PathId, Timestamp }
import org.apache.mesos.{ Protos => Mesos }
import scala.concurrent.Future
object OfferMatcher {
  /**
    * A TaskOp with a [[TaskOpSource]].
    *
    * The [[TaskOpSource]] is informed whether the op is ultimately sent to Mesos or if it is rejected
    * (e.g. by throttling logic).
    */
  case class TaskOpWithSource(source: TaskOpSource, op: TaskOp) {
    def taskId: Task.Id = op.taskId
    def accept(): Unit = source.taskOpAccepted(op)
    def reject(reason: String): Unit = source.taskOpRejected(op, reason)
  }
  /**
    * Reply from an offer matcher to a MatchOffer. If the offer match
    * could not match the offer in any way it should simply leave the tasks
    * collection empty.
    *
    * To increase fairness between matchers, each normal matcher should schedule as few operations
    * as possible per offer per match, e.g. one for task launches without reservations. Multiple launches could be used
    * if the tasks need to be colocated or if the operations are intrinsically dependent on each other.
    * The OfferMultiplexer tries to summarize suitable
    * matches from multiple offer matches into one response.
    *
    * A MatchedTaskOps reply does not guarantee that these operations can actually be executed.
    * The launcher of message should setup some kind of timeout mechanism and handle
    * taskOpAccepted/taskOpRejected calls appropriately.
    *
    * @param offerId the identifier of the offer
    * @param opsWithSource the ops that should be executed on that offer including the source of each op
    * @param resendThisOffer true, if this offer could not be processed completely (e.g. timeout)
    *                        and should be resend and processed again
    */
  case class MatchedTaskOps(
      offerId: Mesos.OfferID,
      opsWithSource: Seq[TaskOpWithSource],
      resendThisOffer: Boolean = false) {
    /** all included [TaskOp] without the source information. */
    def ops: Iterable[TaskOp] = opsWithSource.view.map(_.op)
    /** All TaskInfos of launched tasks. */
    def launchedTaskInfos: Iterable[Mesos.TaskInfo] = ops.view.collect {
      case TaskOp.Launch(taskInfo, _, _, _) => taskInfo
    }
  }
  object MatchedTaskOps {
    /** Convenience constructor for "no ops matched for this offer". */
    def noMatch(offerId: Mesos.OfferID, resendThisOffer: Boolean = false): MatchedTaskOps =
      new MatchedTaskOps(offerId, Seq.empty, resendThisOffer = resendThisOffer)
  }
  /**
    * Receives feedback about the fate of ops returned by an offer matcher:
    * each op is either accepted (sent to Mesos) or rejected with a reason.
    *
    * Explicit `: Unit` result types added (abstract defs without a result type
    * default to Unit anyway, so this is interface-compatible).
    */
  trait TaskOpSource {
    def taskOpAccepted(taskOp: TaskOp): Unit
    def taskOpRejected(taskOp: TaskOp, reason: String): Unit
  }
}
/**
  * Tries to match offers with given tasks.
  *
  * Implementations are registered with the offer matcher manager and asked, one
  * offer at a time, which operations they want to perform on that offer.
  */
trait OfferMatcher {
  /**
    * Process offer and return the ops that this matcher wants to execute on this offer.
    *
    * The offer matcher can expect either a taskOpAccepted or a taskOpRejected call
    * for every returned `org.apache.mesos.Protos.TaskInfo`.
    *
    * @param deadline the point in time by which matching should be finished
    * @param offer    the Mesos offer to match against
    */
  def matchOffer(deadline: Timestamp, offer: Mesos.Offer): Future[OfferMatcher.MatchedTaskOps]
  /**
    * We can optimize the offer routing for different offer matcher in case there are reserved resources.
    * A defined precedence is used to filter incoming offers with reservations that apply to this filter.
    * If the filter matches, the offer matcher manager has higher priority than other matchers.
    *
    * Defaults to None, i.e. no special routing precedence.
    */
  def precedenceFor: Option[PathId] = None
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/matcher/base/OfferMatcher.scala | Scala | apache-2.0 | 3,492 |
import akka.actor._
object Guess2 {
  /**
   * Entry point: starts an actor system with a guessing server and client,
   * asks the client to find the server's secret number and stops the client
   * once the answer arrives.
   */
  def main(args: Array[String]): Unit = {
    println("Guess2: begin")
    val system = ActorSystem("Guess2")
    // The server must be created under the name "GuessServer2" because the
    // client looks it up via the relative path "../GuessServer2".
    val server = ActorDSL.actor(system, "GuessServer2")(new GuessServer2)
    val client = ActorDSL.actor(system)(new GuessClient2)
    import scala.concurrent.duration._
    implicit val timeout = akka.util.Timeout(5.seconds)
    import system.dispatcher
    import akka.pattern.ask
    // Ask pattern: the future completes when the client replies with the found number.
    val responseFuture = client ? 'start
    // `foreach` replaces the deprecated `Future.onSuccess` callback; as in the
    // original code, a failed future (e.g. ask timeout) is deliberately ignored.
    responseFuture.foreach { response =>
      println("Guess2: received response: " + response)
      client ! 'stop
    }
    println("Guess2: end")
  }
}
// Actor that hunts for the server's secret number by binary-ish stepping:
// it guesses, then increments or decrements by one based on the server's verdict.
class GuessClient2 extends Actor {
  println("GuessClient2: starting")
  // Current guess; mutated in place as verdicts arrive.
  private[this] var n: Int = 5
  // NOTE(review): context.actorFor is deprecated in newer Akka versions
  // (replaced by actorSelection) — confirm the Akka version in use.
  val server = context.actorFor("../GuessServer2")
  def receive = {
    case 'start =>
      // Capture the asker now: inside the become-block `sender` would refer to
      // the server, not to whoever initiated the guessing.
      val guessSender = sender
      send(n)
      // While guessing, only react to the server's verdict messages.
      context.become {
        case 'correct =>
          println(s"GuessClient2: solution found: $n")
          // Restore the original behavior and report the answer to the asker.
          context.unbecome()
          guessSender ! n
        case 'tooSmall =>
          println("GuessClient2: too small")
          n = n + 1
          send(n)
        case 'tooBig =>
          println("GuessClient2: too big")
          n = n - 1
          send(n)
      }
    case 'stop =>
      println("GuessClient2: stopping")
      context.system.shutdown()
  }
  // Logs and forwards the guess to the server actor.
  def send(msg: Int) {
    println(s"GuessClient2: sending $msg")
    server ! msg
  }
}
// Actor holding a secret number; it grades each incoming Int guess.
class GuessServer2 extends Actor {
  // Secret number in [0, 9], fixed when the actor is constructed.
  val n = scala.util.Random.nextInt(10)
  println("GuessServer2: starting")
  // Replies 'tooBig / 'tooSmall / 'correct for every Int guess received.
  def receive = {
    case x: Int if x > n => send(sender, 'tooBig)
    case x: Int if x < n => send(sender, 'tooSmall)
    case x: Int => send(sender, 'correct)
  }
  // Logs and sends the verdict back to the guessing actor. The `sender`
  // parameter intentionally shadows Actor.sender: callers pass the current
  // sender explicitly. Explicit `: Unit =` replaces the deprecated procedure
  // syntax; the parameter name is kept for interface compatibility.
  def send(sender: ActorRef, msg: Symbol): Unit = {
    println(s"GuessServer2: sending $msg")
    sender ! msg
  }
  override def preStart(): Unit = println("GuessServer2: preStart")
  // Fixed log message: previously printed "preStop" from postStop.
  override def postStop(): Unit = println("GuessServer2: postStop")
}
| grzegorzbalcerek/scala-book-examples | examples/Guess2.scala | Scala | mit | 1,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark
import org.apache.spark.Partitioner
import org.apache.carbondata.core.metadata.schema.PartitionInfo
import org.apache.carbondata.core.metadata.schema.partition.PartitionType
import org.apache.carbondata.core.scan.partition.{HashPartitioner => JavaHashPartitioner, ListPartitioner => JavaListPartitioner, RangePartitioner => JavaRangePartitioner}
import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException
object PartitionFactory {
  /**
   * Creates the Spark [[Partitioner]] matching the partition type declared in
   * the table's [[PartitionInfo]].
   *
   * @throws CarbonDataLoadingException if the partition type has no Spark
   *                                    partitioner implementation here
   */
  def getPartitioner(partitionInfo: PartitionInfo): Partitioner = {
    partitionInfo.getPartitionType match {
      case PartitionType.HASH => new HashPartitioner(partitionInfo.getNumPartitions)
      case PartitionType.LIST => new ListPartitioner(partitionInfo)
      case PartitionType.RANGE => new RangePartitioner(partitionInfo)
      case partitionType =>
        // Fixed message typo: "Unsupport" -> "Unsupported".
        throw new CarbonDataLoadingException(s"Unsupported partition type: ${partitionType}")
    }
  }
}
/** Spark [[Partitioner]] that delegates hash partitioning to the Carbon Java implementation. */
class HashPartitioner(partitions: Int) extends Partitioner {
  // Underlying Carbon partitioner that performs the actual hashing.
  private[this] val delegate = new JavaHashPartitioner(partitions)
  override def numPartitions: Int = delegate.numPartitions()
  override def getPartition(key: Any): Int = delegate.getPartition(key)
}
/** Spark [[Partitioner]] that delegates list-based partitioning to the Carbon Java implementation. */
class ListPartitioner(partitionInfo: PartitionInfo) extends Partitioner {
  // Underlying Carbon partitioner that maps keys to their list partition.
  private[this] val delegate = new JavaListPartitioner(partitionInfo)
  override def numPartitions: Int = delegate.numPartitions()
  override def getPartition(key: Any): Int = delegate.getPartition(key)
}
/** Spark [[Partitioner]] that delegates range-based partitioning to the Carbon Java implementation. */
class RangePartitioner(partitionInfo: PartitionInfo) extends Partitioner {
  // Underlying Carbon partitioner that maps keys to their range partition.
  private[this] val delegate = new JavaRangePartitioner(partitionInfo)
  override def numPartitions: Int = delegate.numPartitions()
  override def getPartition(key: Any): Int = delegate.getPartition(key)
}
| HuaweiBigData/carbondata | integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala | Scala | apache-2.0 | 2,609 |
package repositories.analysis.dao
import com.google.inject.{Inject, Singleton}
import models.analysis.Treatment
import no.uio.musit.MusitResults.{MusitResult, MusitSuccess}
import no.uio.musit.repositories.DbErrorHandlers
import play.api.Logger
import play.api.db.slick.DatabaseConfigProvider
import scala.concurrent.{ExecutionContext, Future}
// DAO exposing read access to the conservation "treatment" reference table.
@Singleton
class TreatmentDao @Inject()(
    implicit
    val dbConfigProvider: DatabaseConfigProvider,
    val ec: ExecutionContext
) extends AnalysisTables
    with DbErrorHandlers {
  val logger = Logger(classOf[TreatmentDao])
  import profile.api._
  // Fetches every row from the treatment table, maps each row to the Treatment
  // domain type and wraps the result in MusitSuccess; any non-fatal database
  // error is converted to a MusitResult failure via `nonFatal`.
  def getTreatmentList: Future[MusitResult[Seq[Treatment]]] = {
    db.run(treatmentTable.result)
      .map(_.map(fromTreatmentRow))
      .map(MusitSuccess.apply)
      .recover(nonFatal(s"An unexpected error occurred fetching treatment list"))
  }
}
| MUSIT-Norway/musit | service_backend/app/repositories/analysis/dao/TreatmentDao.scala | Scala | gpl-2.0 | 856 |
package slinky.core
import org.scalajs.dom.document
import slinky.core.facade.{React, ReactElement}
import slinky.web.ReactDOM
import org.scalatest.funsuite.AnyFunSuite
// Tests for Slinky's FunctionalComponent wrapper: basic rendering, React.memo
// caching behavior (default and custom comparison), and builder reuse rules.
class FunctionalComponentTest extends AnyFunSuite {
  // Smoke test: a plain functional component renders its props.
  test("Can render a functional component") {
    val container = document.createElement("div")
    val component = FunctionalComponent[Int](_.toString)
    ReactDOM.render(component(1), container)
    assert(container.innerHTML == "1")
  }
  // React.memo must skip re-rendering when the props reference is unchanged.
  test("Re-rendering a memoed component with same props works") {
    val container = document.createElement("div")
    var renderCount = 0
    case class Props(a: Int)
    val component = React.memo(FunctionalComponent[Props] { props =>
      renderCount += 1
      props.a.toString
    })
    val inProps = Props(1)
    ReactDOM.render(component(inProps), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
    // Same props instance: memo should prevent a second render.
    ReactDOM.render(component(inProps), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
  }
  // React.memo must re-render when props actually change.
  test("Re-rendering a memoed component with different props works") {
    val container = document.createElement("div")
    var renderCount = 0
    case class Props(a: Int)
    val component = React.memo(FunctionalComponent[Props] { props =>
      renderCount += 1
      props.a.toString
    })
    val inProps = Props(1)
    ReactDOM.render(component(inProps), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
    // Changed props: memo must allow the re-render.
    ReactDOM.render(component(inProps.copy(a = 2)), container)
    assert(container.innerHTML == "2")
    assert(renderCount == 2)
  }
  // A custom comparison that reports "equal" must suppress the re-render even
  // though an (ignored) field changed.
  test("Re-rendering a memoed component with matching comparison works") {
    val container = document.createElement("div")
    var renderCount = 0
    case class Props(a: Int, ignore: Int)
    val component = React.memo(FunctionalComponent[Props] { props =>
      renderCount += 1
      props.a.toString
    }, (oldProps: Props, newProps: Props) => oldProps.a == newProps.a)
    val inProps = Props(1, 2)
    ReactDOM.render(component(inProps), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
    ReactDOM.render(component(inProps.copy(ignore = 3)), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
  }
  // A custom comparison that reports "not equal" must trigger the re-render.
  test("Re-rendering a memoed component with non-matching comparison works") {
    val container = document.createElement("div")
    var renderCount = 0
    case class Props(a: Int)
    val component = React.memo(FunctionalComponent[Props] { props =>
      renderCount += 1
      props.a.toString
    }, (oldProps: Props, newProps: Props) => oldProps.a == newProps.a)
    val inProps = Props(1)
    ReactDOM.render(component(inProps), container)
    assert(container.innerHTML == "1")
    assert(renderCount == 1)
    ReactDOM.render(component(inProps.copy(a = 2)), container)
    assert(container.innerHTML == "2")
    assert(renderCount == 2)
  }
  // The partially-applied component builder may only be finalized once.
  test("Cannot reuse half-built functional component") {
    val component = FunctionalComponent[Int](_.toString)
    val halfBuilt = component(1)
    halfBuilt.withKey("abc"): ReactElement
    assertThrows[IllegalStateException] {
      halfBuilt.withKey("abc2"): ReactElement
    }
  }
}
| shadaj/slinky | tests/src/test/scala/slinky/core/FunctionalComponentTest.scala | Scala | mit | 3,222 |
package util
import org.apache.abdera.model.Feed
/**
*
* @author ponkotuy
* Date: 14/12/08.
*/
/**
 * Thin wrapper around a single shared Abdera engine, used as a factory for
 * Atom feed objects.
 */
object Abdera {
  // One Abdera engine suffices for the whole application; it is created when
  // this object is first referenced.
  private[this] val underlying = new org.apache.abdera.Abdera()
  /** Creates a fresh, empty Atom [[Feed]]. */
  def newFeed(): Feed = underlying.newFeed()
}
| ponkotuy/MyFleetGirls | server/app/util/Abdera.scala | Scala | mit | 224 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.IOException
import java.net.URI
import java.text.SimpleDateFormat
import java.util.{Date, Locale, Random}
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.common.FileUtils
import org.apache.hadoop.hive.ql.exec.TaskRunner
import org.apache.hadoop.hive.ql.ErrorMsg
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.{AnalysisException, Dataset, Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.execution.datasources.FileFormatWriter
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc}
import org.apache.spark.sql.hive.client.{HiveClientImpl, HiveVersion}
import org.apache.spark.SparkException
/**
* Command for writing data out to a Hive table.
*
* This class is mostly a mess, for legacy reasons (since it evolved in organic ways and had to
* follow Hive's internal implementations closely, which itself was a mess too). Please don't
* blame Reynold for this! He was just moving code around!
*
* In the future we should converge the write path for Hive with the normal data source write path,
* as defined in `org.apache.spark.sql.execution.datasources.FileFormatWriter`.
*
* @param table the metadata of the table.
* @param partition a map from the partition key to the partition value (optional). If the partition
* value is optional, dynamic partition insert will be performed.
* As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS ...` would have
*
* {{{
* Map('a' -> Some('1'), 'b' -> Some('2'))
* }}}
*
* and `INSERT INTO tbl PARTITION (a=1, b) AS ...`
* would have
*
* {{{
* Map('a' -> Some('1'), 'b' -> None)
* }}}.
* @param query the logical plan representing data to write to.
* @param overwrite overwrite existing table or partitions.
* @param ifNotExists If true, only write if the table or partition does not exist.
*/
case class InsertIntoHiveTable(
    table: CatalogTable,
    partition: Map[String, Option[String]],
    query: LogicalPlan,
    overwrite: Boolean,
    ifNotExists: Boolean) extends RunnableCommand {
  override protected def innerChildren: Seq[LogicalPlan] = query :: Nil
  // Staging directory created for this insert, if any. Remembered so run() can
  // delete it eagerly after the data has been moved into the table/partition.
  var createdTempDir: Option[Path] = None
  // Unique id used to name per-query scratch/staging directories, mirroring
  // Hive's own naming scheme (timestamp + random suffix).
  private def executionId: String = {
    val rand: Random = new Random
    val format = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss_SSS", Locale.US)
    "hive_" + format.format(new Date) + "_" + Math.abs(rand.nextLong)
  }
  // Creates a staging directory derived from `inputPath` following Hive's naming
  // convention, records it in `createdTempDir` and registers it for
  // deletion-on-exit as a fallback cleanup.
  private def getStagingDir(
      inputPath: Path,
      hadoopConf: Configuration,
      stagingDir: String): Path = {
    val inputPathUri: URI = inputPath.toUri
    val inputPathName: String = inputPathUri.getPath
    val fs: FileSystem = inputPath.getFileSystem(hadoopConf)
    // Reuse an existing staging-dir prefix in the path if present; otherwise
    // nest the staging dir under the input path.
    val stagingPathName: String =
      if (inputPathName.indexOf(stagingDir) == -1) {
        new Path(inputPathName, stagingDir).toString
      } else {
        inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length)
      }
    val dir: Path =
      fs.makeQualified(
        new Path(stagingPathName + "_" + executionId + "-" + TaskRunner.getTaskRunnerID))
    logDebug("Created staging dir = " + dir + " for path = " + inputPath)
    try {
      if (!FileUtils.mkdir(fs, dir, true, hadoopConf)) {
        throw new IllegalStateException("Cannot create staging directory '" + dir.toString + "'")
      }
      createdTempDir = Some(dir)
      fs.deleteOnExit(dir)
    } catch {
      case e: IOException =>
        throw new RuntimeException(
          "Cannot create staging directory '" + dir.toString + "': " + e.getMessage, e)
    }
    dir
  }
  // Staging dir for a table location expressed as an external URI: rebuilds the
  // path from scheme/authority so the staging dir lands on the same file system.
  private def getExternalScratchDir(
      extURI: URI,
      hadoopConf: Configuration,
      stagingDir: String): Path = {
    getStagingDir(
      new Path(extURI.getScheme, extURI.getAuthority, extURI.getPath),
      hadoopConf,
      stagingDir)
  }
  // Chooses where the temporary output directory lives based on the Hive
  // version, because the expected location changed in Hive 1.1 (see below).
  def getExternalTmpPath(
      path: Path,
      hiveVersion: HiveVersion,
      hadoopConf: Configuration,
      stagingDir: String,
      scratchDir: String): Path = {
    import org.apache.spark.sql.hive.client.hive._
    // Before Hive 1.1, when inserting into a table, Hive will create the staging directory under
    // a common scratch directory. After the writing is finished, Hive will simply empty the table
    // directory and move the staging directory to it.
    // After Hive 1.1, Hive will create the staging directory under the table directory, and when
    // moving staging directory to table directory, Hive will still empty the table directory, but
    // will exclude the staging directory there.
    // We have to follow the Hive behavior here, to avoid troubles. For example, if we create
    // staging directory under the table director for Hive prior to 1.1, the staging directory will
    // be removed by Hive when Hive is trying to empty the table directory.
    val hiveVersionsUsingOldExternalTempPath: Set[HiveVersion] = Set(v12, v13, v14, v1_0)
    val hiveVersionsUsingNewExternalTempPath: Set[HiveVersion] = Set(v1_1, v1_2, v2_0, v2_1)
    // Ensure all the supported versions are considered here.
    assert(hiveVersionsUsingNewExternalTempPath ++ hiveVersionsUsingOldExternalTempPath ==
      allSupportedHiveVersions)
    if (hiveVersionsUsingOldExternalTempPath.contains(hiveVersion)) {
      oldVersionExternalTempPath(path, hadoopConf, scratchDir)
    } else if (hiveVersionsUsingNewExternalTempPath.contains(hiveVersion)) {
      newVersionExternalTempPath(path, hadoopConf, stagingDir)
    } else {
      throw new IllegalStateException("Unsupported hive version: " + hiveVersion.fullVersion)
    }
  }
  // Mostly copied from Context.java#getExternalTmpPath of Hive 0.13
  def oldVersionExternalTempPath(
      path: Path,
      hadoopConf: Configuration,
      scratchDir: String): Path = {
    val extURI: URI = path.toUri
    val scratchPath = new Path(scratchDir, executionId)
    var dirPath = new Path(
      extURI.getScheme,
      extURI.getAuthority,
      scratchPath.toUri.getPath + "-" + TaskRunner.getTaskRunnerID())
    try {
      val fs: FileSystem = dirPath.getFileSystem(hadoopConf)
      dirPath = new Path(fs.makeQualified(dirPath).toString())
      if (!FileUtils.mkdir(fs, dirPath, true, hadoopConf)) {
        throw new IllegalStateException("Cannot create staging directory: " + dirPath.toString)
      }
      createdTempDir = Some(dirPath)
      fs.deleteOnExit(dirPath)
    } catch {
      case e: IOException =>
        throw new RuntimeException("Cannot create staging directory: " + dirPath.toString, e)
    }
    dirPath
  }
  // Mostly copied from Context.java#getExternalTmpPath of Hive 1.2
  def newVersionExternalTempPath(
      path: Path,
      hadoopConf: Configuration,
      stagingDir: String): Path = {
    val extURI: URI = path.toUri
    if (extURI.getScheme == "viewfs") {
      getExtTmpPathRelTo(path.getParent, hadoopConf, stagingDir)
    } else {
      new Path(getExternalScratchDir(extURI, hadoopConf, stagingDir), "-ext-10000")
    }
  }
  def getExtTmpPathRelTo(
      path: Path,
      hadoopConf: Configuration,
      stagingDir: String): Path = {
    new Path(getStagingDir(path, hadoopConf, stagingDir), "-ext-10000") // Hive uses 10000
  }
  /**
   * Inserts all the rows in the table into Hive. Row objects are properly serialized with the
   * `org.apache.hadoop.hive.serde2.SerDe` and the
   * `org.apache.hadoop.mapred.OutputFormat` provided by the table definition.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionState = sparkSession.sessionState
    val externalCatalog = sparkSession.sharedState.externalCatalog
    val hiveVersion = externalCatalog.asInstanceOf[HiveExternalCatalog].client.version
    val hadoopConf = sessionState.newHadoopConf()
    val stagingDir = hadoopConf.get("hive.exec.stagingdir", ".hive-staging")
    val scratchDir = hadoopConf.get("hive.exec.scratchdir", "/tmp/hive")
    val hiveQlTable = HiveClientImpl.toHiveTable(table)
    // Have to pass the TableDesc object to RDD.mapPartitions and then instantiate new serializer
    // instances within the closure, since Serializer is not serializable while TableDesc is.
    val tableDesc = new TableDesc(
      hiveQlTable.getInputFormatClass,
      // The class of table should be org.apache.hadoop.hive.ql.metadata.Table because
      // getOutputFormatClass will use HiveFileFormatUtils.getOutputFormatSubstitute to
      // substitute some output formats, e.g. substituting SequenceFileOutputFormat to
      // HiveSequenceFileOutputFormat.
      hiveQlTable.getOutputFormatClass,
      hiveQlTable.getMetadata
    )
    val tableLocation = hiveQlTable.getDataLocation
    val tmpLocation =
      getExternalTmpPath(tableLocation, hiveVersion, hadoopConf, stagingDir, scratchDir)
    val fileSinkConf = new FileSinkDesc(tmpLocation.toString, tableDesc, false)
    val isCompressed = hadoopConf.get("hive.exec.compress.output", "false").toBoolean
    if (isCompressed) {
      // Please note that isCompressed, "mapreduce.output.fileoutputformat.compress",
      // "mapreduce.output.fileoutputformat.compress.codec", and
      // "mapreduce.output.fileoutputformat.compress.type"
      // have no impact on ORC because it uses table properties to store compression information.
      hadoopConf.set("mapreduce.output.fileoutputformat.compress", "true")
      fileSinkConf.setCompressed(true)
      fileSinkConf.setCompressCodec(hadoopConf
        .get("mapreduce.output.fileoutputformat.compress.codec"))
      fileSinkConf.setCompressType(hadoopConf
        .get("mapreduce.output.fileoutputformat.compress.type"))
    }
    // A partition value of None marks a dynamic partition column.
    val numDynamicPartitions = partition.values.count(_.isEmpty)
    val numStaticPartitions = partition.values.count(_.nonEmpty)
    val partitionSpec = partition.map {
      case (key, Some(value)) => key -> value
      case (key, None) => key -> ""
    }
    // All partition column names in the format of "<column name 1>/<column name 2>/..."
    val partitionColumns = fileSinkConf.getTableInfo.getProperties.getProperty("partition_columns")
    val partitionColumnNames = Option(partitionColumns).map(_.split("/")).getOrElse(Array.empty)
    // By this time, the partition map must match the table's partition columns
    if (partitionColumnNames.toSet != partition.keySet) {
      throw new SparkException(
        s"""Requested partitioning does not match the ${table.identifier.table} table:
           |Requested partitions: ${partition.keys.mkString(",")}
           |Table partitions: ${table.partitionColumnNames.mkString(",")}""".stripMargin)
    }
    // Validate partition spec if there exist any dynamic partitions
    if (numDynamicPartitions > 0) {
      // Report error if dynamic partitioning is not enabled
      if (!hadoopConf.get("hive.exec.dynamic.partition", "true").toBoolean) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg)
      }
      // Report error if dynamic partition strict mode is on but no static partition is found
      if (numStaticPartitions == 0 &&
        hadoopConf.get("hive.exec.dynamic.partition.mode", "strict").equalsIgnoreCase("strict")) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg)
      }
      // Report error if any static partition appears after a dynamic partition
      val isDynamic = partitionColumnNames.map(partitionSpec(_).isEmpty)
      if (isDynamic.init.zip(isDynamic.tail).contains((true, false))) {
        throw new AnalysisException(ErrorMsg.PARTITION_DYN_STA_ORDER.getMsg)
      }
    }
    val committer = FileCommitProtocol.instantiate(
      sparkSession.sessionState.conf.fileCommitProtocolClass,
      jobId = java.util.UUID.randomUUID().toString,
      outputPath = tmpLocation.toString,
      isAppend = false)
    // Resolve the dynamic partition columns against the query output so the
    // writer can partition the data by them.
    val partitionAttributes = partitionColumnNames.takeRight(numDynamicPartitions).map { name =>
      query.resolve(name :: Nil, sparkSession.sessionState.analyzer.resolver).getOrElse {
        throw new AnalysisException(
          s"Unable to resolve $name given [${query.output.map(_.name).mkString(", ")}]")
      }.asInstanceOf[Attribute]
    }
    // Write the query result into the temporary location first; the files are
    // moved into the table/partition location afterwards.
    FileFormatWriter.write(
      sparkSession = sparkSession,
      queryExecution = Dataset.ofRows(sparkSession, query).queryExecution,
      fileFormat = new HiveFileFormat(fileSinkConf),
      committer = committer,
      outputSpec = FileFormatWriter.OutputSpec(tmpLocation.toString, Map.empty),
      hadoopConf = hadoopConf,
      partitionColumns = partitionAttributes,
      bucketSpec = None,
      refreshFunction = _ => (),
      options = Map.empty)
    // Move the staged files into the Hive table or partition via the catalog.
    if (partition.nonEmpty) {
      if (numDynamicPartitions > 0) {
        externalCatalog.loadDynamicPartitions(
          db = table.database,
          table = table.identifier.table,
          tmpLocation.toString,
          partitionSpec,
          overwrite,
          numDynamicPartitions)
      } else {
        // scalastyle:off
        // ifNotExists is only valid with static partition, refer to
        // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries
        // scalastyle:on
        val oldPart =
          externalCatalog.getPartitionOption(
            table.database,
            table.identifier.table,
            partitionSpec)
        var doHiveOverwrite = overwrite
        if (oldPart.isEmpty || !ifNotExists) {
          // SPARK-18107: Insert overwrite runs much slower than hive-client.
          // Newer Hive largely improves insert overwrite performance. As Spark uses older Hive
          // version and we may not want to catch up new Hive version every time. We delete the
          // Hive partition first and then load data file into the Hive partition.
          if (oldPart.nonEmpty && overwrite) {
            oldPart.get.storage.locationUri.foreach { uri =>
              val partitionPath = new Path(uri)
              val fs = partitionPath.getFileSystem(hadoopConf)
              if (fs.exists(partitionPath)) {
                if (!fs.delete(partitionPath, true)) {
                  // NOTE(review): message is missing its closing quote.
                  throw new RuntimeException(
                    "Cannot remove partition directory '" + partitionPath.toString)
                }
                // Don't let Hive do overwrite operation since it is slower.
                doHiveOverwrite = false
              }
            }
          }
          // inheritTableSpecs is set to true. It should be set to false for an IMPORT query
          // which is currently considered as a Hive native command.
          val inheritTableSpecs = true
          externalCatalog.loadPartition(
            table.database,
            table.identifier.table,
            tmpLocation.toString,
            partitionSpec,
            isOverwrite = doHiveOverwrite,
            inheritTableSpecs = inheritTableSpecs,
            isSrcLocal = false)
        }
      }
    } else {
      externalCatalog.loadTable(
        table.database,
        table.identifier.table,
        tmpLocation.toString, // TODO: URI
        overwrite,
        isSrcLocal = false)
    }
    // Attempt to delete the staging directory and the inclusive files. If failed, the files are
    // expected to be dropped at the normal termination of VM since deleteOnExit is used.
    try {
      createdTempDir.foreach { path => path.getFileSystem(hadoopConf).delete(path, true) }
    } catch {
      case NonFatal(e) =>
        logWarning(s"Unable to delete staging directory: $stagingDir.\n" + e)
    }
    // un-cache this table.
    sparkSession.catalog.uncacheTable(table.identifier.quotedString)
    sparkSession.sessionState.catalog.refreshTable(table.identifier)
    // It would be nice to just return the childRdd unchanged so insert operations could be chained,
    // however for now we return an empty list to simplify compatibility checks with hive, which
    // does not return anything for insert operations.
    // TODO: implement hive compatibility as rules.
    Seq.empty[Row]
  }
}
| MLnick/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala | Scala | apache-2.0 | 17,406 |
package net.surguy.filerenamer
import org.specs2.mutable.SpecificationWithJUnit
import java.io.File
import java.nio.file.Paths
import com.google.common.io.Files
import com.google.common.base.Charsets
import scala.collection.JavaConversions._
// Specs2 tests for HashRecorder: relative-path computation and the format of
// the generated hash properties file.
class HashRecorderTest extends SpecificationWithJUnit {
  val recorder = new HashRecorder()
  "Working out a relative path" should {
    "give reasonable results when the second path is a subdirectory of the first" in {
      recorder.relativePath(new File("/tmp/root/"), new File("/tmp/root/subdir/one.txt")) mustEqual Paths.get("subdir/one.txt")
    }
  }
  "Processing a directory" should {
    // Hashes every file under src/test/resources and checks the exact
    // "md5=relativePath" lines written to the output properties file.
    "output a properties file" in {
      val outputFile = File.createTempFile("testhash", ".properties")
      recorder.processAll(new File("src/test/resources"), outputFile)
      val lines = Files.readLines(outputFile, Charsets.UTF_8)
      println(lines.mkString("\\n"))
      lines(0) mustEqual "770982747963851d3c3691201ff0d0eb=subdir\\\\test2.txt"
      lines(1) mustEqual "2e5e1b29fcc9cff2b6ec4e7e0fb02ea8=test.txt"
    }
  }
}
| inigo/file-renamer-hash | src/test/scala/net/surguy/filerenamer/HashRecorderTest.scala | Scala | gpl-3.0 | 1,075 |
package resources
import javax.inject.Inject
import javax.inject.Singleton
import akka.stream.Materializer
import org.coursera.example.Course
import org.coursera.naptime.Fields
import org.coursera.naptime.GetGraphQLRelation
import org.coursera.naptime.MultiGetGraphQLRelation
import org.coursera.naptime.Ok
import org.coursera.naptime.ResourceName
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.resources.CourierCollectionResource
import stores.CourseStore
import scala.concurrent.ExecutionContext
// Naptime REST resource exposing courses backed by an in-memory CourseStore.
@Singleton
class CoursesResource @Inject() (
    courseStore: CourseStore)(implicit ec: ExecutionContext, mat: Materializer)
  extends CourierCollectionResource[String, Course] {
  override def resourceName = "courses"
  override def resourceVersion = 1
  // GraphQL relations: link courses to their instructors/partner, plus
  // metadata-variant-specific instructor relations.
  override implicit lazy val Fields: Fields[Course] = BaseFields
    .withGraphQLRelations(
      "instructors" -> MultiGetGraphQLRelation(
        resourceName = ResourceName("instructors", 1),
        ids = "$instructorIds"),
      "partner" -> GetGraphQLRelation(
        resourceName = ResourceName("partners", 1),
        id = "$partnerId",
        description = "Partner who produces this course."),
      "courseMetadata/org.coursera.example.CertificateCourseMetadata/certificateInstructors" ->
        MultiGetGraphQLRelation(
          resourceName = ResourceName("instructors", 1),
          ids = "${courseMetadata/certificate/certificateInstructorIds}",
          description = "Instructor whose name and signature appears on the course certificate."),
      "courseMetadata/org.coursera.example.DegreeCourseMetadata/degreeInstructors" ->
        MultiGetGraphQLRelation(
          resourceName = ResourceName("instructors", 1),
          ids = "${courseMetadata/degree/degreeInstructorIds}",
          description = "Instructor whose name and signature appears on the degree certificate."))
  // Fetches a single course by id; 404s (via OkIfPresent) when absent.
  def get(id: String = "v1-123") = Nap.get { context =>
    OkIfPresent(id, courseStore.get(id))
  }
  // NOTE(review): the `types` parameter is accepted but not used for filtering
  // below — confirm whether filtering by type was intended.
  def multiGet(ids: Set[String], types: Set[String] = Set("course", "specialization")) = Nap.multiGet { context =>
    Ok(courseStore.all()
      .filter(course => ids.contains(course._1))
      .map { case (id, course) => Keyed(id, course) }.toList)
  }
  // Paginated listing: drops everything before the `start` key (if any), then
  // takes `limit` items and reports the next key plus the total count.
  def getAll() = Nap.getAll { context =>
    val courses = courseStore.all().toList.map { case (id, course) => Keyed(id, course) }
    val coursesAfterNext = context.paging.start
      .map(s => courses.dropWhile(_.key != s))
      .getOrElse(courses)
    val coursesSubset = coursesAfterNext.take(context.paging.limit)
    val next = coursesAfterNext.drop(context.paging.limit).headOption.map(_.key)
    Ok(coursesSubset)
      .withPagination(next, Some(courses.size.toLong))
  }
  // Finder returning all courses taught by the given instructor.
  def byInstructor(instructorId: String) = Nap.finder { context =>
    val courses = courseStore.all()
      .filter(course => course._2.instructorIds.map(_.toString).contains(instructorId))
    Ok(courses.toList.map { case (id, course) => Keyed(id, course) })
      .withPagination(next = "testNext")
  }
}
| coursera/naptime | examples/src/main/scala/resources/CoursesResource.scala | Scala | apache-2.0 | 3,037 |
/*
* Copyright 2016-2018 Michal Harish, michal.harish@gmail.com
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.kafka
import org.scalatest.{BeforeAndAfterAll, Suite}
/**
 * Mixes an embedded Kafka broker into a ScalaTest suite.
 *
 * The stackable `abstract override` guarantees that the embedded service is
 * closed once all tests have run: `close` executes first, and the `finally`
 * ensures `super.afterAll()` still runs even if `close` throws.
 */
trait EmbeddedKafka extends EmbeddedService with EmbeddedKafkaServer with BeforeAndAfterAll {
  self: Suite =>

  abstract override def afterAll(): Unit = {
    try {
      close
    } finally {
      super.afterAll()
    }
  }
}
| amient/affinity | kafka/test-util-kafka/src/main/scala/io/amient/affinity/kafka/EmbeddedKafka.scala | Scala | apache-2.0 | 1,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sparklinedata.druid.client
import java.io.IOException
import java.util.concurrent.ExecutorService
import org.apache.curator.framework.api.CompressionProvider
import org.apache.curator.framework.imps.GzipCompressionProvider
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode
import org.apache.curator.framework.recipes.cache.{ChildData, PathChildrenCache, PathChildrenCacheEvent, PathChildrenCacheListener}
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.apache.curator.utils.{CloseableUtils, ZKPaths}
import org.apache.spark.sql.SPLLogging
import org.apache.spark.util.SparklineThreadUtils
import org.sparklinedata.druid.{DruidDataSourceException, Utils}
import org.sparklinedata.druid.metadata._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.collection.mutable.{Map => MMap}
import scala.util.Try
/**
 * Maintains a Curator/ZooKeeper connection to a Druid cluster and keeps the
 * Druid metadata cache in sync with topology changes.
 *
 * Three watch points are registered under `options.zkDruidPath`:
 *  - "announcements": servers joining/leaving the cluster;
 *  - "segments": one child cache per server, tracking segment assignments;
 *  - "discovery": service lookup for broker/coordinator addresses.
 *
 * Any CHILD_ADDED/CHILD_REMOVED event invalidates the whole metadata cache for
 * this ZooKeeper ensemble (see `listener`). All caches and the framework are
 * closed via a shutdown hook.
 */
class CuratorConnection(val zkHosts : String,
                        val options : DruidRelationOptions,
                        val cache : DruidMetadataCache,
                        execSvc : ExecutorService) extends SPLLogging {
  // Per-server segment caches keyed by server node name; all access is guarded
  // by serverSegmentCacheLock.
  val serverSegmentsCache : MMap[String, PathChildrenCache] = MMap()
  val serverSegmentCacheLock = new Object
  val discoveryPath = ZKPaths.makePath(options.zkDruidPath, "discovery")
  val announcementPath = ZKPaths.makePath(options.zkDruidPath, "announcements")
  val serverSegmentsPath = ZKPaths.makePath(options.zkDruidPath, "segments")
  val framework: CuratorFramework = CuratorFrameworkFactory.builder.connectString(
    zkHosts).
    sessionTimeoutMs(options.zkSessionTimeoutMs).
    retryPolicy(new BoundedExponentialBackoffRetry(1000, 45000, 30)).
    compressionProvider(
      new PotentiallyGzippedCompressionProvider(options.zkEnableCompression)
    ).build
  // Both path caches compress data and use the caller-supplied executor.
  val announcementsCache : PathChildrenCache = new PathChildrenCache(
    framework,
    announcementPath,
    true,
    true,
    execSvc
  )
  val serverSegmentsPathCache : PathChildrenCache = new PathChildrenCache(
    framework,
    serverSegmentsPath,
    true,
    true,
    execSvc
  )
  // Invalidates the whole metadata cache whenever cluster membership or segment
  // assignment changes.
  val listener = new PathChildrenCacheListener {
    override def childEvent(client: CuratorFramework, event: PathChildrenCacheEvent): Unit = {
      event.getType match {
        case PathChildrenCacheEvent.Type.CHILD_ADDED |
             PathChildrenCacheEvent.Type.CHILD_REMOVED => {
          // NOTE(review): leftover debug println — consider routing through the logger.
          println(s"Event received: ${event.getType}")
          cache.clearCache(zkHosts)
        }
        case _ => ()
      }
    }
  }
  // Manages the lifecycle of one PathChildrenCache per server node under the
  // "segments" path: created on CHILD_ADDED, closed on CHILD_REMOVED.
  val serverSegmentsListener = new PathChildrenCacheListener {
    override def childEvent(client: CuratorFramework, event: PathChildrenCacheEvent): Unit = {
      event.getType match {
        case PathChildrenCacheEvent.Type.CHILD_ADDED => {
          serverSegmentCacheLock.synchronized {
            val child: ChildData = event.getData
            val key = getServerKey(event)
            if (serverSegmentsCache.contains(key)) {
              // NOTE(review): "%s" looks like a printf-style placeholder, but
              // SLF4J-style loggers use "{}"; this likely logs the pattern
              // verbatim — verify against the SPLLogging/log API.
              log.error(
                "New node[%s] but there was already one. That's not good, ignoring new one.",
                child.getPath
              )
            } else {
              val segmentsPath: String = String.format("%s/%s", serverSegmentsPath, key)
              val segmentsCache: PathChildrenCache = new PathChildrenCache(
                framework,
                segmentsPath,
                true,
                true,
                execSvc
              )
              // Segment changes on this server funnel into the cache-clearing listener.
              segmentsCache.getListenable.addListener(listener)
              serverSegmentsCache(key) = segmentsCache
              logDebug(s"Starting inventory cache for $key, inventoryPath $segmentsPath")
              segmentsCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT)
            }
          }
        }
        case PathChildrenCacheEvent.Type.CHILD_REMOVED => {
          serverSegmentCacheLock.synchronized {
            val child: ChildData = event.getData
            val key = getServerKey(event)
            val segmentsCache: Option[PathChildrenCache] = serverSegmentsCache.remove(key)
            if (segmentsCache.isDefined) {
              // NOTE(review): same "%s" placeholder concern as above.
              log.debug("Closing inventory cache for %s. Also removing listeners.", key)
              segmentsCache.get.getListenable.clear()
              segmentsCache.get.close()
            } else log.warn("Container[%s] removed that wasn't a container!?", child.getPath)
          }
        }
        case _ => ()
      }
    }
  }
  announcementsCache.getListenable.addListener(listener)
  serverSegmentsPathCache.getListenable.addListener(serverSegmentsListener)
  framework.start()
  announcementsCache.start(StartMode.BUILD_INITIAL_CACHE)
  serverSegmentsPathCache.start(StartMode.POST_INITIALIZED_EVENT)
  // trigger loading class CloseableUtils
  CloseableUtils.closeQuietly(null)
  // Best-effort teardown: each close is wrapped in Try so one failure does not
  // prevent the remaining resources from being released.
  SparklineThreadUtils.addShutdownHook {() =>
    Try {announcementsCache.close()}
    Try {serverSegmentsPathCache.close()}
    Try {
      serverSegmentCacheLock.synchronized {
        serverSegmentsCache.values.foreach { inventoryCache =>
          inventoryCache.getListenable.clear()
          inventoryCache.close()
        }
      }
    }
    Try {CloseableUtils.closeQuietly(framework)}
  }
  /*
  Not using Curator Discovery extension because of mvn issues.
  Code using discovery extension:
  import org.apache.curator.x.discovery.details.{JsonInstanceSerializer, ServiceDiscoveryImpl}
  val discovery = new ServiceDiscoveryImpl(
      framework,
      s"${options.zkDruidPath}/discovery",
      new JsonInstanceSerializer(classOf[DruidNode]),
      null)
  def getService(name : String) : String = {
    import collection.JavaConversions._
    discovery.queryForInstances(name).map(
      s => s"${s.getAddress}:${s.getPort}"
    ).headOption.getOrElse(
      throw new DruidDataSourceException(s"Failed to get '$name' for '$zkHosts'")
    )
  }
   */
  /**
   * Resolves the address ("host:port") of a named Druid service by reading the
   * first child node under the discovery path and parsing it as a DruidNode.
   * Any failure (no children, bad JSON, ZK error) surfaces as a
   * DruidDataSourceException.
   */
  def getService(name : String) : String = {
    import Utils._
    import collection.JavaConversions._
    val n = if (options.zkQualifyDiscoveryNames ) s"${options.zkDruidPath}:$name" else name
    val sPath = ZKPaths.makePath(discoveryPath, n)
    val b: java.util.List[String] =
      framework.getChildren().forPath(sPath)
    try {
      val idPath = ZKPaths.makePath(sPath, b.head)
      val bytes: Array[Byte] = framework.getData.forPath(idPath)
      val nd = parse(new String(bytes)).extract[DruidNode]
      s"${nd.address}:${nd.port}"
    } catch {
      case e : Exception =>
        throw new DruidDataSourceException(s"Failed to get '$name' for '$zkHosts'", e)
    }
  }
  /** Address of the Druid broker, via service discovery. */
  def getBroker : String = {
    getService("broker")
  }
  /** Address of the Druid coordinator, via service discovery. */
  def getCoordinator : String = {
    getService("coordinator")
  }
  /**
   * Extracts the server node name from a path-cache event.
   * NOTE(review): returns null when the node's data cannot be read — callers
   * in the listeners above then look up a null key; confirm that is benign.
   */
  private def getServerKey(event: PathChildrenCacheEvent) : String = {
    val child: ChildData = event.getData
    val data: Array[Byte] = getZkDataForNode(child.getPath)
    if (data == null) {
      // NOTE(review): "%s" placeholder concern, same as the listeners above.
      log.info("Ignoring event: Type - %s , Path - %s , Version - %s",
        Array(event.getType, child.getPath, child.getStat.getVersion))
      null
    } else {
      ZKPaths.getNodeFromPath(child.getPath)
    }
  }
  /** Reads (and decompresses) the data of a ZK node; null on any failure. */
  private def getZkDataForNode(path: String): Array[Byte] = {
    try {
      framework.getData.decompressed.forPath(path)
    } catch {
      case ex: Exception => {
        log.warn(s"Exception while getting data for node $path", ex)
        null
      }
    }
  }
}
/*
* copied from druid code base.
*/
/**
 * Curator CompressionProvider that gzips writes only when `compressOutput`
 * is enabled, and on reads transparently falls back to the raw bytes when
 * the stored data turns out not to be gzipped (IOException from the codec).
 * (Copied from the Druid code base.)
 */
class PotentiallyGzippedCompressionProvider(val compressOutput: Boolean)
    extends CompressionProvider {

  private val base: GzipCompressionProvider = new GzipCompressionProvider

  @throws[Exception]
  def compress(path: String, data: Array[Byte]): Array[Byte] =
    if (compressOutput) base.compress(path, data) else data

  @throws[Exception]
  def decompress(path: String, data: Array[Byte]): Array[Byte] =
    try base.decompress(path, data)
    catch {
      // Data was written uncompressed — hand it back unchanged.
      case _: IOException => data
    }
}
| SparklineData/spark-druid-olap | src/main/scala/org/sparklinedata/druid/client/CuratorConnection.scala | Scala | apache-2.0 | 8,857 |
package bozzy.steno
/**
* Created by ted on 2016-02-08.
*/
/**
 * A steno dictionary translation, tagged with the format of the dictionary it
 * was read from.
 *
 * @param raw    the translation text exactly as it appears in the source dictionary
 * @param format the dictionary format `raw` is expressed in
 */
abstract class Translation(val raw: String, val format: DictionaryFormat.Value) {
  // Number of space-separated tokens. Splits on a single space character, so
  // consecutive spaces produce empty interior tokens and inflate the count.
  val word_count: Int = (raw split ' ').length

  // Render the translation in the target dictionary format.
  def toJSON: String
  def toRTF: String
}
/** Translation read from an RTF dictionary; JSON rendering is not yet converted. */
class RTFTranslation(override val raw: String) extends Translation(raw, DictionaryFormat.RTF) {
  def toRTF = raw
  def toJSON = raw // TODO: Implement conversion.
}
/** Translation read from a JSON dictionary; RTF rendering is not yet converted. */
class JSONTranslation(override val raw: String) extends Translation(raw, DictionaryFormat.JSON) {
  // Fix: this class previously tagged itself DictionaryFormat.RTF (copy-paste
  // from RTFTranslation), so `format` misreported the source dictionary format.
  def toRTF = raw // TODO: Implement conversion.
  def toJSON = raw
}
| Germanika/Bozzy | src/main/scala/bozzy/steno/Translation.scala | Scala | gpl-3.0 | 571 |
package mesosphere.marathon.io
import java.math.BigInteger
import java.net.{ URLConnection, HttpURLConnection, URL }
import java.security.MessageDigest
import scala.collection.JavaConverters._
import scala.concurrent.Future
import org.apache.commons.io.FilenameUtils.getName
import scala.concurrent.ExecutionContext.Implicits.global
trait PathFun {

  // Fresh MessageDigest per call: instances are stateful and not thread-safe.
  private[this] def md = MessageDigest.getInstance("SHA-1")

  /**
   * SHA-1 of the UTF-8 encoding of `in`, rendered as a hex string.
   *
   * Leading zero bytes of the digest are dropped by `BigInteger.toString`;
   * kept that way to stay backward compatible with existing callers.
   */
  def mdHex(in: String): String = {
    val ret = md
    // Fix: hash the full encoded byte array. The previous code passed the
    // character count (`in.length`) as the byte length, which truncated the
    // input whenever UTF-8 encoding produced more bytes than characters.
    val bytes = in.getBytes("UTF-8")
    ret.update(bytes, 0, bytes.length)
    new BigInteger(1, ret.digest()).toString(16)
  }

  /** Last path segment of the URL's file part (e.g. ".../a/b.txt" -> "b.txt"). */
  def fileName(url: URL): String = getName(url.getFile)

  /**
   * Builds a content-addressed path "<contentId>/<fileName>" for the URL:
   * the sanitized strong ETag when the server provides one, otherwise a hash
   * of the content stream.
   */
  def contentPath(url: URL): Future[String] = contentHeader(url).map { header =>
    //filter only strong eTags and make sure, it can be used as path
    // NOTE(review): the character class [^A-z0-9\-] also admits the characters
    // between 'Z' and 'a' ([ \ ] ^ _ `); likely meant A-Za-z. Left unchanged so
    // that already-generated paths remain stable.
    val eTag: Option[String] = header.get("ETag")
      .flatMap(_.filterNot(_.startsWith("W/")).headOption)
      .map(_.replaceAll("[^A-z0-9\\-]", ""))
    val contentPart = eTag.getOrElse(IO.mdSum(url.openStream()))
    s"$contentPart/${fileName(url)}"
  }

  /**
   * Fetches the response headers for the URL without downloading the body:
   * a HEAD request for HTTP(S), a plain connection otherwise. The blocking
   * network call is marked with `scala.concurrent.blocking`.
   */
  def contentHeader(url: URL): Future[Map[String, List[String]]] = Future {
    val connection = url.openConnection() match {
      case http: HttpURLConnection =>
        http.setRequestMethod("HEAD")
        http
      case other: URLConnection => other
    }
    scala.concurrent.blocking(connection.getHeaderFields)
      .asScala.toMap.map { case (key, list) => (key, list.asScala.toList) }
  }
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/io/PathFun.scala | Scala | apache-2.0 | 1,445 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core
import java.util.Collections
import com.comcast.money.api.{ InstrumentationLibrary, Note, SpanId, SpanInfo }
import io.opentelemetry.api.trace.{ Span, SpanKind, StatusCode }
/**
 * Immutable [[SpanInfo]] implementation used by Money's core tracer.
 * All timestamps/durations are in nanoseconds; collection fields default to
 * empty immutable Java collections, and environment-derived fields default to
 * the values configured on the global `Money` object.
 */
private[core] case class CoreSpanInfo(
  id: SpanId,
  name: String,
  kind: SpanKind = SpanKind.INTERNAL,
  startTimeNanos: Long = 0L,
  endTimeNanos: Long = 0L,
  durationNanos: Long = 0L,
  status: StatusCode = StatusCode.UNSET,
  description: String = "",
  notes: java.util.Map[String, Note[_]] = Collections.emptyMap(),
  override val events: java.util.List[SpanInfo.Event] = Collections.emptyList(),
  override val links: java.util.List[SpanInfo.Link] = Collections.emptyList(),
  library: InstrumentationLibrary = Money.InstrumentationLibrary,
  appName: String = Money.Environment.applicationName,
  host: String = Money.Environment.hostName) extends SpanInfo | Comcast/money | money-core/src/main/scala/com/comcast/money/core/CoreSpanInfo.scala | Scala | apache-2.0 | 1,504 |
package com.getjenny.starchat.services.esclient
/**
* Created by Angelo Leto <angelo@getjenny.com> on 01/07/16.
*/
/**
 * Elasticsearch client configuration for the decision-table index.
 * All tunables are read once, at object initialization, from the application
 * config (`es.*` for index/query settings, `starchat.*` for runtime behavior).
 */
object DecisionTableElasticClient extends ElasticClient {
  // The concrete index name is composed elsewhere; only the suffix is configured here.
  override val indexName: String = ""
  override val indexSuffix: String = config.getString("es.dt_index_suffix")
  // Minimum score for a decision-table query match.
  val queryMinThreshold : Float = config.getDouble("es.dt_query_min_threshold").toFloat
  // Boost applied to exact query matches relative to analyzed matches.
  val boostExactMatchFactor : Float = config.getDouble("es.dt_boost_exact_match_factor").toFloat
  val queriesScoreMode: String = config.getString("es.dt_queries_score_mode").toLowerCase
  val getNextResponseThreshold: Double =
    config.getDouble("starchat.get_next_response_threshold")
  // Conversation-loop detection parameters.
  val loopDetectionMaxCycleLength: Int = config.getInt("starchat.loopDetectionMaxCycleLength")
  val loopDetectionExaminedLength: Int = config.getInt("starchat.loopDetectionExaminedLength")
  val loopDetectionTimeThreshold: Long = config.getLong("starchat.loopDetectionTimeThreshold")
  // Background cleanup of soft-deleted decision-table states.
  val cleanDeletedDTStatesProcessInterval: Long =
    config.getLong("starchat.clean_deleted_dt_states_process_interval")
  val cleanDeletedDTStatesAge: Long = config.getLong("starchat.clean_deleted_dt_states_age")
  override val mappingPath: String = "/index_management/json_index_spec/general/state.json"
  override val updateMappingPath: String = "/index_management/json_index_spec/general/update/state.json"
  override val numberOfShards: Int = config.getInt("es.state_idx_number_of_shards")
  override val numberOfReplicas: Int = config.getInt("es.state_idx_number_of_replicas")
}
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/services/esclient/DecisionTableElasticClient.scala | Scala | gpl-2.0 | 1,543 |
package org.scalajs.nio
import org.scalajs.dom
/**
 * Scala.js port of `java.nio.ByteBuffer`: a byte buffer with absolute and
 * relative typed accessors, convertible to typed views of other primitive
 * buffer types. Concrete behavior is provided by implementations such as
 * NativeByteBuffer.
 */
abstract class ByteBuffer extends Buffer with TypedBuffer[Byte, ByteBuffer] with Comparable[ByteBuffer] {
  // Defining this one here instead of TypedBuffer because of the type erasure conflict
  /** Relative bulk put of the whole array, delegating to the ranged overload. */
  def put(src: Array[Byte]): ByteBuffer = this.put(src, 0, src.length)

  // Typed views sharing this buffer's content.
  def asCharBuffer(): Nothing // CharBuffer not implemented
  def asShortBuffer(): ShortBuffer
  def asIntBuffer(): IntBuffer
  def asLongBuffer(): LongBuffer
  def asFloatBuffer(): FloatBuffer
  def asDoubleBuffer(): DoubleBuffer

  /** Sets the byte order used by the multi-byte accessors below. */
  def order(bo: ByteOrder): ByteBuffer

  // Relative (position-advancing) and absolute typed reads.
  def getChar(): Char
  def getChar(index: Int): Char
  def getShort(): Short
  def getShort(index: Int): Short
  def getInt(): Int
  def getInt(index: Int): Int
  def getLong(): Long
  def getLong(index: Int): Long
  def getFloat(): Float
  def getFloat(index: Int): Float
  def getDouble(): Double
  def getDouble(index: Int): Double

  // Relative (position-advancing) and absolute typed writes.
  def putChar(value: Char): ByteBuffer
  def putChar(index: Int, value: Char): ByteBuffer
  def putShort(value: Short): ByteBuffer
  def putShort(index: Int, value: Short): ByteBuffer
  def putInt(value: Int): ByteBuffer
  def putInt(index: Int, value: Int): ByteBuffer
  def putLong(value: Long): ByteBuffer
  def putLong(index: Int, value: Long): ByteBuffer
  def putFloat(value: Float): ByteBuffer
  def putFloat(index: Int, value: Float): ByteBuffer
  def putDouble(value: Double): ByteBuffer
  def putDouble(index: Int, value: Double): ByteBuffer

  /** Backing storage as a Scala array. */
  def array(): Array[Byte]
  /** Backing storage as the underlying JS typed array. */
  def jsArray(): dom.Int8Array
}
/** Factory methods mirroring `java.nio.ByteBuffer`, all backed by NativeByteBuffer. */
object ByteBuffer {
  def allocate(capacity: Int): ByteBuffer = NativeByteBuffer.allocate(capacity)
  def allocateDirect(capacity: Int): ByteBuffer = NativeByteBuffer.allocateDirect(capacity)
  def wrap(array: Array[Byte]): ByteBuffer = NativeByteBuffer.wrap(array)
  def wrap(array: Array[Byte], offset: Int, length: Int): ByteBuffer = NativeByteBuffer.wrap(array, offset, length)
} | storm-enroute/macrogl | src/buffer/scala/org/scalajs/nio/ByteBuffer.scala | Scala | bsd-3-clause | 1,922 |
package mr.merc.ui.world
import mr.merc.economics.WorldStateParliamentActions
import mr.merc.local.Localization._
import mr.merc.local.Localization
import mr.merc.politics._
import mr.merc.ui.common.{LazyPane, SceneManager}
import mr.merc.ui.dialog.ModalDialog._
import mr.merc.ui.world.PieChartBuilder.PiePart
import scalafx.scene.layout.{BorderPane, Pane}
import scalafx.beans.property.ReadOnlyObjectProperty
import scalafx.geometry.{Orientation, Pos}
import scalafx.scene.shape.Circle
import mr.merc.util.FxPropertyUtils._
import scalafx.scene.control.TabPane.TabClosingPolicy
import scalafx.scene.{Node, Scene}
import scalafx.scene.control.{ComboBox, Separator, Tab, TabPane}
import scalafx.scene.paint.Color
import scalafx.stage.Stage
import mr.merc.politics.VotersPolicy.NoVoting
import mr.merc.util.SFXMigPane
import scalafx.collections.ObservableBuffer
import scalafx.scene.image.ImageView
import scalafx.Includes._
/** Top-level parliament screen: ruling-party panel on the left, seat/popularity pies in the center. */
class ParliamentPane(sceneManager: SceneManager, worldState: WorldStateParliamentActions) extends BorderPane {
  left = new RulingPartyParliamentPane(sceneManager, worldState) with BorderedNode
  center = new ParliamentPie(worldState)
}
/**
 * Left-hand column of the parliament screen: current ruler, player actions,
 * ruling-party details and (when a parliament exists) the coalition breakdown.
 * Panes bound with `<==` re-render whenever the player's political system changes.
 */
class RulingPartyParliamentPane(sceneManager: SceneManager, worldState: WorldStateParliamentActions) extends SFXMigPane("", "0[]0", "0[]0[]0[]0") {
  private val rulerPane = new SFXMigPane("center") {
    styleClass.add("borderDownPane")
    add(new HeadOfCurrentStatePane(worldState.playerState.politicalSystem.elites.stateRuler, worldState.turn))
  }
  private val rulingPartyPane = new TopTitledBorderPane {
    styleClass.add("borderDownPane")
    top = MediumText(Localization("parliament.rulingParty"))
    // Rebuild the party view whenever the ruling party changes.
    center <== worldState.playerPoliticalSystemProperty.map { case (p, _) => new PartyViewPane(p, true).delegate }
  }
  private val actionsPane = new ParliamentActionsPane(sceneManager, worldState) {
    styleClass.add("borderDownPane")
  }
  // Coalition pane exists only while a parliament exists; otherwise an empty pane.
  private val coalitionPaneProp: ReadOnlyObjectProperty[Node] = worldState.playerPoliticalSystemProperty.map { case (_, ps) =>
    ps.parliament.map { p =>
      val coalition = p.coalition.map(party => party -> p.parties(party)).toMap
      val pane: Pane = new ParliamentCoalitionPane(coalition)
      pane
    }.getOrElse(new Pane())
  }
  add(rulerPane, "wrap, align center, growx, pushx")
  add(actionsPane, "align center, wrap, growx, pushx")
  add(rulingPartyPane, "wrap, growx, pushx")
  add(new PropertyDependentPane[Node](coalitionPaneProp, identity), "wrap,grow")
  // Keep the (re-created) coalition pane centered after each change.
  BorderPane.setAlignment(coalitionPaneProp.value, Pos.Center)
  coalitionPaneProp.onChange {
    BorderPane.setAlignment(coalitionPaneProp.value, Pos.Center)
  }
}
/**
 * The three political action buttons (change ruling party / usurp power /
 * give up power). Each button is enabled only while the corresponding
 * world-state predicate allows it, opens a party-selection dialog, and applies
 * the chosen party to the player's state.
 */
class ParliamentActionsPane(sceneManager: SceneManager, worldState: WorldStateParliamentActions) extends SFXMigPane("", "15%[]15%") {
  private val changeRulingParty = MediumButton(Localization("parliament.changeRulingParty"))
  changeRulingParty.disable <== !worldState.playerCanChangeRulingParty
  changeRulingParty.onAction = { _ =>
    val partyOpt = choosePartiesDialog(worldState.possibleParties(worldState.playerPoliticalSystemProperty.value._2))
    partyOpt.foreach { p =>
      worldState.changeRulingParty(worldState.playerState, p)
    }
  }
  private val usurpPower = MediumButton(Localization("parliament.usurpPower"))
  usurpPower.disable <== !worldState.playerCanUsurpPower
  usurpPower.onAction = { _ =>
    val partyOpt = choosePartiesDialog(worldState.possiblePartiesForUsurpation(worldState.playerState))
    partyOpt.foreach { p =>
      worldState.usurpPower(worldState.playerState, p)
    }
  }
  private val giveUpPower = MediumButton(Localization("parliament.giveUpPower"))
  giveUpPower.disable <== !worldState.playerCanGiveUpPower
  giveUpPower.onAction = { _ =>
    val partyOpt = choosePartiesDialog(worldState.possiblePartiesForGivingUpPower(worldState.playerState))
    partyOpt.foreach { p =>
      worldState.giveUpPower(worldState.playerState, p)
    }
  }
  add(changeRulingParty, "wrap, grow, push, align center")
  add(usurpPower, "wrap, grow, push, align center")
  add(giveUpPower, "wrap, grow, push, align center")
  /** Opens the modal party-selection dialog; None when the user cancels. */
  def choosePartiesDialog(possibleParties: List[Party]): Option[Party] =
    new SelectPartyPane(worldState.playerPoliticalSystemProperty.value._1, possibleParties).showDialog(sceneManager.stage).selected
}
/**
 * Read-only summary of a party's positions, one labelled row per policy axis.
 * When `withTitle` is set, a header with the party's color swatch and name is
 * shown above the rows.
 */
class PartyViewPane(party: Party, withTitle: Boolean) extends SFXMigPane with WorldInterfaceWhiteNode {
  styleClass.add("party-pane")
  private val titlePane = new SFXMigPane("center") {
    add(new Circle() {
      radius = Components.largeFontSize / 2
      fill = party.color
      stroke = Color.Black
    })
    add(BigText(Localization(party.name)))
  }
  if (withTitle) {
    add(titlePane, "wrap, center, span 2")
  }
  // Label / value rows for each policy dimension.
  add(MediumText(Localization("parliament.regime")))
  add(MediumText(Localization(party.regime.name)), "wrap")
  add(MediumText(Localization("parliament.votersPolicy")))
  add(MediumText(Localization(party.votersPolicy.name)), "wrap")
  add(MediumText(Localization("parliament.foreignPolicy")))
  add(MediumText(Localization(party.foreignPolicy.name)), "wrap")
  add(MediumText(Localization("parliament.economy")))
  add(MediumText(Localization(party.economy.name)), "wrap")
  add(MediumText(Localization("parliament.migration")))
  add(MediumText(Localization(party.migration.name)), "wrap")
  add(MediumText(Localization("parliament.socialPolicy")))
  add(MediumText(Localization(party.socialPolicy.name)), "wrap")
}
object ParliamentPie {
  /**
   * Builds a labelled pie chart from party vote counts, largest share first.
   * Tooltips show the party name and its percentage of the (normalized) total.
   */
  def pieByVotes(votes: Map[Party, Double]): Node = {
    import mr.merc.economics.MapUtil.FloatOperations.MapWithFloatOperations
    // Normalize so the shares sum to 1 for the percentage tooltips.
    val percentage = votes.scaleToSum(1d)
    val pies = votes.toList.sortBy(-_._2).map { case (p, count) =>
      val name = Localization(p.name)
      val tooltip = s"$name\n${DoubleFormatter().format(percentage(p) * 100)}%"
      PiePart(p.color, name, count, Some(Left(tooltip)))
    }
    PieChartBuilder.build(pies, labels = true)
  }
}
/**
 * Tab pane with three pie charts: current parliament seats, party popularity
 * among voters, and overall party popularity. Tab contents rebuild whenever
 * the player's political system changes.
 */
class ParliamentPie(worldState: WorldStateParliamentActions) extends TabPane {
  import ParliamentPie._
  style = Components.mediumFontStyle
  private val parliamentTab = new Tab {
    text = Localization("parliament.parliament")
    // Falls back to a "no parliament" label under non-parliamentary regimes.
    content <== worldState.playerPoliticalSystemProperty.map { case (_, ps) =>
      ps.parliament.map(_.parties).map(pieByVotes).getOrElse {
        BigText(Localization("parliament.noParliament"))
      }.delegate
    }
  }
  private val votersPopularityTab = new Tab() {
    text = Localization("parliament.votersPopularity")
    // No voter popularity to show when the ruling party forbids voting.
    content <== worldState.playerPoliticalSystemProperty.map { case (party, _) =>
      if (party.votersPolicy == NoVoting) {
        BigText(Localization("NoVoting")).delegate
      } else {
        new LazyPane(pieByVotes(worldState.partyPopularityAmongVoters(worldState.playerState))).delegate
      }
    }
  }
  private val popularityTab = new Tab() {
    text = Localization("parliament.popularity")
    content <== worldState.playerPoliticalSystemProperty.map { _ =>
      new LazyPane(pieByVotes(worldState.partyPopularity(worldState.playerState))).delegate
    }
  }
  this.tabs.addAll(parliamentTab, votersPopularityTab, popularityTab)
  this.tabClosingPolicy = TabClosingPolicy.Unavailable
}
/**
 * Modal dialog for picking a party from `possibleParties`.
 * The result is exposed via `selected`: Some(party) after OK, None after
 * Cancel or closing the window.
 */
class SelectPartyPane(currentParty: Party, possibleParties: List[Party]) extends Stage {
  var selected: Option[Party] = None
  private val okButton = new MediumButton {
    text = Localization("common.ok")
    onAction = { _ =>
      SelectPartyPane.this.close()
    }
  }
  private val cancelButton = new MediumButton {
    text = Localization("common.cancel")
    onAction = { _ =>
      selected = None
      SelectPartyPane.this.close()
    }
  }
  private val partiesComboBox = new ComboBox[Party] {
    style = Components.largeFontStyle
    // Each cell renders the party as a colored circle plus its localized name.
    cellFactory = (cell, p) => {
      cell.graphic = new SFXMigPane {
        val rect: Circle = Circle(Components.mediumFontSize)
        rect.fill = p.color
        rect.stroke = Color.Black
        val text: BigText = BigText(Localization(p.name))
        add(rect)
        add(text)
      }
    }
    buttonCell = cellFactory.value.call(null)
    items = ObservableBuffer() ++ possibleParties
    // Preselect the current party only if it is actually an allowed choice.
    value = if(possibleParties.contains(currentParty)) currentParty else null
  }
  this.scene = new Scene() {
    stylesheets.add("/css/partyPane.css")
    content = new SFXMigPane() {
      add(MediumText(Localization("selectRulingParty")), "wrap")
      add(partiesComboBox, "wrap")
      // Detail view of whichever party is currently chosen in the combo box.
      add(new PropertyDependentPane[Party](partiesComboBox.value,
        party => Option(party).map(new PartyViewPane(_, false)).getOrElse(new Pane())), "wrap")
      val buttonsPane = new SFXMigPane()
      buttonsPane.add(okButton)
      buttonsPane.add(cancelButton)
      add(buttonsPane, "wrap")
    }
  }
  // Track the combo-box selection and resize the window to fit the detail pane.
  partiesComboBox.value.onChange {
    this.scene.value.getWindow.sizeToScene()
    this.selected = Option(partiesComboBox.value.value)
  }
  // Closing the window counts as cancellation.
  this.onCloseRequest = { _ =>
    this.selected = None
  }
}
/** Lists the coalition parties with their shares (as percentages), largest first, under a titled separator. */
class ParliamentCoalitionPane(parties: Map[Party, Double]) extends SFXMigPane() {
  private val sorted = parties.toList.sortBy(-_._2)
  add(BigText(Localization("parliament.coalition")), "center,grow,push,span 2,wrap")
  // Thin horizontal rule under the title.
  add(new SFXMigPane("insets 0 0 0 0") {
    style = s"-fx-border-width: 0 0 1 0; -fx-border-style: solid;"
  }, "grow,push,wrap,span 2")
  sorted.foreach { case (p, c) =>
    add(MediumText(p.name.localize))
    add(MediumText(DoubleFormatter().format(100 * c) + "%"), "wrap")
  }
}
/** Portrait card for a (possibly foreign) head of state: image, name, culture, state, age and aggressiveness. */
class HeadOfStatePane(ruler: HeadOfState, turn: Int) extends SFXMigPane("") {
  add(new ImageView(ruler.largeImage), "wrap, span 2, align center")
  add(BigText(ruler.fullName + " " + Localization("common.of")), "wrap, span 2, align center")
  add(BigText(EconomicLocalization.localizeCulture(ruler.culture)), "wrap, span 2, align center")
  add(new StateComponentColorName(ruler.state), "wrap, align center, span 2")
  add(BigText(Localization("age")), "align center")
  add(BigText(ruler.age(turn).toString), "align center, wrap")
  add(BigText(Localization("aggressiveness")), "align center")
  add(BigText(ruler.aggressiveness.toString), "align center, wrap")
}
/** Compact portrait card for the player's current ruler: image beside a column of name/culture/age/aggressiveness. */
class HeadOfCurrentStatePane(ruler: HeadOfState, turn: Int) extends SFXMigPane {
  add(new ImageView(ruler.largeImage), "")
  add(new SFXMigPane {
    add(BigText(ruler.fullName + " " + Localization("common.of")), "wrap, span 2, align center")
    add(BigText(EconomicLocalization.localizeCulture(ruler.culture)), "wrap, span 2, align center")
    add(BigText(Localization("age")), "")
    // NOTE(review): constraint ", wrap" starts with a stray comma — likely meant "wrap".
    add(BigText(ruler.age(turn).toString), ", wrap")
    add(BigText(Localization("aggressiveness")), "")
    add(BigText(ruler.aggressiveness.toString), "wrap")
  })
} | RenualdMarch/merc | src/main/scala/mr/merc/ui/world/ParliamentPane.scala | Scala | gpl-3.0 | 10,728 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import cats.effect.{ContextShift, IO}
import monix.catnap.SchedulerEffect
import monix.execution.exceptions.DummyException
import scala.util.{Failure, Success}
/**
 * Tests for `Task#to[F]` (TaskLift): converting a Task to Task itself, to
 * cats-effect IO, and to custom Effect / ConcurrentEffect instances, covering
 * both successful values and raised errors. Each async test drives the
 * TestScheduler with `s.tick()` before asserting on the resulting Future.
 */
object TaskLiftSuite extends BaseTestSuite {
  import TaskConversionsSuite.{CIO, CustomConcurrentEffect, CustomEffect}
  // Converting to Task must be the identity.
  test("task.to[Task]") { _ =>
    val task = Task(1)
    val conv = task.to[Task]
    assertEquals(task, conv)
  }
  test("task.to[IO]") { implicit s =>
    val task = Task(1)
    val io = task.to[IO]
    val f = io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Success(1)))
  }
  // Errors must survive the conversion to IO.
  test("task.to[IO] for errors") { implicit s =>
    val dummy = DummyException("dummy")
    val task = Task.raiseError(dummy)
    val io = task.to[IO]
    val f = io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Failure(dummy)))
  }
  // Conversion via a user-supplied Effect instance.
  test("task.to[Effect]") { implicit s =>
    implicit val cs: ContextShift[IO] = SchedulerEffect.contextShift[IO](s)(IO.ioEffect)
    implicit val F: CustomEffect = new CustomEffect()
    val task = Task(1)
    val io = task.to[CIO]
    val f = io.io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Success(1)))
  }
  test("task.to[Effect] for errors") { implicit s =>
    implicit val cs: ContextShift[IO] = SchedulerEffect.contextShift[IO](s)(IO.ioEffect)
    implicit val F: CustomEffect = new CustomEffect()
    val dummy = DummyException("dummy")
    val task = Task.raiseError(dummy)
    val io = task.to[CIO]
    val f = io.io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Failure(dummy)))
  }
  // Conversion via a user-supplied ConcurrentEffect instance.
  test("task.to[ConcurrentEffect]") { implicit s =>
    implicit val cs: ContextShift[IO] = SchedulerEffect.contextShift[IO](s)(IO.ioEffect)
    implicit val F: CustomConcurrentEffect = new CustomConcurrentEffect()
    val task = Task(1)
    val io = task.to[CIO]
    val f = io.io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Success(1)))
  }
  test("task.to[ConcurrentEffect] for errors") { implicit s =>
    implicit val cs: ContextShift[IO] = SchedulerEffect.contextShift[IO](s)(IO.ioEffect)
    implicit val F: CustomConcurrentEffect = new CustomConcurrentEffect()
    val dummy = DummyException("dummy")
    val task = Task.raiseError(dummy)
    val io = task.to[CIO]
    val f = io.io.unsafeToFuture()
    s.tick()
    assertEquals(f.value, Some(Failure(dummy)))
  }
}
| monifu/monifu | monix-eval/shared/src/test/scala/monix/eval/TaskLiftSuite.scala | Scala | apache-2.0 | 3,058 |
package test
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
/**
 * Specs2 integration tests for the Computer model, each running inside a
 * FakeApplication backed by an in-memory database seeded with sample data
 * (hence the fixed expectations: id 21 is "Macintosh", 574 computers total).
 */
class ModelSpec extends Specification {
  import models._
  // -- Date helpers
  /** True when `date` formats to the given "yyyy-MM-dd" string. */
  def dateIs(date: java.util.Date, str: String) = new java.text.SimpleDateFormat("yyyy-MM-dd").format(date) == str
  // --
  "Computer model" should {
    "be retrieved by id" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        val Some(macintosh) = Computer.findById(21)
        macintosh.name must equalTo("Macintosh")
        macintosh.introduced must beSome.which(dateIs(_, "1984-01-24"))
      }
    }
    "be listed along its companies" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        val computers = Computer.list()
        // Default page: 10 items out of the full seeded set.
        computers.total must equalTo(574)
        computers.items must have length(10)
      }
    }
    "be updated if needed" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        Computer.update(21, Computer(name="The Macintosh", introduced=None, discontinued=None, companyId=Some(1)))
        val Some(macintosh) = Computer.findById(21)
        macintosh.name must equalTo("The Macintosh")
        macintosh.introduced must beNone
      }
    }
  }
} | sturtesm/redhat-openshift-play-demo | test/ModelSpec.scala | Scala | gpl-3.0 | 1,386 |
package blended.security.ssl.internal
import blended.security.ssl.{SslContextInfo => SslContextInfoTrait}
import javax.net.ssl.SSLContext
trait SslContextInfoMBean extends SslContextInfoTrait
/**
 * Exposes the runtime parameters of an SSLContext together with the configured
 * list of allowed cipher suites. The enabled protocols/ciphers are read from
 * fresh default SSL parameters on every call.
 */
class SslContextInfo(
  sslContext : SSLContext,
  cyphers : List[String]
) extends SslContextInfoMBean {
  // Cipher suites allowed by configuration (may differ from what the context enables).
  override def getAllowedCypherSuites() : Array[String] = cyphers.toArray
  override def getProtocol() : String = sslContext.getProtocol()
  override def getEnabledProtocols() : Array[String] = sslContext.getDefaultSSLParameters().getProtocols()
  override def getEnabledCypherSuites() : Array[String] = sslContext.getDefaultSSLParameters().getCipherSuites()
}
| woq-blended/blended | blended.security.ssl/src/main/scala/blended/security/ssl/internal/SslContextInfo.scala | Scala | apache-2.0 | 664 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.plugin
import org.apache.spark.api.plugin.DriverPlugin
import org.apache.spark.internal.Logging
import org.apache.spark.rpc.{IsolatedRpcEndpoint, RpcCallContext, RpcEnv}
/**
 * Envelope routing a plugin-specific payload to the [[DriverPlugin]] registered
 * under `pluginName`.
 */
case class PluginMessage(pluginName: String, message: AnyRef)

/**
 * Driver-side RPC endpoint that dispatches [[PluginMessage]]s to the matching
 * plugin instance. Unknown plugin names are rejected with
 * `IllegalArgumentException`.
 */
private class PluginEndpoint(
    plugins: Map[String, DriverPlugin],
    override val rpcEnv: RpcEnv)
  extends IsolatedRpcEndpoint with Logging {

  override def receive: PartialFunction[Any, Unit] = {
    case PluginMessage(pluginName, message) =>
      plugins.get(pluginName) match {
        case Some(plugin) =>
          try {
            val reply = plugin.receive(message)
            if (reply != null) {
              // A one-way message has no reply channel, so a non-null reply is
              // dropped. Log at warning (matching the error path below) so the
              // plugin author notices the discarded value.
              logWarning(
                s"Plugin $pluginName returned reply for one-way message of type " +
                s"${message.getClass().getName()}.")
            }
          } catch {
            case e: Exception =>
              logWarning(s"Error in plugin $pluginName when handling message of type " +
                s"${message.getClass().getName()}.", e)
          }

        case None =>
          throw new IllegalArgumentException(s"Received message for unknown plugin $pluginName.")
      }
  }

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case PluginMessage(pluginName, message) =>
      plugins.get(pluginName) match {
        case Some(plugin) =>
          // Exceptions propagate to the RPC layer, which reports the failure
          // back to the remote caller.
          context.reply(plugin.receive(message))

        case None =>
          throw new IllegalArgumentException(s"Received message for unknown plugin $pluginName.")
      }
  }
}
| maropu/spark | core/src/main/scala/org/apache/spark/internal/plugin/PluginEndpoint.scala | Scala | apache-2.0 | 2,387 |
/*
* Copyright (c) 2013 Aviat Networks.
* This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details.
*/
package vvv.docreg.backend
import akka.actor.{ActorRef, Actor}
import vvv.docreg.model.{FilteredRevision, Project, Revision, Document}
import akka.event.Logging
import net.liftweb.actor.LiftActor
import java.sql.Timestamp
import vvv.docreg.util.T
import org.squeryl.PrimitiveTypeMode._
import scala.concurrent.duration._
// Subscription management messages for the document stream actor.
case class Subscribe(subscriber: LiftActor)
case class Unsubscribe(subscriber: LiftActor)
// Notifications fed into the stream by the backend when documents change.
case class DocumentAdded(document: Document)
case class DocumentRevised(document: Document, latest: Revision)
case class DocumentChanged(document: Document)
// Messages pushed out to subscribers describing the stream contents.
case class StreamState(items: List[(Document,Revision,Project)])
case class StreamAddition(document: Document, revision: Revision, project: Project)
case class StreamInsert(document: Document, revision: Revision, project: Project, result: List[(Document,Revision,Project)])
case class StreamChange(documentId: Long, items: List[(Document,Revision,Project)])
case class StreamQuery(subscriber: LiftActor)
// Cake-pattern component exposing the document-stream actor to collaborators.
trait DocumentStreamComponent {
  val documentStream: ActorRef
}
/**
 * Actor maintaining a rolling window of recent document revisions and
 * broadcasting additions / changes to subscribed LiftActors.
 */
class DocumentStream extends Actor {
  val logger = Logging(context.system, this)
  // Subscribers that receive stream updates.
  var subscribers = Set.empty[LiftActor]
  // Oldest revision date admitted into the stream (rolling cutoff).
  var start = cutoff()
  // Stream contents, most-recent revision first.
  var stream = List.empty[(Document,Revision,Project)]

  override def preStart() {
    stream = FilteredRevision.findRecent(-1)
    // Refresh the cutoff once a day so old entries age out of the window.
    context.system.scheduler.schedule(24.hours, 24.hours, self, 'SetStart)(context.dispatcher)
    super.preStart()
  }

  def receive = {
    case 'SetStart => {
      // Advance the window, drop out-of-scope items and rebroadcast full state.
      start = cutoff()
      stream = retainOnlyWithinScope
      distribute(StreamState(stream))
    }
    case Subscribe(subscriber) => {
      subscribers = subscribers + subscriber
      subscriber ! StreamState(stream)
      logger.debug("Stream subscription, now have {} subscribers.", subscribers.size)
    }
    case Unsubscribe(subscriber) => {
      subscribers = subscribers - subscriber
    }
    case DocumentAdded(d) => {
      logger.info("Stream +++ {} added", d.key)
      insertInStream(d, d.latest, d.project()) foreach (distribute _)
    }
    case DocumentRevised(d, r) => {
      logger.info("Stream *** {} revised", d.key)
      insertInStream(d, r, d.project()) foreach (distribute _)
    }
    case DocumentChanged(d) => {
      logger.info("Stream ~~~ {} changed", d.key)
      d.project() match {
        case Some(p) => {
          // Reload the stored revision for every stream entry of this document.
          stream = inTransaction(
            stream flatMap { x =>
              if (x._1.id == d.id) x._2.reload.map( (d, _, p) ) else Some(x)
            }
          )
          // may be more efficient to send changed items, in map with revId -> d,r,p
          distribute(StreamChange(d.id, stream))
        }
        case None => {
          logger.warning("Can't find project matching {} change", d.key)
        }
      }
    }
    case StreamQuery(subscriber) => {
      subscriber ! StreamState(stream)
    }
    case other => {
      unhandled(other)
    }
  }

  // Keep only entries whose revision date is inside the current window.
  def retainOnlyWithinScope: List[(Document, Revision, Project)] = {
    stream.filter(i => withinScope(i._2.date))
  }

  // Window start: 31 days before now.
  def cutoff(): Timestamp = {
    T.ago(1000L * 60 * 60 * 24 * 31)
  }

  // Send a message to every current subscriber.
  def distribute(msg: AnyRef) {
    subscribers foreach (_ ! msg)
  }

  /**
   * Insert a revision into the stream, keeping most-recent-first order.
   * Returns the message to broadcast, or None when nothing was inserted.
   */
  def insertInStream(document: Document, revision: Revision, projectOption: Option[Project]): Option[AnyRef] = {
    projectOption match {
      case _ if (!withinScope(revision.date)) => {
        logger.warning("Stream ignoring insert for {}, not within scope.", document.key)
        None
      }
      case Some(project) => {
        if (mostRecent(revision.date)) {
          // Newer than the head: cheap prepend.
          stream = (document, revision, project) :: stream
          Some(StreamAddition(document, revision, project))
        }
        else {
          // Out-of-order arrival: prepend then re-sort the whole stream.
          stream = ((document, revision, project) :: stream) sortWith (mostRecentFirst _)
          Some(StreamInsert(document, revision, project, stream))
        }
      }
      case None => {
        logger.warning("Stream failed to insert for {}, could not identify project!", document.key)
        None
      }
    }
  }

  def withinScope(timestamp: Timestamp): Boolean = {
    start.before(timestamp)
  }

  // True when the timestamp is newer than the current head (or stream empty).
  def mostRecent(timestamp: Timestamp): Boolean = {
    stream.headOption.map(_._2.date) match {
      case None => true
      case Some(current) => current.before(timestamp)
    }
  }

  // Ordering: newer revision date first; ties broken by higher revision id.
  def mostRecentFirst(a: (Document, Revision, Project), b:(Document, Revision, Project)): Boolean = {
    a._2.date.compareTo(b._2.date) match {
      case x if (x > 0) => true
      case 0 => a._2.id > b._2.id
      case _ => false
    }
  }
}
| scott-abernethy/docreg-web | src/main/scala/vvv/docreg/backend/DocumentStream.scala | Scala | gpl-3.0 | 4,665 |
package spark.streaming.api.java
import spark.streaming.{Duration, Time, DStream}
import spark.api.java.function.{Function => JFunction}
import spark.api.java.JavaRDD
import spark.storage.StorageLevel
import spark.RDD
/**
* A Discretized Stream (DStream), the basic abstraction in Spark Streaming, is a continuous
* sequence of RDDs (of the same type) representing a continuous stream of data (see [[spark.RDD]]
* for more details on RDDs). DStreams can either be created from live data (such as, data from
* HDFS, Kafka or Flume) or it can be generated by transformation existing DStreams using operations
* such as `map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming program is running, each
* DStream periodically generates a RDD, either from live data or by transforming the RDD generated
* by a parent DStream.
*
* This class contains the basic operations available on all DStreams, such as `map`, `filter` and
* `window`. In addition, [[spark.streaming.api.java.JavaPairDStream]] contains operations available
* only on DStreams of key-value pairs, such as `groupByKeyAndWindow` and `join`.
*
* DStreams internally is characterized by a few basic properties:
* - A list of other DStreams that the DStream depends on
* - A time interval at which the DStream generates an RDD
* - A function that is used to generate an RDD after each time interval
*/
class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T])
    extends JavaDStreamLike[T, JavaDStream[T], JavaRDD[T]] {

  // Wrap a Scala RDD into its Java-friendly counterpart.
  override def wrapRDD(rdd: RDD[T]): JavaRDD[T] = JavaRDD.fromRDD(rdd)

  /** Return a new DStream containing only the elements that satisfy a predicate. */
  def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] =
    dstream.filter((x => f(x).booleanValue()))

  /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
  def cache(): JavaDStream[T] = dstream.cache()

  /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
  def persist(): JavaDStream[T] = dstream.persist()

  /** Persist the RDDs of this DStream with the given storage level */
  def persist(storageLevel: StorageLevel): JavaDStream[T] = dstream.persist(storageLevel)

  /** Generate an RDD for the given duration */
  def compute(validTime: Time): JavaRDD[T] = {
    dstream.compute(validTime) match {
      case Some(rdd) => new JavaRDD(rdd)
      // Java API convention: null (not Option) when no RDD exists for this time.
      case None => null
    }
  }

  /**
   * Return a new DStream in which each RDD contains all the elements in seen in a
   * sliding window of time over this DStream. The new DStream generates RDDs with
   * the same interval as this DStream.
   * @param windowDuration width of the window; must be a multiple of this DStream's interval.
   */
  def window(windowDuration: Duration): JavaDStream[T] =
    dstream.window(windowDuration)

  /**
   * Return a new DStream in which each RDD contains all the elements in seen in a
   * sliding window of time over this DStream.
   * @param windowDuration width of the window; must be a multiple of this DStream's
   *                       batching interval
   * @param slideDuration  sliding interval of the window (i.e., the interval after which
   *                       the new DStream will generate RDDs); must be a multiple of this
   *                       DStream's batching interval
   */
  def window(windowDuration: Duration, slideDuration: Duration): JavaDStream[T] =
    dstream.window(windowDuration, slideDuration)

  /**
   * Return a new DStream by unifying data of another DStream with this DStream.
   * @param that Another DStream having the same interval (i.e., slideDuration) as this DStream.
   */
  def union(that: JavaDStream[T]): JavaDStream[T] =
    dstream.union(that.dstream)
}
object JavaDStream {
  // Implicitly lift a Scala DStream into the Java API wrapper.
  implicit def fromDStream[T: ClassManifest](dstream: DStream[T]): JavaDStream[T] =
    new JavaDStream[T](dstream)
}
package csvside
import au.com.bytecode.opencsv.{ CSVReader => OpenCSVReader }
import au.com.bytecode.opencsv.{ CSVWriter => OpenCSVWriter }
/*
* This code from Apache 2.0 Licensed mighty-csv
*
* https://github.com/t-pleasure/mighty-csv/ at version 2dcee78
*/
object Mighty {

  /** One parsed CSV record: a cell per column. */
  type Row = Array[String]

  /** Iterator over the rows of a CSV stream, with null rows skipped. */
  class CSVReader(reader: OpenCSVReader) extends Iterator[Row] {
    private[this] val rows: Iterator[Row] = new CSVRowIterator(reader).flatten

    override def hasNext(): Boolean = rows.hasNext

    override def next(): Row = rows.next()

    /** Maps `fn` over every remaining row. */
    def apply[T](fn: Row => T): Iterator[T] = this.map(fn)

    /** Closes the underlying reader. */
    def close(): Unit = reader.close()
  }

  /**
   * Wrapper class for OpenCSVReader to allow for Thread-safe CSV row iteration.
   * Iterates over Option[Row] so that null rows from the underlying reader can
   * be handled safely (they become None).
   */
  class CSVRowIterator(reader: OpenCSVReader) extends Iterator[Option[Row]] {
    // One-row lookahead buffer; None once the stream is exhausted.
    var nextLine: Option[Row] = Option(reader.readNext())

    override def hasNext() = nextLine.isDefined

    override def next(): Option[Row] = {
      val current = nextLine
      nextLine = Option(reader.readNext())
      current
    }

    /** Flattens away the Option layer, yielding only present rows. */
    def asRows(): Iterator[Row] = this.flatten

    /** Maps `fn` over every present row. */
    def apply[T](fn: Row => T): Iterator[T] = asRows().map(fn)

    /** Closes the underlying reader. */
    def close(): Unit = reader.close()
  }

  /**
   * Writes rows given as Map[String,String].
   * `headers` fixes both which keys are extracted and the output column order.
   */
  class CSVDictWriter(writer: OpenCSVWriter, headers: Seq[String]) {

    /** Writes the header row. */
    def writeHeader(): Unit = writer.writeNext(headers.toArray)

    /** Writes one row; fails fast if a header key is missing from the map. */
    def write(row: Map[String, String]): Unit = {
      val cells = headers.map { col =>
        row.getOrElse(col, sys.error("Column (%s) not found in row [%s]".format(col, row.toString)))
      }.toArray
      writer.writeNext(cells)
    }

    /** Closes the underlying writer. */
    def close(): Unit = writer.close()

    /** Flushes the underlying writer (synchronized, as in the original API). */
    def flush(): Unit = writer.synchronized {
      writer.flush()
    }
  }
}
| davegurnell/csvside | src/main/scala/csvside/Mighty.scala | Scala | apache-2.0 | 2,350 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.events
import config.MicroserviceAppConfig
import factories.UUIDFactory
import model.persisted.EventExamples
import model.persisted.eventschedules._
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import repositories.events.{ EventsConfigRepositoryImpl, LocationsWithVenuesRepository }
import testkit.UnitWithAppSpec
import scala.concurrent.Future
/** Verifies that the YAML event-schedule config is parsed into the expected domain events. */
class EventsConfigRepositorySpec extends UnitWithAppSpec with ScalaFutures with testkit.MockitoSugar {

  "events" must {
    "successfully parse the event schedule config" in {
      // Three FSAC events (two London, one Newcastle) covering single- and
      // multi-session layouts; must match EventExamples.YamlEvents below.
      val input =
        """- eventType: FSAC
          | description: PDFS FSB
          | location: London
          | venue: LONDON_FSAC
          | date: 2017-04-03
          | capacity: 36
          | minViableAttendees: 12
          | attendeeSafetyMargin: 2
          | startTime: 11:00
          | endTime: 12:00
          | skillRequirements:
          |  ASSESSOR: 6
          |  CHAIR: 3
          |  DEPARTMENTAL_ASSESSOR: 3
          |  EXERCISE_MARKER: 3
          |  QUALITY_ASSURANCE_COORDINATOR: 1
          | sessions:
          |  - description: AM
          |    capacity: 36
          |    minViableAttendees: 12
          |    attendeeSafetyMargin: 4
          |    startTime: 11:00
          |    endTime: 12:00
          |- eventType: FSAC
          | description: PDFS FSB
          | location: London
          | venue: LONDON_FSAC
          | date: 2017-04-03
          | capacity: 36
          | minViableAttendees: 12
          | attendeeSafetyMargin: 2
          | startTime: 9:00
          | endTime: 12:00
          | skillRequirements:
          |  ASSESSOR: 6
          |  CHAIR: 3
          |  DEPARTMENTAL_ASSESSOR: 3
          |  EXERCISE_MARKER: 2
          |  QUALITY_ASSURANCE_COORDINATOR: 1
          | sessions:
          |  - description: First
          |    capacity: 36
          |    minViableAttendees: 12
          |    attendeeSafetyMargin: 4
          |    startTime: 9:00
          |    endTime: 10:30
          |  - description: Second
          |    capacity: 36
          |    minViableAttendees: 12
          |    attendeeSafetyMargin: 4
          |    startTime: 10:30
          |    endTime: 12:00
          |- eventType: FSAC
          | description: PDFS FSB
          | location: Newcastle
          | venue: NEWCASTLE_LONGBENTON
          | date: 2017-04-03
          | capacity: 36
          | minViableAttendees: 12
          | attendeeSafetyMargin: 2
          | startTime: 09:00
          | endTime: 12:00
          | skillRequirements:
          |  ASSESSOR: 6
          |  CHAIR: 3
          |  DEPARTMENTAL_ASSESSOR: 2
          |  EXERCISE_MARKER: 3
          |  QUALITY_ASSURANCE_COORDINATOR: 1
          | sessions:
          |  - description: First
          |    capacity: 36
          |    minViableAttendees: 12
          |    attendeeSafetyMargin: 4
          |    startTime: 9:00
          |    endTime: 10:30
          |  - description: Second
          |    capacity: 36
          |    minViableAttendees: 12
          |    attendeeSafetyMargin: 4
          |    startTime: 10:30
          |    endTime: 12:00""".stripMargin

      // Stub lookups so any location/venue name resolves successfully.
      val mockLocationsWithVenuesRepo = mock[LocationsWithVenuesRepository]
      when(mockLocationsWithVenuesRepo.venue(any[String])).thenReturn(Future.successful(Venue("london fsac", "bush house")))
      when(mockLocationsWithVenuesRepo.location(any[String])).thenReturn(Future.successful(Location("London")))

      val appConfigMock = mock[MicroserviceAppConfig]
      val mockUUIDFactory = mock[UUIDFactory]

      //      val repo = new EventsConfigRepository {
      //        override protected def eventScheduleConfig: String = input
      //        override def locationsWithVenuesRepo: LocationsWithVenuesRepository = mockLocationsWithVenuesRepo
      //      }
      // Repository under test, with the YAML above injected in place of the real config.
      val repo = new EventsConfigRepositoryImpl(app, mockLocationsWithVenuesRepo, appConfigMock, mockUUIDFactory) {
        override protected def eventScheduleConfig: String = input
      }

      implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

      // Normalise generated fields (ids, created-at, bulk flag) before comparison.
      def withDefaultFields(event: Event) = {
        event.copy(id = "e1", createdAt = EventExamples.eventCreatedAt, sessions = event.sessions.map { session =>
          session.copy(id = "s1")
        }, wasBulkUploaded = true
        )
      }

      whenReady(repo.events) { result =>
        result.zip(EventExamples.YamlEvents).foreach { case (actual, expected) =>
          withDefaultFields(actual) mustBe withDefaultFields(expected)
        }
      }
    }
  }
}
| hmrc/fset-faststream | test/services/events/EventsConfigRepositorySpec.scala | Scala | apache-2.0 | 5,449 |
/*
* The MIT License
*
* Copyright (c) 2020 Fulcrum Genomics
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.util
import com.fulcrumgenomics.bam.api.{SamOrder, SamRecord}
import com.fulcrumgenomics.testing.SamBuilder.Minus
import com.fulcrumgenomics.testing.{SamBuilder, UnitSpec}
import htsjdk.samtools.SamPairUtil
import org.scalatest.OptionValues
/** End-to-end test of AssignPrimers: BAM records are tagged with matching amplicon primers. */
class AssignPrimersTest extends UnitSpec with OptionValues {

  // Asserts the primer/amplicon tags on `rec` for an optional own-amplicon and
  // optional mate-amplicon; None means the corresponding tags must be absent.
  private def checkMatch(rec: SamRecord, amplicon: Option[Amplicon] = None, mateAmplicon: Option[Amplicon] = None): Unit = {
    amplicon match {
      case None =>
        rec.contains(AssignPrimers.PrimerCoordinateTag) shouldBe false
        rec.contains(AssignPrimers.AmpliconIdentifierTag) shouldBe false
      case Some(amp) =>
        // Strand determines whether the left or right primer location applies.
        if (rec.positiveStrand) {
          rec.get[String](AssignPrimers.PrimerCoordinateTag).value shouldBe amp.leftPrimerLocation
        }
        else {
          rec.get[String](AssignPrimers.PrimerCoordinateTag).value shouldBe amp.rightPrimerLocation
        }
        rec.get[String](AssignPrimers.AmpliconIdentifierTag).value shouldBe amp.identifier
    }
    mateAmplicon match {
      case None =>
        rec.contains(AssignPrimers.MatePrimerCoordinateTag) shouldBe false
        rec.contains(AssignPrimers.MateAmpliconIdentifierTag) shouldBe false
      case Some(amp) =>
        rec.paired shouldBe true
        if (rec.matePositiveStrand) {
          rec.get[String](AssignPrimers.MatePrimerCoordinateTag).value shouldBe amp.leftPrimerLocation
        }
        else {
          rec.get[String](AssignPrimers.MatePrimerCoordinateTag).value shouldBe amp.rightPrimerLocation
        }
        if (amplicon.contains(amp)) { // same amplicon
          rec.get[String](AssignPrimers.MateAmpliconIdentifierTag).value shouldBe "="
        }
        else {
          rec.get[String](AssignPrimers.MateAmpliconIdentifierTag).value shouldBe amp.identifier
        }
    }
  }

  // Convenience overloads for required (non-optional) amplicons.
  private def checkMatch(rec: SamRecord, amplicon: Amplicon): Unit = checkMatch(rec=rec, amplicon=Some(amplicon))
  private def checkMatch(rec: SamRecord, amplicon: Amplicon, mateAmplicon: Amplicon): Unit = {
    checkMatch(rec=rec, amplicon=Some(amplicon), mateAmplicon=Some(mateAmplicon))
  }

  // Instance initializer: builds the shared fixture and registers the tests.
  {
    val ampliconsPath = makeTempFile("amplicons.", ".txt")
    val outputBam = makeTempFile("output.", ".bam")
    val metricsPath = makeTempFile("metrics.", ".txt")

    // The amplicons to match against
    val amplicons = Seq(
      Amplicon("chr1", 100, 120, 180, 200),
      Amplicon("chr1", 500, 520, 580, 600),
      Amplicon("chr5", 100, 120, 180, 200)
    )

    // The reads to assign
    val builder = new SamBuilder()
    builder.addFrag(start=100, cigar="100M") // matches amplicon #1
    builder.addFrag(start=100, cigar="50S50M", strand=SamBuilder.Minus) // no match, wrong strand
    builder.addPair(start1=500, cigar1="100M", start2=501, cigar2="100M") // match amplicon #2 R1->F, R2->R
    builder.addPair(start1=501, cigar1="100M", strand1=SamBuilder.Minus, start2=500, cigar2="100M", strand2=SamBuilder.Plus) // match amplicon #2 R1->R, R2->F
    builder.addPair(start1=100, start2=501) // match amplicon #1 and #2, R1->1F, R2->2R
    builder.addPair(start1=100, start2=10000) // match amplicon #1 for R1->1F, no match for R2
    builder.addPair(contig=4, start1=100, start2=101) // match amplicon #5 , R1->F, R2->R

    // Reads to test --annotate-all, where we have a primary and supplementary read
    // R1->1F, R2_primary->2R, R2_supplementary->NA.  If --annotate-all is used, then R2_supplementary should have 2R
    val Seq(matePrimary, _) = builder.addPair(start1=100, start2=551, cigar1="100M", cigar2="50S50M")
    builder.addFrag(name=matePrimary.name, contig=2, start=1, cigar="50M50S", strand=Minus).foreach { supplemental =>
      supplemental.paired          = true
      supplemental.firstOfPair     = false
      supplemental.secondOfPair    = true
      supplemental.supplementary   = true
      SamPairUtil.setMateInformationOnSupplementalAlignment(supplemental.asSam, matePrimary.asSam, true)
    }

    Metric.write(ampliconsPath, amplicons)

    // Test with/without --annotate-all set
    Seq(false, true).foreach { annotateAll =>
      val label = if (annotateAll) " with --annotate-all" else ""
      "AssignPrimers" should s"assign primers to reads$label" in {
        val tool = new AssignPrimers(
          input       = builder.toTempFile(),
          primers     = ampliconsPath,
          output      = outputBam,
          metrics     = metricsPath,
          annotateAll = annotateAll
        )

        tool.execute()

        // check the output BAM
        val outputRecs = readBamRecs(bam=outputBam)

        // The output BAM may be resorted, so the following methods retrieve records by read name (since they're numerically indexed)
        def recs(nameIndex: Int): Seq[SamRecord] = outputRecs.filter(_.name.toInt == nameIndex)
        def rec(nameIndex: Int): SamRecord = recs(nameIndex) match {
          case Seq(rec) => rec
          case records  => throw new IllegalStateException(s"Found ${records.length}")
        }

        // frag
        checkMatch(rec=rec(0), amplicons(0))
        // frag
        checkMatch(rec=rec(1))
        // pair #1
        checkMatch(rec=recs(2).head, amplicons(1), amplicons(1))
        checkMatch(rec=recs(2).last, amplicons(1), amplicons(1))
        // pair #2
        checkMatch(rec=recs(3).head, amplicons(1), amplicons(1))
        checkMatch(rec=recs(3).last, amplicons(1), amplicons(1))
        // pair #3
        checkMatch(rec=recs(4).head, amplicons(0), amplicons(1))
        checkMatch(rec=recs(4).last, amplicons(1), amplicons(0))
        // pair #4
        checkMatch(rec=recs(5).head, amplicons(0))
        checkMatch(rec=recs(5).last, None, Some(amplicons(0)))
        // pair #5
        checkMatch(rec=recs(6).head, amplicons(2), amplicons(2))
        checkMatch(rec=recs(6).last, amplicons(2), amplicons(2))
        // pair #6
        checkMatch(rec=recs(7).head, amplicons(0), amplicons(1))
        checkMatch(rec=recs(7)(1), amplicons(1), amplicons(0))
        if (annotateAll) { // the amplicon tag (ma) **is** set in this case
          checkMatch(rec=recs(7)(2), amplicons(1), amplicons(0))
        } else { // the amplicon tag (ma) **is not** set in this case
          checkMatch(rec=recs(7)(2), None, Some(amplicons(0)))
        }

        // check the metrics
        val metrics       = Metric.read[AssignPrimersMetric](metricsPath)
        val allIdentifier = AssignPrimersMetric.AllAmpliconsIdentifier
        val expected      = IndexedSeq(
          AssignPrimersMetric(identifier=amplicons(0).identifier, left=4, right=0, r1s=4, r2s=0, pairs=0),
          AssignPrimersMetric(identifier=amplicons(1).identifier, left=2, right=4, r1s=2, r2s=4, pairs=2),
          AssignPrimersMetric(identifier=amplicons(2).identifier, left=1, right=1, r1s=1, r2s=1, pairs=1),
          AssignPrimersMetric(identifier=allIdentifier, left=7, right=5, r1s=7, r2s=5, pairs=3)
        ).map(_.finalize(total=outputRecs.length))
        metrics.length shouldBe amplicons.length + 1
        metrics.zip(expected).foreach { case (act, exp) =>
          // compare one-by-one to make it easier to debug
          act.finalize(0) shouldBe exp.finalize(0) // just the values
          act shouldBe exp // now the fractions
        }
        metrics.length shouldBe expected.length
      }
    }
  }
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/util/AssignPrimersTest.scala | Scala | mit | 8,507 |
package org.scalafmt.cli
import scala.io.Source
import java.io.InputStream
import java.nio.file.{Path, Paths}
import org.scalafmt.Error.MisformattedFile
import org.scalafmt.sysops.AbsoluteFile
/** A source of code to format: either stdin or a file on disk. */
sealed abstract class InputMethod {

  /** Reads the complete original input. */
  def readInput(options: CliOptions): String

  /** Path used for reporting, listing and diff headers. */
  def path: Path

  protected def print(text: String, options: CliOptions): Unit
  protected def list(options: CliOptions): Unit
  protected def overwrite(text: String, options: CliOptions): Unit

  /**
   * Emits the formatting result according to the configured write mode and
   * returns the exit code for this input. In `Test` mode a change raises
   * [[MisformattedFile]] carrying a unified diff.
   */
  final def write(
      formatted: String,
      original: String,
      options: CliOptions
  ): ExitCode = {
    val codeChanged = formatted != original
    options.writeMode match {
      case WriteMode.Stdout =>
        // Stdout mode always prints, changed or not.
        print(formatted, options)
      case WriteMode.Test if codeChanged =>
        val diff = InputMethod.unifiedDiff(path.toString, original, formatted)
        throw MisformattedFile(path, diff)
      case WriteMode.Override if codeChanged =>
        overwrite(formatted, options)
      case WriteMode.List if codeChanged =>
        list(options)
      case _ =>
    }
    if (options.error && codeChanged) ExitCode.TestError else ExitCode.Ok
  }
}
object InputMethod {

  object StdinCode {
    // Reads the whole of stdin eagerly and tags it with the assumed filename.
    def apply(assumeFilename: String, inputStream: InputStream): StdinCode = {
      StdinCode.apply(
        assumeFilename,
        Source.fromInputStream(inputStream).mkString
      )
    }
  }
  // Input that arrived on stdin; output always goes back to stdout.
  case class StdinCode(filename: String, input: String) extends InputMethod {
    override def path: Path = Paths.get(filename)
    def readInput(options: CliOptions): String = input

    override protected def print(text: String, options: CliOptions): Unit =
      options.common.out.print(text)
    override protected def overwrite(text: String, options: CliOptions): Unit =
      print(text, options)
    override protected def list(options: CliOptions): Unit =
      options.common.out.println(filename)
  }
  // Input backed by a file on disk; overwrite rewrites the file in place.
  case class FileContents(file: AbsoluteFile) extends InputMethod {
    override def path = file.path
    def readInput(options: CliOptions): String =
      file.readFile(options.encoding)
    override protected def print(text: String, options: CliOptions): Unit =
      options.common.out.print(text)
    override protected def overwrite(text: String, options: CliOptions): Unit =
      file.writeFile(text)(options.encoding)
    override protected def list(options: CliOptions): Unit = {
      // List paths relative to the working directory.
      val cwd = options.cwd.toUri
      options.common.out.println(cwd.relativize(file.toUri))
    }
  }

  // Builds a unified diff (context 1) between original and revised text;
  // returns "" when the two are line-identical.
  def unifiedDiff(
      filename: String,
      original: String,
      revised: String
  ): String = {
    import org.scalafmt.CompatCollections.JavaConverters._
    @inline def noEol(ch: Char) = ch != '\\n' && ch != '\\r'
    // Splits text into a Java list of lines, optionally appending an empty
    // line to represent a trailing EOL.
    def jList(code: String, addEol: Boolean) = {
      val last = if (addEol) Iterator.single("") else Iterator.empty
      (code.linesIterator ++ last).toList.asJava
    }
    val a = jList(original, false)
    // formatted always has EOL
    val b = jList(revised, original.isEmpty || noEol(original.last))
    val diff = difflib.DiffUtils.diff(a, b)
    if (diff.getDeltas.isEmpty) ""
    else {
      difflib.DiffUtils
        .generateUnifiedDiff(
          s"a$filename",
          s"b$filename",
          a,
          diff,
          1
        )
        .iterator()
        .asScala
        .mkString("\\n")
    }
  }
}
| scalameta/scalafmt | scalafmt-cli/src/main/scala/org/scalafmt/cli/InputMethod.scala | Scala | apache-2.0 | 3,342 |
package officeladder
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.all.div
import japgolly.scalajs.react.vdom.all.`class`
object HomePage {

  /** Static page body: heading plus the ladder list in a Bootstrap column. */
  val content =
    <.div(
      <.h2("Office Ladder"),
      <.div(
        ^.className := "col-lg-8",
        LadderList.content))

  /** Stateless top-level component wrapping the body in a Bootstrap container. */
  val container = ReactComponentB[Unit]("homePage")
    .render(_ => div(`class` := "container", content))
    .buildU
}
package com.lambdista.config
import scala.language.dynamics
/**
 * Enables dynamic traversal of a configuration tree, carrying any failure
 * along inside [[com.lambdista.config.Result]] instead of throwing.
 *
 * @author Alessandro Lacava (@lambdista)
 * @since 2016-11-23
 */
final case class ConfigWalker(value: Result[AbstractValue]) extends Dynamic {
  /** Steps into `key`; fails when the current value is not an [[AbstractMap]]. */
  def selectDynamic(key: String): ConfigWalker =
    ConfigWalker(value.flatMap {
      case map: AbstractMap => map.get(key)
      case other =>
        Left(
          new KeyNotFoundError(s"$other is not an AbstractMap so the $key key does not make sense on this object")
        )
    })

  /** Converts the current value to `A`, or fails with a [[ConversionError]]. */
  def as[A: ConcreteValue]: Result[A] =
    value.flatMap { abstractValue =>
      ConcreteValue[A].apply(abstractValue).toRight(new ConversionError(abstractValue))
    }
}
| lambdista/config | core/src/main/scala/com/lambdista/config/ConfigWalker.scala | Scala | apache-2.0 | 902 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.emailaddress
import org.scalatest.prop.PropertyChecks
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.emailaddress.EmailAddress.{Mailbox, Domain}
/** Property-based specification of EmailAddress construction, obfuscation and decomposition. */
class EmailAddressSpec extends WordSpec with Matchers with PropertyChecks with EmailAddressGenerators {

  "Creating an EmailAddress class" should {
    "work for a valid email" in {
      forAll(validEmailAddresses()) { address =>
        EmailAddress(address).value should be(address)
      }
    }
    "throw an exception for an invalid email" in {
      an [IllegalArgumentException] should be thrownBy { EmailAddress("sausages") }
    }
    // '§' is outside the accepted character set, so a valid address with it
    // prepended/appended must be rejected.
    "throw an exception for an valid email starting with invalid characters" in {
      forAll(validEmailAddresses()) { address =>
        an [IllegalArgumentException] should be thrownBy { EmailAddress("§"+ address) }
      }
    }
    "throw an exception for an valid email ending with invalid characters" in {
      forAll(validEmailAddresses()) { address =>
        an [IllegalArgumentException] should be thrownBy { EmailAddress(address + "§") }
      }
    }
    "throw an exception for an empty email" in {
      an [IllegalArgumentException] should be thrownBy { EmailAddress("") }
    }
    "throw an exception for a repeated email" in {
      an[IllegalArgumentException] should be thrownBy { EmailAddress("test@domain.comtest@domain.com") }
    }
    "throw an exception when the '@' is missing" in {
      forAll { s: String => whenever(!s.contains("@")) {
        an[IllegalArgumentException] should be thrownBy { EmailAddress(s) }
      }}
    }
  }

  "An EmailAddress class" should {
    "implicitly convert to a String of the address" in {
      val e: String = EmailAddress("test@domain.com")
      e should be ("test@domain.com")
    }
    "toString to a String of the address" in {
      val e = EmailAddress("test@domain.com")
      e.toString should be ("test@domain.com")
    }
    "be obfuscatable" in {
      EmailAddress("abcdef@example.com").obfuscated.value should be("a****f@example.com")
    }
    "have a local part" in forAll (validMailbox, validDomain) { (mailbox, domain) =>
      val exampleAddr = EmailAddress(s"$mailbox@$domain")
      exampleAddr.mailbox should (be (a[Mailbox]) and have ('value (mailbox)))
      exampleAddr.domain should (be (a[Domain]) and have ('value (domain)))
    }
  }

  "A email address domain" should {
    "be extractable from an address" in forAll (validMailbox, validDomain) { (mailbox, domain) =>
      EmailAddress(s"$mailbox@$domain").domain should (be (a[Domain]) and have ('value (domain)))
    }
    "be creatable for a valid domain" in forAll (validDomain) { domain =>
      EmailAddress.Domain(domain) should (be (a[Domain]) and have ('value (domain)))
    }
    "not create for invalid domains" in {
      an [IllegalArgumentException] should be thrownBy EmailAddress.Domain("")
      an [IllegalArgumentException] should be thrownBy EmailAddress.Domain("e.")
      an [IllegalArgumentException] should be thrownBy EmailAddress.Domain(".uk")
      an [IllegalArgumentException] should be thrownBy EmailAddress.Domain(".com")
      an [IllegalArgumentException] should be thrownBy EmailAddress.Domain("*domain")
    }
    "compare equal if identical" in forAll (validDomain, validMailbox, validMailbox) { (domain, mailboxA, mailboxB) =>
      val exampleA = EmailAddress(s"$mailboxA@$domain")
      val exampleB = EmailAddress(s"$mailboxB@$domain")
      exampleA.domain should equal (exampleB.domain)
    }
    "not compare equal if completely different" in forAll (validMailbox, validDomain, validDomain) { (mailbox, domainA, domainB) =>
      val exampleA = EmailAddress(s"$mailbox@$domainA")
      val exampleB = EmailAddress(s"$mailbox@$domainB")
      exampleA.domain should not equal exampleB.domain
    }
    "toString to a String of the domain" in {
      Domain("domain.com").toString should be ("domain.com")
    }
    "implicitly convert to a String of the domain" in {
      val e: String = Domain("domain.com")
      e should be ("domain.com")
    }
  }

  "A email address mailbox" should {
    "be extractable from an address" in forAll (validMailbox, validDomain) { (mailbox, domain) =>
      EmailAddress(s"$mailbox@$domain").mailbox should (be (a[Mailbox]) and have ('value (mailbox)))
    }
    // Mailbox equality ignores the domain part entirely.
    "compare equal" in forAll (validMailbox, validDomain, validDomain) { (mailbox, domainA, domainB) =>
      val exampleA = EmailAddress(s"$mailbox@$domainA")
      val exampleB = EmailAddress(s"$mailbox@$domainB")
      exampleA.mailbox should equal (exampleB.mailbox)
    }
    "not compare equal if completely different" in forAll (validDomain, validMailbox, validMailbox) { (domain, mailboxA, mailboxB) =>
      val exampleA = EmailAddress(s"$mailboxA@$domain")
      val exampleB = EmailAddress(s"$mailboxB@$domain")
      exampleA.mailbox should not equal exampleB.mailbox
    }
    "toString to a String of the domain" in {
      EmailAddress("test@domain.com").mailbox.toString should be ("test")
    }
    "implicitly convert to a String of the domain" in {
      val e: String = EmailAddress("test@domain.com").mailbox
      e should be ("test")
    }
  }
}
| hmrc/emailaddress | src/test/scala/uk/gov/hmrc/emailaddress/EmailAddressSpec.scala | Scala | apache-2.0 | 5,794 |
package sangria.schema
import sangria.ast
import sangria.execution.WithViolations
import sangria.validation._
import sangria.macros._
import sangria.macros.derive.{
InputObjectTypeName,
ObjectTypeName,
deriveInputObjectType,
deriveObjectType
}
import sangria.util.Pos
import scala.util.{Failure, Success, Try}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class SchemaConstraintsSpec extends AnyWordSpec with Matchers {
"Schema" should {
"not allow use same type name for different GraphQL type kinds (input & output type)" in {
val inputType =
InputObjectType("Point", List(InputField("x", FloatType), InputField("y", FloatType)))
val outputType = ObjectType(
"Point",
fields[Unit, Unit](
Field("x", FloatType, resolve = _ => 1.234),
Field("y", FloatType, resolve = _ => 1.234),
Field("z", FloatType, resolve = _ => 1.234)))
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field(
"foo",
OptionType(outputType),
arguments = Argument("points", ListInputType(inputType)) :: Nil,
resolve = _ => None)))
val error = intercept[SchemaValidationException](Schema(queryType))
error.getMessage should include(
"Type name 'Point' is used for several conflicting GraphQL type kinds: ObjectType, InputObjectType. Conflict found in an argument 'points' defined in field 'foo' of 'Query' type.")
}
"not allow use same type name for different GraphQL type kinds (input & scalar type)" in {
val inputType =
InputObjectType("Point", List(InputField("x", FloatType), InputField("y", FloatType)))
val scalarType = ScalarType[String](
"Point",
coerceOutput = valueOutput,
coerceUserInput = {
case s: String => Right(s)
case _ => Left(StringCoercionViolation)
},
coerceInput = {
case ast.StringValue(s, _, _, _, _) => Right(s)
case _ => Left(StringCoercionViolation)
}
)
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field(
"foo",
OptionType(scalarType),
arguments = Argument("points", ListInputType(inputType)) :: Nil,
resolve = _ => None)))
val error = intercept[SchemaValidationException](Schema(queryType))
error.getMessage should include(
"Type name 'Point' is used for several conflicting GraphQL type kinds: ScalarType, InputObjectType. Conflict found in an argument 'points' defined in field 'foo' of 'Query' type.")
}
"not allow reserved names" in {
val inputType =
InputObjectType("__Input", List(InputField("x", FloatType), InputField("__y", FloatType)))
val scalarType = ScalarType[String](
"__Point",
coerceOutput = valueOutput,
coerceUserInput = {
case s: String => Right(s)
case _ => Left(StringCoercionViolation)
},
coerceInput = {
case ast.StringValue(s, _, _, _, _) => Right(s)
case _ => Left(StringCoercionViolation)
}
)
val bazType =
InterfaceType("__Baz", fields[Unit, Unit](Field("id", IntType, resolve = _ => 1)))
val barType = ObjectType(
"__Bar",
interfaces[Unit, Unit](bazType),
fields[Unit, Unit](Field("foo", OptionType(scalarType), resolve = _ => None)))
val colorType = EnumType(
"__Color",
values = List(
EnumValue("RED", value = 1),
EnumValue("__GREEN", value = 2),
EnumValue("__BLUE", value = 3)))
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field("__foo", OptionType(scalarType), resolve = _ => None),
Field("bar", OptionType(barType), resolve = _ => None),
Field("color", OptionType(colorType), resolve = _ => None)
)
)
val error =
intercept[SchemaValidationException](Schema(queryType, additionalTypes = inputType :: Nil))
error.violations.map(_.errorMessage).toSet should be(Set(
"Input type name '__Input' is invalid. The name is reserved for GraphQL introspection API.",
"Field name '__y' defined in input type '__Input' is invalid. The name is reserved for GraphQL introspection API.",
"Field name '__foo' defined in type 'Query' is invalid. The name is reserved for GraphQL introspection API.",
"Object type name '__Bar' is invalid. The name is reserved for GraphQL introspection API.",
"Interface type name '__Baz' is invalid. The name is reserved for GraphQL introspection API.",
"Enum type name '__Color' is invalid. The name is reserved for GraphQL introspection API.",
"Enum value name '__GREEN' defined in enum type '__Color' is invalid. The name is reserved for GraphQL introspection API.",
"Enum value name '__BLUE' defined in enum type '__Color' is invalid. The name is reserved for GraphQL introspection API.",
"Scalar type name '__Point' is invalid. The name is reserved for GraphQL introspection API."
))
}
"reject an Enum type with incorrectly named values" in {
val colorType = EnumType(
"Color",
values = List(
EnumValue("RED", value = 1),
EnumValue("true", value = 2),
EnumValue("false", value = 3),
EnumValue("null", value = 4)))
val queryType = ObjectType(
"Query",
fields[Unit, Unit](Field("color", OptionType(colorType), resolve = _ => None)))
val error = intercept[SchemaValidationException](Schema(queryType))
error.violations.map(_.errorMessage).toSet should be(
Set(
"Name 'Color.true' can not be used as an Enum value.",
"Name 'Color.false' can not be used as an Enum value.",
"Name 'Color.null' can not be used as an Enum value."
))
}
"not allow empty list of fields" in {
val int1Type = InterfaceType[Unit, Unit]("Interface1", Nil)
val int2Type = InterfaceType[Unit, Unit]("Interface2", Nil, interfaces[Unit, Unit](int1Type))
val outType = ObjectType[Unit, Unit]("Output", interfaces[Unit, Unit](int2Type), Nil)
val inputType = InputObjectType("Input", Nil)
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field(
"foo",
OptionType(outType),
arguments = Argument("bar", inputType) :: Nil,
resolve = _ => ())))
val error = intercept[SchemaValidationException](Schema(queryType))
error.violations.map(_.errorMessage).toSet should be(
Set(
"Input type 'Input' must define one or more fields.",
"Interface type 'Interface1' must define one or more fields.",
"Interface type 'Interface2' must define one or more fields.",
"Object type 'Output' must define one or more fields."
))
}
"Not allow ObjectTypes with same name to be based on different case classes" in {
val fooBazType = deriveObjectType[Unit, test.foo.Baz]()
val barBazType = deriveObjectType[Unit, test.bar.Baz]()
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field("fooBaz", OptionType(fooBazType), resolve = _ => Some(test.foo.Baz(1))),
Field("barBaz", barBazType, resolve = _ => test.bar.Baz("2", 3.0))
)
)
val error = intercept[SchemaValidationException](Schema(queryType))
error.getMessage should include(
"""Type name 'Baz' is used for several conflicting GraphQL ObjectTypes based on different classes. Conflict found in a field 'barBaz' of 'Query' type. One possible fix is to use ObjectTypeName like this: deriveObjectType[Foo, Bar](ObjectTypeName("OtherBar")) to avoid that two ObjectTypes have the same name.""")
}
"Not allow InputObjectTypes with same name but with different field names" in {
val fooBazType: InputObjectType[test.foo.Baz] =
deriveInputObjectType(InputObjectTypeName("baz"))
val barBazType: InputObjectType[test.bar.Baz] =
deriveInputObjectType(InputObjectTypeName("baz"))
val draftType = InputObjectType(
name = "DraftType",
fields = List(
InputField("fooBaz", OptionInputType(fooBazType)),
InputField("barBaz", barBazType)
)
)
val draft = Argument("draft", draftType)
val mutationType = ObjectType(
"Mutation",
fields[Unit, Unit](
Field("nothing", StringType, arguments = draft :: Nil, resolve = _ => "hello")
)
)
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field("nothing", StringType, resolve = _ => "hello")
))
val error = intercept[SchemaValidationException](Schema(queryType, Some(mutationType)))
error.getMessage should include(
"""Type name 'baz' is used for several conflicting GraphQL InputObjectTypes based on different classes. Conflict found in a field 'barBaz' of 'DraftType' input object type. One possible fix is to use InputObjectTypeName like this: deriveInputObjectType[Foo, Bar](InputObjectTypeName("OtherBar")) to avoid that two InputObjectTypes have the same name.""")
}
"Allow ObjectTypes based on different case classes but with different names" in {
implicit val fooBazType: ObjectType[Unit, test.foo.Baz] =
deriveObjectType[Unit, test.foo.Baz]()
implicit val barBazType: ObjectType[Unit, test.bar.Baz] =
deriveObjectType[Unit, test.bar.Baz](ObjectTypeName("BazWithNewName"))
val queryType = ObjectType(
"Query",
fields[Unit, Unit](
Field("fooBaz", OptionType(fooBazType), resolve = _ => Some(test.foo.Baz(1))),
Field("barBaz", barBazType, resolve = _ => test.bar.Baz("2", 3.0))
)
)
Schema(queryType) // Should not throw any SchemaValidationExceptions
}
}
"Type System: Union types must be valid" should {
"accepts a Union type with member types" in validSchema(graphql"""
type Query {
test: GoodUnion
}
type TypeA {
field: String
}
type TypeB {
field: String
}
union GoodUnion =
| TypeA
| TypeB
""")
"rejects a Union type with empty types" in invalidSchema(
graphql"""
type Query {
test: BadUnion
}
union BadUnion
""",
"Union type 'BadUnion' must define one or more member types." -> Seq(Pos(6, 9))
)
"rejects a Union type with duplicated member type" in invalidSchema(
{
val ast1 =
graphql"""
type Query {
test: BadUnion
}
union BadUnion =
| TypeA
| TypeB
"""
val ast2 =
graphql"""
type TypeA {
field: String
}
type TypeB {
field: String
}
extend union BadUnion = TypeA
"""
ast1 + ast2
},
"Union type 'BadUnion' can only include type 'TypeA' once." -> Seq(Pos(7, 17), Pos(10, 37))
)
"rejects a Union type with non-Object members types" in invalidSchema(
graphql"""
type Query {
test: BadUnion
}
type TypeA {
field: String
}
type TypeB {
field: String
}
union BadUnion =
| TypeA
| String
| TypeB
""",
"Type 'String' is not an object type." -> Seq(Pos(16, 13))
)
}
"Type System: Input Objects must have fields" should {
"accepts an Input Object type with fields" in validSchema(graphql"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject {
field: String
}
""")
"rejects an Input Object type with missing fields" in invalidSchema(
graphql"""
type Query {
field(arg: SomeInputObject): String
}
input SomeInputObject
""",
"Input type 'SomeInputObject' must define one or more fields." -> Seq(Pos(6, 9))
)
"rejects an Input Object type with incorrectly typed fields" in invalidSchema(
graphql"""
type Query {
field(arg: SomeInputObject): String
}
type SomeObject {
field: String
}
union SomeUnion = SomeObject
input SomeInputObject {
badObject: SomeObject
badUnion: SomeUnion
goodInputObject: SomeInputObject
}
""",
"Type 'SomeObject' is not an input type type." -> Seq(Pos(13, 22))
)
}
"Type System: Enum types must be well defined" should {
"rejects an Enum type without values" in invalidSchema(
graphql"""
type Query {
field: SomeEnum
}
enum SomeEnum
""",
"Enum type 'SomeEnum' must define one or more values." -> Seq(Pos(6, 9))
)
"rejects an Enum type with duplicate values" in invalidSchema(
graphql"""
type Query {
field: SomeEnum
}
enum SomeEnum {
SOME_VALUE
SOME_VALUE
}
""",
"Enum type 'SomeEnum' can include value 'SOME_VALUE' only once." -> Seq(
Pos(7, 11),
Pos(8, 11))
)
}
"Type System: Object fields must have output types" should {
"rejects with relevant locations for a non-output type as an Object field type" in invalidSchema(
graphql"""
type Query {
field: [SomeInputObject]
}
input SomeInputObject {
field: String
}
""",
"Type 'SomeInputObject' is not an output type type." -> Seq(Pos(3, 19))
)
}
"Type System: Objects can only implement unique interfaces" should {
"rejects an Object implementing a non-Interface type" in invalidSchema(
graphql"""
type Query {
test: BadObject
}
input SomeInputObject {
field: String
}
type BadObject implements SomeInputObject {
field: String
}
""",
"Type 'SomeInputObject' is not an output type type." -> Seq(Pos(10, 35))
)
"rejects an Object implementing the same interface twice" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface & AnotherInterface {
field: String
}
""",
"Object type 'AnotherObject' can implement interface 'AnotherInterface' only once." -> Seq(
Pos(10, 39),
Pos(10, 58))
)
"rejects an Object implementing the same interface twice due to extension" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
extend type AnotherObject implements AnotherInterface
""",
"Object type 'AnotherObject' can implement interface 'AnotherInterface' only once." -> Seq(
Pos(10, 39),
Pos(14, 46))
)
"rejects an Object implementing the extended interface due to missing field (via extension)" in invalidSchema(
buildSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
""").extend(graphql"""
extend type AnotherObject implements AnotherInterface
"""),
"Object type 'AnotherObject' can implement interface 'AnotherInterface' only once." -> Seq(
Pos(10, 39),
Pos(2, 46))
)
}
"Type System: Interface extensions should be valid" should {
"rejects an Object implementing the extended interface due to missing field args" in invalidSchema(
buildSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
""").extend(graphql"""
extend interface AnotherInterface {
newField(test: Boolean): String
}
extend type AnotherObject {
newField: String
}
"""),
"AnotherInterface.newField expects argument 'test', but AnotherObject.newField does not provide it." -> Seq(
Pos(3, 20),
Pos(7, 11))
)
"rejects Objects implementing the extended interface due to mismatching interface type" in invalidSchema(
buildSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String
}
""").extend(graphql"""
extend interface AnotherInterface {
newInterfaceField: NewInterface
}
interface NewInterface {
newField: String
}
interface MismatchingInterface {
newField: String
}
extend type AnotherObject {
newInterfaceField: MismatchingInterface
}
"""),
"AnotherInterface.newInterfaceField expects type 'NewInterface', but AnotherObject.newInterfaceField provides type 'MismatchingInterface'." -> Seq(
Pos(15, 11),
Pos(3, 11))
)
}
"Type System: Interface fields must have output types" should {
"rejects a non-output type as an Interface field type with locations" in invalidSchema(
graphql"""
type Query {
test: SomeInterface
}
interface SomeInterface {
field: SomeInputObject
}
input SomeInputObject {
foo: String
}
""",
"Type 'SomeInputObject' is not an output type type." -> Seq(Pos(7, 18))
)
}
"Type System: Field arguments must have input types" should {
"rejects a non-input type as a field arg with locations" in invalidSchema(
graphql"""
type Query {
test(arg: SomeObject): String
}
type SomeObject {
foo: String
}
""",
"Type 'SomeObject' is not an input type type." -> Seq(Pos(3, 21))
)
}
"Type System: Input Object fields must have input types" should {
"rejects a non-input type as an input object field with locations" in invalidSchema(
graphql"""
type Query {
test(arg: SomeInputObject): String
}
input SomeInputObject {
foo: SomeObject
}
type SomeObject {
bar: String
}
""",
"Type 'SomeObject' is not an input type type." -> Seq(Pos(7, 16))
)
}
"Objects must adhere to Interface they implement" should {
"accepts an Object which implements an Interface" in validSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): String
}
""")
"accepts an Object which implements an Interface along with more fields" in validSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): String
anotherField: String
}
""")
"accepts an Object which implements an Interface field along with additional optional arguments" in validSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String, anotherInput: String): String
}
""")
"rejects an Object with an incorrectly typed Interface field" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String): Int
}
""",
"AnotherInterface.field expects type 'String', but AnotherObject.field provides type 'Int'." -> Seq(
Pos(11, 11),
Pos(7, 11))
)
"rejects an Object with a differently typed Interface field" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
type A { foo: String }
type B { foo: String }
interface AnotherInterface {
field: A
}
type AnotherObject implements AnotherInterface {
field: B
}
""",
"AnotherInterface.field expects type 'A', but AnotherObject.field provides type 'B'." -> Seq(
Pos(14, 11),
Pos(10, 11))
)
"accepts an Object with a subtyped Interface field (interface)" in validSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: AnotherInterface
}
type AnotherObject implements AnotherInterface {
field: AnotherObject
}
""")
"accepts an Object with a subtyped Interface field (union)" in validSchema(graphql"""
type Query {
test: AnotherObject
}
type SomeObject {
field: String
}
union SomeUnionType = SomeObject
interface AnotherInterface {
field: SomeUnionType
}
type AnotherObject implements AnotherInterface {
field: SomeObject
}
""")
"rejects an Object missing an Interface argument" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field: String
}
""",
"AnotherInterface.field expects argument 'input', but AnotherObject.field does not provide it." -> Seq(
Pos(7, 17),
Pos(11, 11))
)
"rejects an Object with an incorrectly typed Interface argument" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: Int): String
}
""",
"AnotherInterface.field(input) expects type 'String', but AnotherObject.field(input) provides type 'Int'." -> Seq(
Pos(7, 17),
Pos(11, 17))
)
"rejects an Object with both an incorrectly typed field and argument" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: Int): Int
}
""",
"AnotherInterface.field expects type 'String', but AnotherObject.field provides type 'Int'." -> Seq(
Pos(11, 11),
Pos(7, 11))
)
"rejects an Object which implements an Interface field along with additional required arguments" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field(input: String): String
}
type AnotherObject implements AnotherInterface {
field(input: String, anotherInput: String!): String
}
""",
"AnotherObject.field(anotherInput) is of required type 'String!', but is not also provided by the interface AnotherInterface.field." -> Seq(
Pos(11, 32),
Pos(7, 11))
)
"accepts an Object with an equivalently wrapped Interface field type" in validSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: [String]!
}
type AnotherObject implements AnotherInterface {
field: [String]!
}
""")
"rejects an Object with a non-list Interface field list type" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: [String]
}
type AnotherObject implements AnotherInterface {
field: String
}
""",
"AnotherInterface.field expects type '[String]', but AnotherObject.field provides type 'String'." -> Seq(
Pos(11, 11),
Pos(7, 11))
)
"rejects an Object with a list Interface field non-list type" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: [String]
}
""",
"AnotherInterface.field expects type 'String', but AnotherObject.field provides type '[String]'." -> Seq(
Pos(11, 11),
Pos(7, 11))
)
"accepts an Object with a subset non-null Interface field type" in validSchema(graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String
}
type AnotherObject implements AnotherInterface {
field: String!
}
""")
"rejects an Object with a superset nullable Interface field type" in invalidSchema(
graphql"""
type Query {
test: AnotherObject
}
interface AnotherInterface {
field: String!
}
type AnotherObject implements AnotherInterface {
field: String
}
""",
"AnotherInterface.field expects type 'String!', but AnotherObject.field provides type 'String'." -> Seq(
Pos(11, 11),
Pos(7, 11))
)
}
  /** Builds a schema directly from the given SDL AST document. */
  private[this] def buildSchema(document: ast.Document) =
    Schema.buildFromAst(document)
  /** Succeeds iff building the schema does not throw; the result itself is ignored. */
  private[this] def validSchema(document: ast.Document) = buildSchema(document)
  /** Convenience overload: builds the schema from an SDL document, then delegates below. */
  private[this] def invalidSchema(
      document: ast.Document,
      expected: (String, Seq[Pos])*
  ): Unit = invalidSchema(buildSchema(document), expected: _*)
  /**
   * Asserts that forcing the (by-name) `schema` fails with schema-validation violations:
   * exactly `expected.size` of them, and for each expected pair the actual violations
   * must contain one whose message includes the substring and whose AST locations
   * match the given positions exactly.
   *
   * NOTE(review): the `@unchecked` match only handles `Failure(e: WithViolations)`;
   * a failure with any other exception type escapes as a `MatchError` — acceptable
   * for a test helper, but worth knowing when a test blows up unexpectedly.
   */
  private[this] def invalidSchema(schema: => Schema[_, _], expected: (String, Seq[Pos])*): Unit =
    (Try(schema): @unchecked) match {
      case Success(_) => fail("Schema was built successfully")
      case Failure(e: WithViolations) =>
        // Pretty-print every actual violation as copy-pasteable `"msg" -> Seq(Pos(..))`
        // code so that a mismatch is easy to diagnose and fix in the test source.
        val violationsStr =
          "Actual violations:\\n\\n" + e.violations.zipWithIndex
            .map { case (v, idx) =>
              val helperStr = v match {
                case n: AstNodeLocation =>
                  "  \\"" + n.simpleErrorMessage + "\\" -> Seq(" + n.locations
                    .map(l => s"Pos(${l.line}, ${l.column})")
                    .mkString(", ") + ")"
                case n => n.errorMessage
              }
              s"(${idx + 1}) " + v.errorMessage + "\\n\\n" + helperStr
            }
            .mkString("\\n\\n") + "\\n\\n"
        withClue(violationsStr) {
          e.violations should have size expected.size
          // NB: the lambda parameter deliberately shadows the outer `expected` varargs.
          expected.foreach { case (expected, pos) =>
            e.violations.exists { error =>
              val message = error.errorMessage
              // Message is matched by substring; positions must match exactly and
              // only violations that carry AST locations can satisfy a Pos list.
              message.contains(expected) && {
                error match {
                  case n: AstNodeLocation => n.locations.map(p => Pos(p.line, p.column)) == pos
                  case _ => false
                }
              }
            } should be(true)
          }
        }
    }
}
| sangria-graphql/sangria | modules/derivation/src/test/scala/sangria/schema/SchemaConstraintsSpec.scala | Scala | apache-2.0 | 28,341 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zouzias.spark.lucenerdd
import org.apache.lucene.document._
import org.apache.lucene.facet.FacetField
import org.apache.spark.sql.Row
import scala.reflect.ClassTag
/**
* Contains implicit conversion to [[org.apache.lucene.document.Document]]
* which prepares the index for faceted search as well.
*/
package object facets {
  // All indexed values are also stored so they can be returned with search results.
  private val Stored = Field.Store.YES
  // Field name used when a bare value (no explicit field name) is indexed;
  // mirrors Tuple1's accessor name "_1".
  private val DefaultFieldName = "_1"
  /**
   * Adds an extra field on the index with suffix [[FacetedLuceneRDD.FacetTextFieldSuffix]].
   * This field is used on faceted queries.
   *
   * @param doc Input document
   * @param fieldName Field name
   * @param fieldValue Field value to be indexed
   */
  private def addTextFacetField(doc: Document, fieldName: String, fieldValue: String): Unit = {
    if (fieldValue.nonEmpty) { // Issues with empty strings on facets
      doc.add(new FacetField(s"${fieldName}${FacetedLuceneRDD.FacetTextFieldSuffix}",
        fieldValue))
    }
  }
  /** Indexes a bare [[Int]] under the default field name, plus its text facet. */
  implicit def intToDocument(v: Int): Document = {
    val doc = new Document
    doc.add(new IntPoint(DefaultFieldName, v))
    addTextFacetField(doc, DefaultFieldName, v.toString)
    doc
  }
  /** Indexes a bare [[Long]] under the default field name, plus its text facet. */
  implicit def longToDocument(v: Long): Document = {
    val doc = new Document
    doc.add(new LongPoint(DefaultFieldName, v))
    addTextFacetField(doc, DefaultFieldName, v.toString)
    doc
  }
  /** Indexes a bare [[Double]] under the default field name, plus its text facet. */
  implicit def doubleToDocument(v: Double): Document = {
    val doc = new Document
    doc.add(new DoublePoint(DefaultFieldName, v))
    addTextFacetField(doc, DefaultFieldName, v.toString)
    doc
  }
  /** Indexes a bare [[Float]] under the default field name, plus its text facet. */
  implicit def floatToDocument(v: Float): Document = {
    val doc = new Document
    doc.add(new FloatPoint(DefaultFieldName, v))
    addTextFacetField(doc, DefaultFieldName, v.toString)
    doc
  }
  /** Indexes a bare [[String]] under the default field name (tokenized and stored). */
  implicit def stringToDocument(s: String): Document = {
    val doc = new Document
    doc.add(new TextField(DefaultFieldName, s, Stored))
    addTextFacetField(doc, DefaultFieldName, s)
    doc
  }
  // Indexes a tuple element under its positional field name "_<index>".
  private def tupleTypeToDocument[T: ClassTag](doc: Document, index: Int, s: T): Document = {
    typeToDocument(doc, s"_${index}", s)
  }
  /**
   * Indexes a single value under field name `fName`, together with the companion
   * facet field required by faceted search (text facet for strings, doc-values
   * field for numeric types).
   *
   * @param doc document to mutate
   * @param fName target field name
   * @param s value to index; supported types are String, Long, Int, Float, Double
   * @return the same (mutated) document
   * @throws IllegalArgumentException if the value is null or of an unsupported type
   */
  def typeToDocument[T: ClassTag](doc: Document, fName: String, s: T): Document = {
    s match {
      case x: String =>
        doc.add(new TextField(fName, x, Stored))
        addTextFacetField(doc, fName, x)
      case x: Long =>
        doc.add(new LongPoint(fName, x))
        doc.add(new StoredField(fName, x))
        // Bug fix: the previous s"${fName} ${...Suffix}" contained a stray space,
        // producing a doc-values field name ("<name> <suffix>") that never matched
        // the suffix-concatenated name used elsewhere — numeric facets on Long
        // fields were silently broken. Now consistent with the Int/Float/Double cases.
        doc.add(new NumericDocValuesField(s"${fName}${FacetedLuceneRDD.FacetNumericFieldSuffix}",
          x))
      case x: Int =>
        doc.add(new IntPoint(fName, x))
        doc.add(new StoredField(fName, x))
        doc.add(new NumericDocValuesField(s"${fName}${FacetedLuceneRDD.FacetNumericFieldSuffix}",
          x.toLong))
      case x: Float =>
        doc.add(new FloatPoint(fName, x))
        doc.add(new StoredField(fName, x))
        doc.add(new FloatDocValuesField(s"${fName}${FacetedLuceneRDD.FacetNumericFieldSuffix}",
          x))
      case x: Double =>
        doc.add(new DoublePoint(fName, x))
        doc.add(new StoredField(fName, x))
        doc.add(new DoubleDocValuesField(s"${fName}${FacetedLuceneRDD.FacetNumericFieldSuffix}",
          x))
      case null =>
        // Previously an opaque MatchError (e.g. for null DataFrame columns);
        // fail fast with a descriptive message instead.
        throw new IllegalArgumentException(s"Null value cannot be indexed for field '${fName}'")
      case other =>
        // Previously an opaque MatchError for unsupported types (e.g. Boolean).
        throw new IllegalArgumentException(
          s"Unsupported type '${other.getClass.getName}' for field '${fName}'")
    }
    doc
  }
  /** Indexes every element of `iter` under the default positional field name "_1". */
  implicit def iterablePrimitiveToDocument[T: ClassTag](iter: Iterable[T]): Document = {
    val doc = new Document
    iter.foreach(item => tupleTypeToDocument(doc, 1, item))
    doc
  }
  /** Indexes each (key, value) pair of `map` as a field named after the key. */
  implicit def mapToDocument[T: ClassTag](map: Map[String, T]): Document = {
    val doc = new Document
    map.foreach { case (key, value) =>
      typeToDocument(doc, key, value)
    }
    doc
  }
  /**
   * Implicit conversion for all product types, such as case classes and Tuples;
   * fields are named after the declared field names.
   *
   * NOTE(review): relies on `getDeclaredFields` ordering lining up with
   * `productIterator` — holds for plain case classes; verify for classes that
   * declare extra non-constructor fields.
   *
   * @param s product instance to index
   * @tparam T product type
   * @return a Lucene document with one field per product element
   */
  implicit def productTypeToDocument[T <: Product : ClassTag](s: T): Document = {
    val doc = new Document
    val fieldNames = s.getClass.getDeclaredFields.map(_.getName).toIterator
    val fieldValues = s.productIterator
    fieldValues.zip(fieldNames).foreach { case (elem, fieldName) =>
      typeToDocument(doc, fieldName, elem)
    }
    doc
  }
  /**
   * Implicit conversion for a Spark [[Row]]; used for DataFrames.
   * Each column is indexed under its schema field name.
   *
   * @param row row to index
   * @return a Lucene document with one field per schema column
   */
  implicit def sparkRowToDocument(row: Row): Document = {
    val doc = new Document
    row.schema.fieldNames.foreach { fieldName =>
      typeToDocument(doc, fieldName, row.get(row.fieldIndex(fieldName)))
    }
    doc
  }
}
| zouzias/spark-lucenerdd | src/main/scala/org/zouzias/spark/lucenerdd/facets/package.scala | Scala | apache-2.0 | 5,294 |
package pl.writeonly.re.shared.scalapipe
import utest._
// Verifies the |> ("pipe into") operator provided by the ScalaPipeOps mixin:
// `a |> f` must evaluate to `f(a)` for both lambdas and eta-expanded methods.
object ScalaPipeOptWithTraitTest extends TestSuite with ScalaPipeOps {
  // Named method used to check that |> accepts a method reference (eta expansion).
  def add2(x: Int): Int = x + 2
  override val tests: Tests = Tests {
    'into_with_lambda - {
      // 1 |> (_ + 2) should behave exactly like (_ + 2)(1).
      val result = 1 |> (_ + 2)
      result ==> 3
    }
    'into_with_method - {
      // Same pipe, but with a method reference instead of a lambda.
      val result = 1 |> add2
      result ==> 3
    }
  }
}
| writeonly/resentiment | re/shared/src/test/scala/pl/writeonly/re/shared/scalapipe/ScalaPipeOptWithTraitTest.scala | Scala | mit | 369 |
package org.jetbrains.plugins.scala.lang.psi.api.base.types
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScRefineStat extends ScalaPsiElement {
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScRefineStat.scala | Scala | apache-2.0 | 229 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTablePartition}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.carbondata.common.annotations.{InterfaceAudience, InterfaceStability}
import org.apache.carbondata.core.metadata.schema.table.column.{ColumnSchema => ColumnSchema}
/**
* This interface defines those common api used by carbon for spark-2.1 and spark-2.2 integration,
* but are not defined in SessionCatalog or HiveSessionCatalog to give contract to the
* Concrete implementation classes.
* For example CarbonSessionCatalog defined in 2.1 and 2.2.
*
*/
@InterfaceAudience.Internal
@InterfaceStability.Stable
trait CarbonSessionCatalog {
  /**
   * Implementation to be provided by each CarbonSessionCatalog based on the
   * ExternalCatalog in use.
   *
   * @return the Hive client used to run DDL statements against the metastore
   */
  def getClient(): org.apache.spark.sql.hive.client.HiveClient
  /**
   * Returns the CarbonEnv instance bound to this session.
   *
   * @return the session's CarbonEnv
   */
  def getCarbonEnv(): CarbonEnv
  /**
   * This is an alternate way of getting partition information. It first fetches all
   * partitions from Hive and then applies the filter client-side, instead of querying
   * Hive along with the filters.
   *
   * @param partitionFilters predicates used to prune the fetched partitions
   * @param sparkSession active Spark session
   * @param identifier table whose partitions are listed
   * @return the partitions remaining after applying `partitionFilters`
   */
  def getPartitionsAlternate(partitionFilters: Seq[Expression], sparkSession: SparkSession,
      identifier: TableIdentifier): Seq[CatalogTablePartition]
  /**
   * Updates the storage format with the new location information.
   *
   * @param path new table location
   * @param storage storage format to rewrite
   * @param newTableName new table name
   * @param dbName database name
   * @return the storage format pointing at the new location
   */
  def updateStorageLocation(
      path: Path,
      storage: CatalogStorageFormat,
      newTableName: String,
      dbName: String): CatalogStorageFormat
  /**
   * Renames a table by running two Hive DDL statements: a RENAME TO followed by a
   * SERDEPROPERTIES update that rewrites CarbonData's tableName/dbName/tablePath
   * serde properties to match the new identity.
   *
   * NOTE(review): both identifiers are assumed to carry a database
   * (`database.get` throws NoSuchElementException if absent) — confirm that all
   * callers pass fully-qualified identifiers.
   *
   * @param oldTableIdentifier old table identifier
   * @param newTableIdentifier new table identifier
   * @param newTablePath new table path
   */
  def alterTableRename(oldTableIdentifier: TableIdentifier,
      newTableIdentifier: TableIdentifier,
      newTablePath: String): Unit = {
    getClient().runSqlHive(
      s"ALTER TABLE ${ oldTableIdentifier.database.get }.${ oldTableIdentifier.table } " +
      s"RENAME TO ${ oldTableIdentifier.database.get }.${ newTableIdentifier.table }")
    getClient().runSqlHive(
      s"ALTER TABLE ${ oldTableIdentifier.database.get }.${ newTableIdentifier.table } " +
      s"SET SERDEPROPERTIES" +
      s"('tableName'='${ newTableIdentifier.table }', " +
      s"'dbName'='${ oldTableIdentifier.database.get }', 'tablePath'='${ newTablePath }')")
  }
  /**
   * Updates the table's TBLPROPERTIES with the given schema parts.
   *
   * NOTE(review): `cols` is unused in this default implementation — presumably it
   * is consumed by overriding implementations; verify before removing.
   *
   * @param tableIdentifier table identifier (database assumed present, see above)
   * @param schemaParts serialized schema properties to set
   * @param cols columns affected by the change (unused here)
   */
  def alterTable(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[ColumnSchema]]): Unit = {
    getClient()
      .runSqlHive(s"ALTER TABLE ${ tableIdentifier.database.get }.${ tableIdentifier.table } " +
      s"SET TBLPROPERTIES(${ schemaParts })")
  }
  /**
   * Adds new column(s) to the table.
   *
   * @param tableIdentifier table identifier
   * @param schemaParts serialized schema properties
   * @param cols columns to add
   */
  def alterAddColumns(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[ColumnSchema]]): Unit
  /**
   * Drops column(s) from the table.
   *
   * @param tableIdentifier table identifier
   * @param schemaParts serialized schema properties
   * @param cols columns to drop
   */
  def alterDropColumns(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[ColumnSchema]]): Unit
  /**
   * Changes the data type of a column (or renames it) in the schema.
   *
   * @param tableIdentifier table identifier
   * @param schemaParts serialized schema properties
   * @param cols columns whose type/name changes
   */
  def alterColumnChangeDataTypeOrRename(tableIdentifier: TableIdentifier,
      schemaParts: String,
      cols: Option[Seq[ColumnSchema]]): Unit
}
| manishgupta88/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala | Scala | apache-2.0 | 4,887 |
package spinoco.protocol.http.codec
import java.time.ZonedDateTime
import org.scalacheck.Prop._
import org.scalacheck.Properties
import scodec.{Attempt, DecodeResult}
import scodec.bits.BitVector
import spinoco.protocol.http.{HttpResponseHeader, HttpStatusCode, HttpVersion}
import spinoco.protocol.http.header._
import spinoco.protocol.http.header.value.{ProductDescription, ServerProduct}
import spinoco.protocol.mime.{ContentType, MediaType}
/**
 * Property checks for the default [[HttpResponseHeaderCodec]]: decoding and
 * encoding of an HTTP/1.1 response status line plus common headers, covering
 * status lines both with and without a reason phrase.
 */
object HttpResponseHeaderCodecSpec extends Properties("HttpResponseHeaderCodec") {

  // Decoding a complete response header yields the typed model and no
  // remaining bits.
  property("Response.decode") = secure {
    HttpResponseHeaderCodec.defaultCodec.decode(BitVector(
      Seq(
        "HTTP/1.1 200 OK"
        , "Date: Mon, 27 Jul 2009 12:28:53 GMT"
        , "Server: Apache/2.2.14"
        , "Last-Modified: Wed, 22 Jul 2009 19:15:56 GMT"
        , "Content-Length: 88"
        , "Content-Type: text/html"
        , "Connection: Closed"
      ).mkString("\\r\\n").getBytes
    )) ?= Attempt.successful(DecodeResult(
      HttpResponseHeader(
        status = HttpStatusCode.Ok
        , reason = "OK"
        , headers = List(
          Date(ZonedDateTime.parse("2009-07-27T12:28:53+00:00").toLocalDateTime)
          , Server(ServerProduct(List(ProductDescription("Apache",Some("2.2.14")))))
          , `Last-Modified`(ZonedDateTime.parse("2009-07-22T19:15:56+00:00").toLocalDateTime)
          , `Content-Length`(88)
          , `Content-Type`(ContentType.TextContent(MediaType.`text/html`, None))
          , Connection(List("Closed"))
        )
        , version = HttpVersion.V1_1
      )
      , BitVector.empty))
  }

  // Encoding renders the status line (with a custom reason phrase) and each
  // header joined by CRLF.
  property("Response.encode") = secure {
    HttpResponseHeaderCodec.defaultCodec.encode(
      HttpResponseHeader(
        status = HttpStatusCode.Ok
        , reason = "OK localised"
        , headers = List(
          Date(ZonedDateTime.parse("2009-07-27T12:28:53+00:00").toLocalDateTime)
          , Server(ServerProduct(List(ProductDescription("Apache",Some("2.2.14")))))
          , `Last-Modified`(ZonedDateTime.parse("2009-07-22T19:15:56+00:00").toLocalDateTime)
          , `Content-Length`(88)
          , `Content-Type`(ContentType.TextContent(MediaType.`text/html`, None))
          , Connection(List("Closed"))
        )
        , version = HttpVersion.V1_1
      )
    ).map(_.decodeAscii) ?= Attempt.successful(Right(
      Seq(
        "HTTP/1.1 200 OK localised"
        , "Date: Mon, 27 Jul 2009 12:28:53 GMT"
        , "Server: Apache/2.2.14"
        , "Last-Modified: Wed, 22 Jul 2009 19:15:56 GMT"
        , "Content-Length: 88"
        , "Content-Type: text/html"
        , "Connection: Closed"
      ).mkString("\\r\\n")
    ))
  }

  // A status line without a reason phrase decodes with the default "OK" reason.
  property("Response.decode.no-reason") = secure {
    HttpResponseHeaderCodec.defaultCodec.decode(BitVector(
      Seq(
        "HTTP/1.1 200"
        , "Date: Mon, 27 Jul 2009 12:28:53 GMT"
        , "Server: Apache/2.2.14"
        , "Last-Modified: Wed, 22 Jul 2009 19:15:56 GMT"
        , "Content-Length: 88"
        , "Content-Type: text/html"
        , "Connection: Closed"
      ).mkString("\\r\\n").getBytes
    )) ?= Attempt.successful(DecodeResult(
      HttpResponseHeader(
        status = HttpStatusCode.Ok
        , reason = "OK"
        , headers = List(
          Date(ZonedDateTime.parse("2009-07-27T12:28:53+00:00").toLocalDateTime)
          , Server(ServerProduct(List(ProductDescription("Apache",Some("2.2.14")))))
          , `Last-Modified`(ZonedDateTime.parse("2009-07-22T19:15:56+00:00").toLocalDateTime)
          , `Content-Length`(88)
          , `Content-Type`(ContentType.TextContent(MediaType.`text/html`, None))
          , Connection(List("Closed"))
        )
        , version = HttpVersion.V1_1
      )
      , BitVector.empty))
  }

  // An empty reason is replaced by the status code's default reason ("OK")
  // when encoding.
  property("Response.encode.no-reason") = secure {
    HttpResponseHeaderCodec.defaultCodec.encode(
      HttpResponseHeader(
        status = HttpStatusCode.Ok
        , reason = ""
        , headers = List(
          Date(ZonedDateTime.parse("2009-07-27T12:28:53+00:00").toLocalDateTime)
          , Server(ServerProduct(List(ProductDescription("Apache",Some("2.2.14")))))
          , `Last-Modified`(ZonedDateTime.parse("2009-07-22T19:15:56+00:00").toLocalDateTime)
          , `Content-Length`(88)
          , `Content-Type`(ContentType.TextContent(MediaType.`text/html`, None))
          , Connection(List("Closed"))
        )
        , version = HttpVersion.V1_1
      )
    ).map(_.decodeAscii) ?= Attempt.successful(Right(
      Seq(
        "HTTP/1.1 200 OK"
        , "Date: Mon, 27 Jul 2009 12:28:53 GMT"
        , "Server: Apache/2.2.14"
        , "Last-Modified: Wed, 22 Jul 2009 19:15:56 GMT"
        , "Content-Length: 88"
        , "Content-Type: text/html"
        , "Connection: Closed"
      ).mkString("\\r\\n")
    ))
  }
}
| Spinoco/protocol | http/src/test/scala/spinoco/protocol/http/codec/HttpResponseHeaderCodecSpec.scala | Scala | mit | 4,753 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testing.common
/** Mode in which the test bridge executes. */
private[testing] sealed abstract class TestBridgeMode

private[testing] object TestBridgeMode {

  /** Run the full in-process test bridge. */
  case object FullBridge extends TestBridgeMode

  /** Run through the HTML runner, restricted to the given set of tests. */
  final case class HTMLRunner(tests: IsolatedTestSet) extends TestBridgeMode

  /**
   * Wire format: an Int tag (0 = FullBridge, 1 = HTMLRunner), followed by the
   * serialized test set in the HTMLRunner case. Unknown tags fail with an
   * IOException on deserialization.
   */
  implicit object TestBridgeModeSerializer extends Serializer[TestBridgeMode] {
    def serialize(x: TestBridgeMode, out: Serializer.SerializeState): Unit = x match {
      case FullBridge =>
        out.write(0)
      case HTMLRunner(tests) =>
        out.write(1)
        out.write(tests)
    }

    def deserialize(in: Serializer.DeserializeState): TestBridgeMode = {
      in.read[Int]() match {
        case 0 => FullBridge
        case 1 => HTMLRunner(in.read[IsolatedTestSet]())
        case n => throw new java.io.IOException(s"Unknown bridge mode: $n")
      }
    }
  }
}
| scala-js/scala-js | test-common/src/main/scala/org/scalajs/testing/common/TestBridgeMode.scala | Scala | apache-2.0 | 1,162 |
/*
Copyright 2017-2020 Erik Erlandson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.isarnproject.pipelines.spark.fi
import scala.util.Random._
import utest._
import org.isarnproject.testing.spark.SparkTestSuite
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.linalg.Vectors
/**
 * Tests TDigestFI feature importance on a linear regression fitted to
 * synthetic data where label = 5*x0 + 2*x2 (x1 is pure noise), checking that
 * the reported importances roughly follow those coefficients.
 */
object FeatureImportanceSuite extends SparkTestSuite {
  // set the seed before generating any data
  setSeed(7337L * 3773L)

  // 1000 rows of 3 iid standard normals; the label depends only on x0 and x2.
  val raw1 = Vector.fill(1000){ Vectors.dense(nextGaussian, nextGaussian, nextGaussian) }
    .map{v => (5*v(0) + 2*v(2), v)}
  val train1 = spark.createDataFrame(raw1).toDF("label", "features").cache()
  // Spark DataFrames and RDDs are lazy.
  // Make sure data are actually created prior to testing, or ordering
  // may change based on test ordering
  val count1 = train1.count()

  val tests = Tests {
    val lr = new LinearRegression()
      .setMaxIter(10)
      .setRegParam(0.3)
      .setElasticNetParam(0.8)
    val lrModel = lr.fit(train1)
    val fi = new TDigestFI().setCompression(0.3)
    val fiMod = fi.fit(train1)
      .setTargetModel(lrModel)
      .setDeviationMeasure("rms-dev")
      .setFeatureNames(Array.tabulate(3){j=>s"x$j"})
    val imp = fiMod.transform(train1)
    // Collect the (featureName, importance) rows into a map for easy lookup.
    val impmap = imp.collect.map { r =>
      (r.getAs[String](0), r.getAs[Double](1)) }
      .toMap
    // x0 drives the label hardest, x1 not at all, x2 moderately.
    approx(impmap("x0"), 6.65, 0.5)
    approx(impmap("x1"), 0.00, 0.001)
    approx(impmap("x2"), 2.50, 0.5)
  }
}
| isarn/isarn-sketches-spark | src/test/scala/org/isarnproject/pipelines/spark/fi.scala | Scala | apache-2.0 | 1,943 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.aliyun.logservice
import scala.collection.mutable.ArrayBuffer
import com.aliyun.openservices.log.common.Consts.CursorMode
import org.apache.spark.{InterruptibleIterator, Partition, SparkContext, TaskContext}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
/**
 * An RDD whose partitions each read one Loghub (Aliyun Log Service) shard
 * offset range, exposing the fetched log entries as strings.
 *
 * @param shardOffsets begin cursor per shard for this batch
 * @param duration batch duration in milliseconds; combined with
 *                 spark.streaming.loghub.maxRatePerShard it caps the number
 *                 of entries read per partition
 * @param commitBeforeNext whether the consumed cursor is committed before the
 *                         next batch is read
 */
class LoghubRDD(
    @transient sc: SparkContext,
    project: String,
    logStore: String,
    consumerGroup: String,
    accessKeyId: String,
    accessKeySecret: String,
    endpoint: String,
    duration: Long,
    zkParams: Map[String, String],
    shardOffsets: ArrayBuffer[ShardOffsetRange],
    checkpointDir: String,
    commitBeforeNext: Boolean = true) extends RDD[String](sc, Nil) with Logging {

  // Recreated on the executor side via initialize(); never serialized.
  @transient var client: LoghubClientAgent = _
  @transient var zkHelper: ZkHelper = _

  // When false, count() is estimated from service-side histograms instead of
  // scanning all data.
  private val enablePreciseCount: Boolean =
    sc.getConf.getBoolean("spark.streaming.loghub.count.precise.enable", true)

  // Wires up the JVM-wide client / ZkHelper singletons (see object LoghubRDD).
  private def initialize(): Unit = {
    client = LoghubRDD.getOrCreateLoghubClient(accessKeyId, accessKeySecret, endpoint)
    zkHelper = LoghubRDD.getZkHelper(zkParams, checkpointDir, project, logStore)
  }

  /**
   * Precise mode delegates to RDD.count(); otherwise the size is estimated by
   * querying Loghub histograms per shard. Estimation failures fall back to 0L
   * with a warning rather than failing the job.
   */
  override def count(): Long = {
    if (enablePreciseCount) {
      super.count()
    } else {
      // NOTE(review): `client` is only assigned by initialize(), which runs in
      // compute(); if it is still null here on the driver, the NPE lands in
      // the catch below and the count silently becomes 0L - confirm intended.
      try {
        val numShards = shardOffsets.size
        shardOffsets.map(shard => {
          val from = client.GetCursorTime(project, logStore, shard.shardId, shard.beginCursor)
            .GetCursorTime()
          val endCursor =
            client.GetCursor(project, logStore, shard.shardId, CursorMode.END).GetCursor()
          val to = client.GetCursorTime(project, logStore, shard.shardId, endCursor)
            .GetCursorTime()
          val res = client.GetHistograms(project, logStore, from, to, "", "*")
          if (!res.IsCompleted()) {
            logWarning(s"Failed to get complete count for [$project]-[$logStore]-" +
              s"[${shard.shardId}] from ${shard.beginCursor} to ${endCursor}, " +
              s"use ${res.GetTotalCount()} instead. " +
              s"This warning does not introduce any job failure, but may affect some information " +
              s"about this batch.")
          }
          (res.GetTotalCount() * 1.0D) / numShards
        }).sum.toLong
      } catch {
        case e: Exception =>
          logWarning(s"Failed to get statistics of rows in [$project]-[$logStore], use 0L " +
            s"instead. This warning does not introduce any job failure, but may affect some " +
            s"information about this batch.", e)
          0L
      }
    }
  }

  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[String] = {
    initialize()
    val shardPartition = split.asInstanceOf[ShardPartition]
    try {
      val loghubIterator = new LoghubIterator(zkHelper, client, project, logStore,
        consumerGroup, shardPartition.shardId, shardPartition.startCursor,
        shardPartition.count.toInt, context, commitBeforeNext, shardPartition.logGroupStep)
      new InterruptibleIterator[String](context, loghubIterator)
    } catch {
      // NOTE(review): any failure constructing the iterator is silently turned
      // into an empty partition; consider at least logging the exception.
      case _: Exception =>
        Iterator.empty.asInstanceOf[Iterator[String]]
    }
  }

  // One partition per shard; the per-partition element cap is
  // maxRatePerShard * batch duration (in seconds).
  override protected def getPartitions: Array[Partition] = {
    val rate = sc.getConf.get("spark.streaming.loghub.maxRatePerShard", "10000").toInt
    val logGroupStep = sc.getConf.get("spark.loghub.batchGet.step", "100").toInt
    val count = rate * duration / 1000
    shardOffsets.zipWithIndex.map { case (p, idx) =>
      new ShardPartition(id, idx, p.shardId, count, project, logStore, consumerGroup,
        accessKeyId, accessKeySecret, endpoint, p.beginCursor, logGroupStep)
        .asInstanceOf[Partition]
    }.toArray
  }

  // Partition descriptor carrying everything an executor needs to read a shard.
  private class ShardPartition(
      rddId: Int,
      partitionId: Int,
      val shardId: Int,
      val count: Long,
      project: String,
      logStore: String,
      consumerGroup: String,
      accessKeyId: String,
      accessKeySecret: String,
      endpoint: String,
      val startCursor: String,
      val logGroupStep: Int = 100) extends Partition with Logging {
    // NOTE(review): hashCode is structural (rddId/shardId) while equals keeps
    // reference semantics - confirm this asymmetry is intentional.
    override def hashCode(): Int = 41 * (41 + rddId) + shardId
    override def equals(other: Any): Boolean = super.equals(other)
    override def index: Int = partitionId
  }
}
// scalastyle:off
object LoghubRDD extends Logging {
  // JVM-wide singletons shared by every LoghubRDD instance in this process.
  private var zkHelper: ZkHelper = _
  private var loghubClient: LoghubClientAgent = _

  /**
   * Returns the shared Loghub client, creating it on first use.
   *
   * Synchronized: RDD.compute may run concurrently on several task threads,
   * and the previous unsynchronized check-then-act could create (and leak)
   * multiple clients or publish a partially constructed one.
   */
  def getOrCreateLoghubClient(accessKeyId: String, accessKeySecret: String,
      endpoint: String): LoghubClientAgent = this.synchronized {
    if (loghubClient == null) {
      loghubClient = new LoghubClientAgent(endpoint, accessKeyId, accessKeySecret)
    }
    loghubClient
  }

  /**
   * Returns the shared ZkHelper, creating and initializing it on first use.
   *
   * Synchronized for the same reason as above; also guarantees initialize()
   * completes before the helper is visible to other threads.
   *
   * NOTE(review): the cached helper ignores the arguments of later calls, so
   * all callers in one JVM are assumed to use the same checkpoint
   * dir/project/logstore - confirm this invariant if multiple streams share
   * one executor JVM.
   */
  def getZkHelper(zkParams: Map[String, String],
      checkpointDir: String,
      project: String,
      logstore: String): ZkHelper = this.synchronized {
    if (zkHelper == null) {
      zkHelper = new ZkHelper(zkParams, checkpointDir, project, logstore)
      zkHelper.initialize()
    }
    zkHelper
  }

  // Best-effort cleanup of the zk connection; finalize is unreliable as a
  // lifecycle hook but is kept for backward compatibility.
  override def finalize(): Unit = {
    super.finalize()
    try {
      this.synchronized {
        if (zkHelper != null) {
          zkHelper.close()
          zkHelper = null
        }
      }
    } catch {
      case e: Exception => logWarning("Exception when close zkClient.", e)
    }
  }
}
| aliyun/aliyun-emapreduce-sdk | emr-logservice/src/main/scala/org/apache/spark/streaming/aliyun/logservice/LoghubRDD.scala | Scala | artistic-2.0 | 6,133 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.mkldnn
import com.intel.analytics.bigdl.mkl.{DataType, Memory}
import org.scalatest.{FlatSpec, Matchers}
/**
 * Checks that MKL-DNN memory descriptors which differ only in data type (or
 * in heap vs. native backing) do not collide on hashCode.
 */
class MemoryDataSpec extends FlatSpec with Matchers {

  // Heap descriptors: same shape and format, different data types.
  "memory data hashCode comparison data" should "work correctly" in {
    val f32OnHeap = HeapData(Array(4, 3), Memory.Format.nc, DataType.F32)
    val s8OnHeap = HeapData(Array(4, 3), Memory.Format.nc, DataType.S8)
    (f32OnHeap.hashCode() == s8OnHeap.hashCode()) should not be (true)
  }

  // Native descriptors: same shape and format, different data types.
  "memory data hashCode comparison native" should "work correctly" in {
    val f32Native = NativeData(Array(3, 3), Memory.Format.nc, DataType.F32)
    val s8Native = NativeData(Array(3, 3), Memory.Format.nc, DataType.S8)
    (f32Native.hashCode() == s8Native.hashCode()) should not be (true)
  }

  // Heap vs. native descriptors of identical shape/format/type must differ too.
  "memory data hashCode comparison heap and native" should "work correctly" in {
    val onHeap = HeapData(Array(3, 3), Memory.Format.nc, DataType.F32)
    val offHeap = NativeData(Array(3, 3), Memory.Format.nc, DataType.F32)
    (onHeap.hashCode() == offHeap.hashCode()) should not be (true)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/mkldnn/MemoryDataSpec.scala | Scala | apache-2.0 | 1,651 |
package com.ing.bakery.testing
import java.net.InetSocketAddress
import cats.effect.{ContextShift, IO, Resource, Timer}
import com.ing.bakery.smoke.printGreen
import org.scalactic.source
import org.scalatest.compatible.Assertion
import org.scalatest.funspec.FixtureAsyncFunSpecLike
import org.scalatest.{ConfigMap, FutureOutcome, Tag}
import scala.concurrent.duration._
/** Abstracts the common test practices across the Bakery project. */
abstract class BakeryFunSpec extends FixtureAsyncFunSpecLike {

  implicit val contextShift: ContextShift[IO] =
    IO.contextShift(executionContext)

  implicit val timer: Timer[IO] =
    IO.timer(executionContext)

  /** Represents the "sealed resources context" that each test can use. */
  type TestContext

  /** Represents external arguments to the test context builder. */
  type TestArguments

  /** Creates a `Resource` which allocates and liberates the expensive resources each test can use.
    * For example web servers, network connection, database mocks.
    *
    * The objective of this function is to provide "sealed resources context" to each test, that means context
    * that other tests simply cannot touch.
    *
    * @param testArguments arguments built by the `argumentsBuilder` function.
    * @return the resources each test can use
    */
  def contextBuilder(testArguments: TestArguments): Resource[IO, TestContext]

  /** Refines the `ConfigMap` populated with the -Dkey=value arguments coming from the "sbt testOnly" command.
    *
    * @param config map populated with the -Dkey=value arguments.
    * @return the data structure used by the `contextBuilder` function.
    */
  def argumentsBuilder(config: ConfigMap): TestArguments

  /** Runs a single test with a clean sealed context.
    *
    * In debug mode (see `withFixture`) the outcome is printed, the resources
    * are held open (HoldCleanup listening on port 9191) for manual inspection,
    * and the original outcome is then re-raised.
    */
  def test(specText: String, testTags: Tag*)(runTest: TestContext => IO[Assertion])(implicit pos: source.Position): Unit =
    it(specText, testTags: _*) { testParams =>
      val (args, debugMode) = testParams
      if(debugMode)
        (for {
          context <- contextBuilder(args)
          // Run the test but capture failure instead of short-circuiting, so
          // the resources stay alive for inspection either way.
          holdResult <- Resource.eval(runTest(context).attempt)
          _ = println("Preliminary tests results:")
          _ = holdResult match {
            case Left(e) =>
              print(Console.RED)
              println(e.getMessage)
              e.printStackTrace()
              print(Console.RESET)
            case Right(r) =>
              println(Console.GREEN + r + Console.RESET)
          }
          _ <- HoldCleanup.resource(InetSocketAddress.createUnresolved("0.0.0.0", 9191))
          result <- Resource.eval(IO.fromEither(holdResult))
        } yield result).use(IO.pure).unsafeToFuture()
      else
        contextBuilder(args).use(runTest).unsafeToFuture()
    }

  /** Tries every second f until it succeeds or until 20 attempts have been made. */
  def eventually[A](f: IO[A]): IO[A] =
    within(1.minute, 20)(f)

  /** Like `eventually`, additionally printing a green message once f succeeds. */
  def eventually[A](message: String)(f: IO[A]): IO[A] =
    eventually(f).flatMap(a => printGreen(message) *> IO.pure(a))

  /** Retries the argument f until it succeeds or time/split attempts have been made,
    * there exists a delay of time for each retry.
    *
    * NOTE(review): the final recursion step (count < 1) runs f unguarded, so f
    * may execute up to split + 1 times and the last failure propagates as-is.
    */
  def within[A](time: FiniteDuration, split: Int)(f: IO[A]): IO[A] = {
    def inner(count: Int, times: FiniteDuration): IO[A] = {
      if (count < 1) f else f.attempt.flatMap {
        case Left(_) => IO.sleep(times) *> inner(count - 1, times)
        case Right(a) => IO(a)
      }
    }
    inner(split, time / split)
  }

  // Each test receives the parsed arguments plus a debug flag.
  override type FixtureParam = (TestArguments, Boolean)

  // Parses -Ddebug=yes/true/t/y from the config map; anything else is false.
  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    test.apply {
      val debugMode = test.configMap.getOrElse("debug", "false") match {
        case "yes" | "true" | "t" | "y" => true
        case _ => false
      }
      (argumentsBuilder(test.configMap), debugMode)
    }
}
| ing-bank/baker | bakery/integration-tests/src/test/scala/com/ing/bakery/testing/BakeryFunSpec.scala | Scala | mit | 3,852 |
package tw.joecwu.sumologic.scala.client
import scala.util.parsing.json.JSONFormat
import com.ning.http.client.Response
import org.json4s._
import org.json4s.native.JsonMethods._
import java.util.zip.GZIPInputStream
object JsonHelper{
  /**
   * Enriches a (possibly null) String with `toJsonStr`, which renders it as a
   * JSON string literal (quoted, with special characters escaped via
   * JSONFormat.quoteString) or as the literal `null` when the receiver is null.
   *
   * Implemented as an implicit value class instead of an implicit def
   * returning a structural type: call sites are unchanged, but the reflective
   * method dispatch the structural type required is avoided.
   */
  implicit class stringToJsonStr(private val v: String) extends AnyVal {
    def toJsonStr: String = if (v != null) "\"" + JSONFormat.quoteString(v) + "\"" else "null"
  }
}
/**
 * Decodes an async-http-client Response body into a json4s AST, transparently
 * inflating gzip-compressed bodies based on the Content-Encoding header.
 */
object GzipJson extends (Response => JValue) {
  def apply(r: Response) = {
    // content-encoding may be absent (null); only gunzip when it is exactly "gzip".
    if(r.getHeader("content-encoding")!=null && r.getHeader("content-encoding").equals("gzip")){
      // NOTE(review): the GZIPInputStream is not closed explicitly - confirm
      // the parser fully drains it / the client releases the connection.
      // The `true` flag is json4s's useBigDecimalForDouble - TODO confirm.
      (parse(new GZIPInputStream(r.getResponseBodyAsStream), true))
    }else
      (dispatch.as.String andThen (s => parse(StringInput(s), true)))(r)
  }
}
| joecwu/sumologic-scala-client | src/main/scala/tw/joecwu/sumologic/scala/client/JsonHelper.scala | Scala | apache-2.0 | 704 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.evaluation
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.apache.spark.sql.DataFrame
/**
 * Evaluator for multilabel classification.
 * @param predictionAndLabels an RDD of (predictions, labels) pairs,
 * both are non-null Arrays, each with unique elements.
 */
class MultilabelMetrics(predictionAndLabels: RDD[(Array[Double], Array[Double])]) {

  /**
   * An auxiliary constructor taking a DataFrame.
   * @param predictionAndLabels a DataFrame with two double array columns: prediction and label
   */
  private[mllib] def this(predictionAndLabels: DataFrame) =
    this(predictionAndLabels.map(r => (r.getSeq[Double](0).toArray, r.getSeq[Double](1).toArray)))

  // Number of documents (records) in the data set.
  private lazy val numDocs: Long = predictionAndLabels.count()

  // Number of distinct labels appearing in the ground truth.
  private lazy val numLabels: Long = predictionAndLabels.flatMap { case (_, labels) =>
    labels
  }.distinct().count()

  /**
   * Returns subset accuracy
   * (for equal sets of labels)
   */
  lazy val subsetAccuracy: Double = predictionAndLabels.filter { case (predictions, labels) =>
    predictions.deep == labels.deep
  }.count().toDouble / numDocs

  /**
   * Returns accuracy (mean Jaccard similarity of predicted and true label sets)
   */
  lazy val accuracy: Double = predictionAndLabels.map { case (predictions, labels) =>
    // Hoisted so the intersection is computed once per record.
    val overlap = labels.intersect(predictions).size
    overlap.toDouble / (labels.size + predictions.size - overlap)
  }.sum / numDocs

  /**
   * Returns Hamming-loss
   */
  lazy val hammingLoss: Double = predictionAndLabels.map { case (predictions, labels) =>
    labels.size + predictions.size - 2 * labels.intersect(predictions).size
  }.sum / (numDocs * numLabels)

  /**
   * Returns document-based precision averaged by the number of documents
   */
  lazy val precision: Double = predictionAndLabels.map { case (predictions, labels) =>
    if (predictions.size > 0) {
      predictions.intersect(labels).size.toDouble / predictions.size
    } else {
      // A document with no predictions contributes zero precision.
      0.0
    }
  }.sum / numDocs

  /**
   * Returns document-based recall averaged by the number of documents
   */
  lazy val recall: Double = predictionAndLabels.map { case (predictions, labels) =>
    labels.intersect(predictions).size.toDouble / labels.size
  }.sum / numDocs

  /**
   * Returns document-based f1-measure averaged by the number of documents
   */
  lazy val f1Measure: Double = predictionAndLabels.map { case (predictions, labels) =>
    2.0 * predictions.intersect(labels).size / (predictions.size + labels.size)
  }.sum / numDocs

  // Per-label true positives: label both predicted and present in ground truth.
  private lazy val tpPerClass = predictionAndLabels.flatMap { case (predictions, labels) =>
    predictions.intersect(labels)
  }.countByValue()

  // Per-label false positives: predicted but absent from ground truth.
  private lazy val fpPerClass = predictionAndLabels.flatMap { case (predictions, labels) =>
    predictions.diff(labels)
  }.countByValue()

  // Per-label false negatives: in ground truth but not predicted.
  private lazy val fnPerClass = predictionAndLabels.flatMap { case (predictions, labels) =>
    labels.diff(predictions)
  }.countByValue()

  /**
   * Returns precision for a given label (category)
   * @param label the label.
   */
  def precision(label: Double): Double = {
    // countByValue maps have no default: a label that was never correctly
    // predicted is absent, so default to 0 instead of throwing.
    val tp = tpPerClass.getOrElse(label, 0L)
    val fp = fpPerClass.getOrElse(label, 0L)
    if (tp + fp == 0) 0.0 else tp.toDouble / (tp + fp)
  }

  /**
   * Returns recall for a given label (category)
   * @param label the label.
   */
  def recall(label: Double): Double = {
    val tp = tpPerClass.getOrElse(label, 0L)
    val fn = fnPerClass.getOrElse(label, 0L)
    if (tp + fn == 0) 0.0 else tp.toDouble / (tp + fn)
  }

  /**
   * Returns f1-measure for a given label (category)
   * @param label the label.
   */
  def f1Measure(label: Double): Double = {
    val p = precision(label)
    val r = recall(label)
    if ((p + r) == 0) 0.0 else 2 * p * r / (p + r)
  }

  // Totals over all labels, reused by the micro-averaged metrics below.
  private lazy val sumTp = tpPerClass.foldLeft(0L) { case (sum, (_, tp)) => sum + tp }
  private lazy val sumFpClass = fpPerClass.foldLeft(0L) { case (sum, (_, fp)) => sum + fp }
  private lazy val sumFnClass = fnPerClass.foldLeft(0L) { case (sum, (_, fn)) => sum + fn }

  /**
   * Returns micro-averaged label-based precision
   * (equals to micro-averaged document-based precision)
   */
  lazy val microPrecision: Double = sumTp.toDouble / (sumTp + sumFpClass)

  /**
   * Returns micro-averaged label-based recall
   * (equals to micro-averaged document-based recall)
   */
  lazy val microRecall: Double = sumTp.toDouble / (sumTp + sumFnClass)

  /**
   * Returns micro-averaged label-based f1-measure
   * (equals to micro-averaged document-based f1-measure)
   */
  lazy val microF1Measure: Double = 2.0 * sumTp / (2 * sumTp + sumFnClass + sumFpClass)

  /**
   * Returns the sequence of labels in ascending order
   *
   * NOTE(review): derived from tpPerClass, so labels with zero true positives
   * are not listed - confirm whether the full label universe is expected here.
   */
  lazy val labels: Array[Double] = tpPerClass.keys.toArray.sorted
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala | Scala | apache-2.0 | 5,659 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.motion
import squants._
import squants.time.{ SecondTimeDerivative, TimeDerivative, TimeSquared }
/**
 * Yank: the first time derivative of [[squants.motion.Force]], expressed in
 * Newtons per second.
 *
 * @author garyKeorkunian
 * @since 0.1
 *
 * @param value Double magnitude of the yank
 * @param unit unit of measure the value is expressed in
 */
final class Yank private (val value: Double, val unit: YankUnit)
    extends Quantity[Yank]
    with TimeDerivative[Force]
    with SecondTimeDerivative[Momentum] {

  def dimension = Yank

  // Integrating over the unit time (1 s) yields a Force in Newtons.
  protected[squants] def timeIntegrated = Newtons(toNewtonsPerSecond)
  protected[squants] def time = Seconds(1)

  // Yank * time^2 = Momentum, applying one time factor at a time.
  def *(that: TimeSquared): Momentum = this * that.time1 * that.time2

  def toNewtonsPerSecond = to(NewtonsPerSecond)
}
/** Dimension companion for [[Yank]]: construction, parsing and unit metadata. */
object Yank extends Dimension[Yank] {
  private[motion] def apply[A](n: A, unit: YankUnit)(implicit num: Numeric[A]) = new Yank(num.toDouble(n), unit)
  // Delegates to Dimension.parse to build a Yank from a generic value.
  def apply(value: Any) = parse(value)
  def name = "Yank"
  def primaryUnit = NewtonsPerSecond
  def siUnit = NewtonsPerSecond
  def units = Set(NewtonsPerSecond)
}
/** A unit of measure for [[Yank]] values. */
trait YankUnit extends UnitOfMeasure[Yank] with UnitConverter {
  def apply[A](n: A)(implicit num: Numeric[A]) = Yank(n, this)
}

/** Newtons per second: the primary and SI unit of yank. */
object NewtonsPerSecond extends YankUnit with PrimaryUnit with SiUnit {
  val symbol = "N/s"
}

/** Implicit helpers providing `n.newtonsPerSecond` syntax and Numeric support. */
object YankConversions {
  lazy val newtonPerSecond = NewtonsPerSecond(1)

  implicit class YankConversions[A](n: A)(implicit num: Numeric[A]) {
    def newtonsPerSecond = NewtonsPerSecond(n)
  }

  implicit object YankNumeric extends AbstractQuantityNumeric[Yank](Yank.primaryUnit)
}
| garyKeorkunian/squants | shared/src/main/scala/squants/motion/Yank.scala | Scala | apache-2.0 | 1,974 |
package smr
package collection
import storage._;
import util._;
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.{Buffer, ArrayBuffer, ArrayBuilder, Builder}
import scala.collection._
/**
 * A collection whose elements live in shards managed by a distributed backend
 * while still exposing the standard `GenIterable` interface.
 *
 * @author dlwh
 */
trait DistributedIterable[+T] extends GenIterable[T] with DistributedIterableLike[T,DistributedIterable[T],Iterable[T]] with Sharded {
}
trait DistributedIterableLike[+T,+Repr<:DistributedIterable[T],+Local<:Iterable[T] with IterableLike[T,Local]] extends HackGenTraversableLike[T,Repr] {
private def TODO = throw new RuntimeException("TODO")
val distributor: Distributor with Storage;
protected[this] val distributedBuilderFactory:CanBuildDistributedFrom[Repr,T,Repr];
protected[this] def localBuilder:Builder[T,Local]
def named(name: String):DistributedIterable[T]
def isPersistent: Boolean
def repr: Repr = this.asInstanceOf[Repr]
def local: Local = {
val b = localBuilder;
for(t <- distributor.doTasks(repr.shards,new RetrieveTask[T]()).flatMap(_._2)) {
b += t
}
b.result;
}
def seq = local;
def collect[S, That](pf: PartialFunction[T, S])(implicit bf: CanBuildFrom[Repr, S, That]): That = bf ifDistributed { pbf =>
val builder = pbf(repr)
val results = distributor.doTasks(repr.shards, new CollectTask(builder.localBuilder,pf))
builder.resultFromSummaries(results);
} otherwise local.collect(pf)(bfToLocal(bf))
def map[B,That](f: T=>B)(implicit cbf: CanBuildFrom[Repr,B,That]) = cbf ifDistributed { dbf =>
val builder = dbf(repr);
val results = distributor.doTasks(repr.shards,new MapTask(builder.localBuilder,f));
builder.resultFromSummaries(results);
} otherwise {
local.map(f)(bfToLocal(cbf))
}
def aggregate[B](z: B)(seqop: (B, T) => B, combop: (B, B) => B) = {
distributor.doTasks(repr.shards,new AggregateTask[T,B](z, seqop,combop)).map(_._2).reduce(combop);
}
def flatMap[B, That](f: (T) => GenTraversableOnce[B])(implicit bf: CanBuildFrom[Repr, B, That]) = bf ifDistributed { dbf =>
val builder = dbf(repr);
val results = distributor.doTasks(repr.shards,new FlatMapTask(builder.localBuilder,f));
builder.resultFromSummaries(results);
} otherwise {
local.flatMap(f)(bfToLocal(bf))
}
def foreach[U](f: (T) => U) {
distributor.doTasks(repr.shards, new ForeachTask(f));
}
def filter(pred: (T) => Boolean) = {
val builder = distributedBuilderFactory apply (repr)
val results = distributor.doTasks(repr.shards,new FilterTask[T,builder.LocalSummary,Iterable[T]](builder.localBuilder, pred))
builder.resultFromSummaries(results);
}
def filterNot(pred: (T)=>Boolean) = filter(pred andThen { ! _});
def scan[B >: T, That](z: B)(op: (B, B) => B)(implicit cbf: CanBuildFrom[Repr, B, That]) = TODO
def scanLeft[B, That](z: B)(op: (B, T) => B)(implicit cbf: CanBuildFrom[Repr, B, That]):That = local.scanLeft(z)(op)(bfToLocal(cbf));
def scanRight[B, That](z: B)(op: (T, B) => B)(implicit cbf: CanBuildFrom[Repr, B, That]):That = local.scanRight(z)(op)(bfToLocal(cbf));
def fold[A1 >: T](z: A1)(op: (A1, A1) => A1) = aggregate(z)(op,op);
def reduce[U >: T](op: (U, U) => U) = {
val results = distributor.doTasks(repr.shards, new ReduceTask[T,U](op));
results.map(_._2).reduce(op);
}
def reduceOption[A1 >: T](op: (A1, A1) => A1) = if(isEmpty) None else Some(reduce(op));
def forall(pred: (T) => Boolean) = TODO
def exists(pred: (T) => Boolean) = TODO
def find(pred: (T) => Boolean) = TODO
def iterator = local.iterator;
def reduceLeft[B >: T](op: (B, T) => B) = local.reduceLeft(op);
def reduceRight[B >: T](op: (T, B) => B) = local.reduceRight(op);
def reduceLeftOption[B >: T](op: (B, T) => B) = local.reduceLeftOption(op);
def reduceRightOption[B >: T](op: (T, B) => B) = local.reduceRightOption(op);
def min[A1 >: T](implicit ord: Ordering[A1]) = reduce((a,b) => if(ord.lt(a,b)) a else b);
def max[A1 >: T](implicit ord: Ordering[A1]) = reduce((a,b) => if(ord.lt(a,b)) b else a);
def maxBy[S](f: T => S)(implicit cmp: Ordering[S]): T = {
if (repr.isEmpty) throw new UnsupportedOperationException("empty.maxBy")
reduce((x, y) => if (cmp.gteq(f(x), f(y))) x else y)
}
def minBy[S](f: T => S)(implicit cmp: Ordering[S]): T = {
if (repr.isEmpty) throw new UnsupportedOperationException("empty.minBy")
reduce((x, y) => if (cmp.lteq(f(x), f(y))) x else y)
}
def sum[A1 >: T](implicit num: Numeric[A1]) = reduce(num.plus _)
def product[A1 >: T](implicit num: Numeric[A1]) = reduce(num.times _)
def /:[B](z: B)(op: (B, T) => B) = foldLeft(z)(op)
def :\[B](z: B)(op: (T, B) => B) = foldRight(z)(op)
def foldLeft[B](z: B)(op: (B, T) => B) = local.foldLeft(z)(op);
def foldRight[B](z:B)(op: (T,B)=>B) = local.foldRight(z)(op);
def count(p: (T) => Boolean) = aggregate(0)({(x:Int,t: T) => if (p(t)) x+1 else x}, _ + _);
def hasDefiniteSize: Boolean = true;
def nonEmpty: Boolean = exists(_ => true);
// def isEmpty = !nonEmpty;
def size = aggregate(0)((x:Int, t: T)=>(x+1), _ + _);
private[this] def bfToLocal[B,That](cbf: CanBuildFrom[Repr,B,That]) = new CanBuildFrom[Local,B,That] {
def apply(from: Local) = cbf(repr);
def apply() = cbf();
}
  /* TODO: make these distributed collections */
  // All conversions below go through `local` — they appear to pull the entire
  // distributed collection to the caller; TODO confirm memory implications for
  // large data sets before use.
  def toIndexedSeq[A1 >: T] = local.toIndexedSeq
  def toList = local.toList
  def toStream = local.toStream;
  def toIterator = local.toIterator
  def toSeq = local.toSeq
  def toSet[A1 >: T]:Set[A1] = local.toSet[A1]
  def toMap[K, V](implicit ev: <:<[T, (K, V)]) = local.toMap;
  def toBuffer[A1 >: T]:Buffer[A1] = local.toBuffer[A1];
  // These two stay distributed: they just re-expose this collection.
  def toTraversable:GenTraversable[T] = repr;
  def toIterable = repr;
  def copyToArray[B >: T](xs: Array[B]) {local.copyToArray(xs)};
  def copyToArray[B >: T](xs: Array[B], start: Int) {local.copyToArray(xs,start)};
  def copyToArray[B >: T](xs: Array[B], start: Int, len: Int) {local.copyToArray(xs,start,len)};
  def toArray[A1 >: T](implicit evidence: ClassManifest[A1]) = local.toArray(evidence);
def mkString:String = mkString("");
def mkString(sep: String): String = mkString("",sep,"");
def mkString(start: String, sep: String, end: String): String = start + aggregate("")(_ + _.toString, _ + _) + end;
  // Renders like a standard collection, e.g. "DistributedIterable(a, b, c)".
  // Uses the local view's mkString, so this materializes all elements.
  override def toString = local.mkString(stringPrefix + "(", ", ", ")")
  def stringPrefix = "DistributedIterable";
  // Convenience wrapper: run a single task against a single shard via the distributor.
  protected def exec[R,S,T](shard: Shard, task: Task[T,R,S]) = {
    distributor.doTasks(IndexedSeq(shard), task);
  }
  // --- Unimplemented operations -------------------------------------------
  // Every method below evaluates `TODO` and will fail at runtime if called.
  // They exist only to satisfy the GenIterable/GenTraversable contracts.
  protected[this] def parCombiner = TODO
  def dropWhile(pred: (T) => Boolean) = TODO
  def span(pred: (T) => Boolean) = TODO
  def takeWhile(pred: (T) => Boolean) = TODO
  def splitAt(n: Int) = TODO
  def slice(unc_from: Int, unc_until: Int) = TODO
  def drop(n: Int) = TODO
  def take(n: Int) = TODO
  def groupBy[K](f: (T) => K) = TODO
  def partition(pred: (T) => Boolean) = TODO
  def ++[B >: T, That](that: GenTraversableOnce[B])(implicit bf: CanBuildFrom[DistributedIterable[T], B, That]) = TODO
  def zipAll[B, A1 >: T, That](that: GenIterable[B], thisElem: A1, thatElem: B)(implicit bf: CanBuildFrom[GenIterable[T], (A1, B), That]) = TODO
  def zipWithIndex[A1 >: T, That](implicit bf: CanBuildFrom[GenIterable[T], (A1, Int), That]) = TODO
  def zip[A1 >: T, B, That](that: GenIterable[B])(implicit bf: CanBuildFrom[GenIterable[T], (A1, B), That]) = TODO
  def sameElements[A1 >: T](that: GenIterable[A1]) = TODO
}
object DistributedIterable {
  /**
   * Builder factory used when a distributed operation produces a new
   * DistributedIterable. Only the one-argument form (seeded from an existing
   * collection, whose distributor is reused) is implemented; the 0-arg form
   * fails with sys.error("TODO").
   */
  implicit def builder[T,U]: CanBuildDistributedFrom[DistributedIterable[T],U,DistributedIterable[U]] = new CanBuildDistributedFrom[DistributedIterable[T],U,DistributedIterable[U]] {
    def apply(): DistributedBuilder[U, DistributedIterable[U]] = sys.error("TODO");
    def apply(t: DistributedIterable[T]): DistributedBuilder[U, DistributedIterable[U]] = {
      new DistributedBuilder[U,DistributedIterable[U]] {
        // Each shard's summary is just the number of elements it built.
        type LocalSummary = Int
        def summary() = innerBuilder.size;
        val innerBuilder = new ArrayBuffer[U];
        def +=(elem: U): this.type = {
          innerBuilder += elem;
          this;
        }
        def clear() {innerBuilder.clear()}
        // Results can only be assembled from per-shard summaries, never locally.
        def result() = sys.error("unsupported"); // ugh TODO
        def localBuilder = localSerBuilder[U];
        // Reassemble the distributed collection from (shards, size) pairs,
        // reusing the source collection's distributor.
        def resultFromSummaries(summaries: IndexedSeq[(Iterable[Shard], LocalSummary)]) = {
          val sizes = summaries.map(_._2);
          val uris = summaries.flatMap(_._1);
          new SimpleDistributedIterable[U](uris, sizes, t.distributor);
        }
      }
    }
  }
  // Per-shard builder: accumulates elements in an ArrayBuffer and reports its
  // size as the shard summary.
  private def localSerBuilder[Elem]:LocalBuilder[Elem,Iterable[Elem],Int] = new LocalBuilder[Elem,Iterable[Elem],Int] {
    def summary() = innerBuilder.size;
    val innerBuilder = new ArrayBuffer[Elem];
    def +=(elem: Elem): this.type = {
      innerBuilder += elem;
      this;
    }
    def result = innerBuilder;
    def copy = localSerBuilder[Elem]
    def clear() {innerBuilder.clear()};
  }
}
/**
 * Straightforward DistributedIterable backed by an explicit list of shards.
 *
 * @param shards       locations of the data partitions
 * @param sizes        element count of each shard, parallel to `shards`
 * @param distributor  execution/storage backend used to run tasks and name shards
 * @param isPersistent whether the shards have been given a durable name
 */
private[smr] class SimpleDistributedIterable[+T](val shards: IndexedSeq[Shard], sizes: IndexedSeq[Int],
                                            val distributor: Distributor with Storage,
                                            val isPersistent: Boolean = false) extends DistributedIterable[T] {
  protected[this] val distributedBuilderFactory:CanBuildDistributedFrom[DistributedIterable[T],T,DistributedIterable[T]] = DistributedIterable.builder[T,T]
  protected[this] def localBuilder = Iterable.canBuildFrom[T].apply();
  /** Persists the shards under `name` and returns a persistent view of this collection. */
  def named(name: String) = {
    val newShards = distributor.name(shards,name)
    new SimpleDistributedIterable[T](newShards,sizes,distributor, true)
  }
  // Total size is known up front from the per-shard counts; `sum` replaces the
  // hand-rolled foldLeft for clarity.
  override val size = sizes.sum
}
| dlwh/smr | src/main/scala/smr/collection/DistributedIterable.scala | Scala | apache-2.0 | 9,707 |
package com.ignition.frame
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Specs2 suite for the KafkaOutput frame-flow step: construction defaults,
// fluent helpers, and XML/JSON round-tripping.
class KafkaOutputSpec extends FrameFlowSpecification {
  "KafkaOutput" should {
    // Field and topic are mandatory; brokers and extra Kafka properties default empty.
    "construct with defaults" in {
      val step = KafkaOutput("field", "topic")
      step.field === "field"
      step.topic === "topic"
      step.brokers must beEmpty
      step.kafkaProperties must beEmpty
    }
    // The `brokers` helper splits on commas and trims whitespace.
    "build with helpers" in {
      val step = KafkaOutput("field", "topic") brokers ("b1, b2,b3") properties ("a" -> "b")
      step.brokers.toList === List("b1", "b2", "b3")
      step.kafkaProperties === Map("a" -> "b")
    }
    // toXml/fromXml must round-trip a fully-configured step.
    "save to/load from xml" in {
      val k1 = KafkaOutput("field", "topic") brokers ("b1, b2") properties ("a" -> "b")
      k1.toXml must ==/(
        <kafka-output>
          <field>field</field>
          <topic>topic</topic>
          <brokers>b1,b2</brokers>
          <kafkaProperties>
            <property name="a">b</property>
          </kafkaProperties>
        </kafka-output>)
      KafkaOutput.fromXml(k1.toXml) === k1
    }
    // JSON serialization only (no fromJson assertion here).
    "save to/load from json" in {
      import org.json4s.JsonDSL._
      val k1 = KafkaOutput("field", "topic") brokers ("b1, b2") properties ("a" -> "b")
      k1.toJson === ("tag" -> "kafka-output") ~ ("field" -> "field") ~ ("topic" -> "topic") ~
        ("brokers" -> "b1,b2") ~ ("kafkaProperties" -> List(("name" -> "a") ~ ("value" -> "b")))
    }
    "be unserializable" in assertUnserializable(KafkaOutput("field", "topic"))
  }
} | uralian/ignition | src/test/scala/com/ignition/frame/KafkaOutputSpec.scala | Scala | apache-2.0 | 1,536 |
// Databricks notebook source exported at Sat, 26 Dec 2015 13:47:34 UTC
// MAGIC %md # Step 1: Understand your Data
// MAGIC
// MAGIC The first step of doing anything with data is taking a look at it.
// MAGIC - What's the schema
// MAGIC - What's the distribution of data
// MAGIC - Is it dense or sparse
// MAGIC
// MAGIC This notebook contains some example data analysis techniques before
// MAGIC training a recommendation system. Therefore the dataset used should
// MAGIC have columns regarding a user, an item, and the rating of that user
// MAGIC for that item.
// COMMAND ----------
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
// COMMAND ----------
val df = ... // Load your data
// COMMAND ----------
// MAGIC %md Take a look at schema first
// COMMAND ----------
df.printSchema
// COMMAND ----------
// MAGIC %md Get some summary statistics for numerical columns
// COMMAND ----------
df.describe().show()
// COMMAND ----------
val userColumn = "YOUR_USER_COLUMN" // the name of the column containing user id's in the DataFrame
val itemColumn = "YOUR_ITEM_COLUMN" // the name of the column containing item id's in the DataFrame
val ratingColumn = "YOUR_RATING_COLUMN" // the name of the column containing ratings in the DataFrame
// COMMAND ----------
// MAGIC %md ### Let's look at the average ratings each user gave and the average rating each product received
// COMMAND ----------
val userRatings = df.groupBy(userColumn).agg(
mean(ratingColumn).as("avgRating"),
count(ratingColumn).as("numRatings")).sort($"avgRating".desc, $"numRatings".desc)
// COMMAND ----------
userRatings.show()
// COMMAND ----------
val prodRatings = df.groupBy(itemColumn).agg(
mean(ratingColumn).as("avgRating"),
count(ratingColumn).as("numRatings")).sort($"avgRating".desc, $"numRatings".desc)
// COMMAND ----------
prodRatings.show()
// COMMAND ----------
// MAGIC %md ### Let's create a histogram to check out the distribution of ratings
// MAGIC
// MAGIC We will use the Bucketizer available in spark.ml to create the histogram
// COMMAND ----------
import org.apache.spark.ml.feature._
// COMMAND ----------
df.select(min(ratingColumn), max(ratingColumn)).show()
// COMMAND ----------
val minRating = // copy values from above
val maxRating = // copy values from above
val step = 0.5 // change this to your liking
// COMMAND ----------
// create a range of values from minRating until maxRating by a meaningful step, e.g. 1 to 5 by 0.5
// Produces ceil(max - min) * (1/step) + 1 bucket edges: one per bucket plus the final edge.
// NOTE(review): (1 / step).toInt truncates — the edge count is only correct when `step`
// divides 1 evenly (0.5, 0.25, 0.2, ...); confirm before choosing other step sizes.
val splits = Array.tabulate(math.ceil(maxRating - minRating).toInt * (1 / step).toInt + 1)(i => i * step + minRating)
// COMMAND ----------
val bucketizer = new Bucketizer()
bucketizer.setSplits(splits)
bucketizer.setInputCol("avgRating")
bucketizer.setOutputCol("bucket")
// COMMAND ----------
// MAGIC %md Look at the distribution of the average ratings of users
// COMMAND ----------
val userRatingBuckets = bucketizer.transform(userRatings).cache()
// COMMAND ----------
userRatingBuckets
.groupBy("bucket")
.agg(count("*").as("numUsers"))
.sort("bucket")
.select(concat(concat($"bucket" * step + minRating, lit(" - ")), ($"bucket" + 1) * step + minRating).as("rating"), $"numUsers").show()
// COMMAND ----------
// MAGIC %md Look at the distribution of the average ratings of products
// COMMAND ----------
val prodRatingBuckets = bucketizer.transform(prodRatings).cache()
// COMMAND ----------
prodRatingBuckets
.groupBy("bucket")
.agg(count("*").as("numProds"))
.sort("bucket")
.select(concat(concat($"bucket" * step + minRating, lit(" - ")), ($"bucket" + 1) * step + minRating).as("rating"), $"numProds").show()
// COMMAND ----------
| brkyvz/spark-pipeline | scala/step-1/Exploration.scala | Scala | mit | 3,717 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, UUID}
import scala.concurrent.Future
import org.apache.spark.{MapOutputStatistics, SparkFunSuite, TaskContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.{FunctionIdentifier, InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Statistics, UnresolvedHint}
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, QueryStageExec}
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, BroadcastExchangeLike, ShuffleExchangeExec, ShuffleExchangeLike, ShuffleOrigin}
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.COLUMN_BATCH_SIZE
import org.apache.spark.sql.internal.StaticSQLConf.SPARK_SESSION_EXTENSIONS
import org.apache.spark.sql.types.{DataType, Decimal, IntegerType, LongType, Metadata, StructType}
import org.apache.spark.sql.vectorized.{ColumnarArray, ColumnarBatch, ColumnarMap, ColumnVector}
import org.apache.spark.unsafe.types.UTF8String
/**
* Test cases for the [[SparkSessionExtensions]].
*/
class SparkSessionExtensionSuite extends SparkFunSuite {
private def create(
builder: SparkSessionExtensionsProvider): Seq[SparkSessionExtensionsProvider] = Seq(builder)
private def stop(spark: SparkSession): Unit = {
spark.stop()
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
}
private def withSession(
builders: Seq[SparkSessionExtensionsProvider])(f: SparkSession => Unit): Unit = {
val builder = SparkSession.builder().master("local[1]")
builders.foreach(builder.withExtensions)
val spark = builder.getOrCreate()
try f(spark) finally {
stop(spark)
}
}
test("inject analyzer rule") {
withSession(Seq(_.injectResolutionRule(MyRule))) { session =>
assert(session.sessionState.analyzer.extendedResolutionRules.contains(MyRule(session)))
}
}
test("inject post hoc resolution analyzer rule") {
withSession(Seq(_.injectPostHocResolutionRule(MyRule))) { session =>
assert(session.sessionState.analyzer.postHocResolutionRules.contains(MyRule(session)))
}
}
test("inject check analysis rule") {
withSession(Seq(_.injectCheckRule(MyCheckRule))) { session =>
assert(session.sessionState.analyzer.extendedCheckRules.contains(MyCheckRule(session)))
}
}
test("inject optimizer rule") {
withSession(Seq(_.injectOptimizerRule(MyRule))) { session =>
assert(session.sessionState.optimizer.batches.flatMap(_.rules).contains(MyRule(session)))
}
}
test("SPARK-33621: inject a pre CBO rule") {
withSession(Seq(_.injectPreCBORule(MyRule))) { session =>
assert(session.sessionState.optimizer.preCBORules.contains(MyRule(session)))
}
}
test("inject spark planner strategy") {
withSession(Seq(_.injectPlannerStrategy(MySparkStrategy))) { session =>
assert(session.sessionState.planner.strategies.contains(MySparkStrategy(session)))
}
}
test("inject parser") {
val extension = create { extensions =>
extensions.injectParser((_: SparkSession, _: ParserInterface) => CatalystSqlParser)
}
withSession(extension) { session =>
assert(session.sessionState.sqlParser === CatalystSqlParser)
}
}
test("inject multiple rules") {
withSession(Seq(_.injectOptimizerRule(MyRule),
_.injectPlannerStrategy(MySparkStrategy))) { session =>
assert(session.sessionState.optimizer.batches.flatMap(_.rules).contains(MyRule(session)))
assert(session.sessionState.planner.strategies.contains(MySparkStrategy(session)))
}
}
test("inject stacked parsers") {
val extension = create { extensions =>
extensions.injectParser((_: SparkSession, _: ParserInterface) => CatalystSqlParser)
extensions.injectParser(MyParser)
extensions.injectParser(MyParser)
}
withSession(extension) { session =>
val parser = MyParser(session, MyParser(session, CatalystSqlParser))
assert(session.sessionState.sqlParser === parser)
}
}
test("inject function") {
val extensions = create { extensions =>
extensions.injectFunction(MyExtensions.myFunction)
}
withSession(extensions) { session =>
assert(session.sessionState.functionRegistry
.lookupFunction(MyExtensions.myFunction._1).isDefined)
}
}
case class MyHintRule(spark: SparkSession) extends Rule[LogicalPlan] {
val MY_HINT_NAME = Set("CONVERT_TO_EMPTY")
override def apply(plan: LogicalPlan): LogicalPlan =
plan.resolveOperators {
case h: UnresolvedHint if MY_HINT_NAME.contains(h.name.toUpperCase(Locale.ROOT)) =>
LocalRelation(h.output, data = Seq.empty, isStreaming = h.isStreaming)
}
}
test("inject custom hint rule") {
withSession(Seq(_.injectPostHocResolutionRule(MyHintRule))) { session =>
assert(
session.range(1).hint("CONVERT_TO_EMPTY").logicalPlan.isInstanceOf[LocalRelation],
"plan is expected to be a local relation"
)
}
}
test("inject adaptive query prep rule") {
val extensions = create { extensions =>
// inject rule that will run during AQE query stage preparation and will add custom tags
// to the plan
extensions.injectQueryStagePrepRule(session => MyQueryStagePrepRule())
// inject rule that will run during AQE query stage optimization and will verify that the
// custom tags were written in the preparation phase
extensions.injectColumnar(session =>
MyColumnarRule(MyNewQueryStageRule(), MyNewQueryStageRule()))
}
withSession(extensions) { session =>
session.sessionState.conf.setConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED, true)
assert(session.sessionState.queryStagePrepRules.contains(MyQueryStagePrepRule()))
assert(session.sessionState.columnarRules.contains(
MyColumnarRule(MyNewQueryStageRule(), MyNewQueryStageRule())))
import session.sqlContext.implicits._
val data = Seq((100L), (200L), (300L)).toDF("vals").repartition(1)
val df = data.selectExpr("vals + 1")
df.collect()
}
}
test("inject columnar AQE on") {
testInjectColumnar(true)
}
test("inject columnar AQE off") {
testInjectColumnar(false)
}
private def testInjectColumnar(enableAQE: Boolean): Unit = {
def collectPlanSteps(plan: SparkPlan): Seq[Int] = plan match {
case a: AdaptiveSparkPlanExec =>
assert(a.toString.startsWith("AdaptiveSparkPlan isFinalPlan=true"))
collectPlanSteps(a.executedPlan)
case _ => plan.collect {
case _: ReplacedRowToColumnarExec => 1
case _: ColumnarProjectExec => 10
case _: ColumnarToRowExec => 100
case s: QueryStageExec => collectPlanSteps(s.plan).sum
case _: MyShuffleExchangeExec => 1000
case _: MyBroadcastExchangeExec => 10000
}
}
val extensions = create { extensions =>
extensions.injectColumnar(session =>
MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))
}
withSession(extensions) { session =>
session.sessionState.conf.setConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED, enableAQE)
assert(session.sessionState.columnarRules.contains(
MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())))
import session.sqlContext.implicits._
// perform a join to inject a broadcast exchange
val left = Seq((1, 50L), (2, 100L), (3, 150L)).toDF("l1", "l2")
val right = Seq((1, 50L), (2, 100L), (3, 150L)).toDF("r1", "r2")
val data = left.join(right, $"l1" === $"r1")
// repartitioning avoids having the add operation pushed up into the LocalTableScan
.repartition(1)
val df = data.selectExpr("l2 + r2")
// execute the plan so that the final adaptive plan is available when AQE is on
df.collect()
val found = collectPlanSteps(df.queryExecution.executedPlan).sum
// 1 MyBroadcastExchangeExec
// 1 MyShuffleExchangeExec
// 1 ColumnarToRowExec
// 2 ColumnarProjectExec
// 1 ReplacedRowToColumnarExec
// so 11121 is expected.
assert(found == 11121)
// Verify that we get back the expected, wrong, result
val result = df.collect()
assert(result(0).getLong(0) == 101L) // Check that broken columnar Add was used.
assert(result(1).getLong(0) == 201L)
assert(result(2).getLong(0) == 301L)
}
}
test("reset column vectors") {
val session = SparkSession.builder()
.master("local[1]")
.config(COLUMN_BATCH_SIZE.key, 2)
.withExtensions { extensions =>
extensions.injectColumnar(session =>
MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) }
.getOrCreate()
try {
assert(session.sessionState.columnarRules.contains(
MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())))
import session.sqlContext.implicits._
val input = Seq((100L), (200L), (300L))
val data = input.toDF("vals").repartition(1)
val df = data.selectExpr("vals + 1")
val result = df.collect()
assert(result sameElements input.map(x => Row(x + 2)))
} finally {
stop(session)
}
}
test("use custom class for extensions") {
val session = SparkSession.builder()
.master("local[1]")
.config(SPARK_SESSION_EXTENSIONS.key, classOf[MyExtensions].getCanonicalName)
.getOrCreate()
try {
assert(session.sessionState.planner.strategies.contains(MySparkStrategy(session)))
assert(session.sessionState.analyzer.extendedResolutionRules.contains(MyRule(session)))
assert(session.sessionState.analyzer.postHocResolutionRules.contains(MyRule(session)))
assert(session.sessionState.analyzer.extendedCheckRules.contains(MyCheckRule(session)))
assert(session.sessionState.optimizer.batches.flatMap(_.rules).contains(MyRule(session)))
assert(session.sessionState.sqlParser.isInstanceOf[MyParser])
assert(session.sessionState.functionRegistry
.lookupFunction(MyExtensions.myFunction._1).isDefined)
assert(session.sessionState.columnarRules.contains(
MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())))
} finally {
stop(session)
}
}
test("use multiple custom class for extensions in the specified order") {
val session = SparkSession.builder()
.master("local[1]")
.config(SPARK_SESSION_EXTENSIONS.key, Seq(
classOf[MyExtensions2].getCanonicalName,
classOf[MyExtensions].getCanonicalName).mkString(","))
.getOrCreate()
try {
assert(session.sessionState.planner.strategies.containsSlice(
Seq(MySparkStrategy2(session), MySparkStrategy(session))))
val orderedRules = Seq(MyRule2(session), MyRule(session))
val orderedCheckRules = Seq(MyCheckRule2(session), MyCheckRule(session))
val parser = MyParser(session, CatalystSqlParser)
assert(session.sessionState.analyzer.extendedResolutionRules.containsSlice(orderedRules))
assert(session.sessionState.analyzer.postHocResolutionRules.containsSlice(orderedRules))
assert(session.sessionState.analyzer.extendedCheckRules.containsSlice(orderedCheckRules))
assert(session.sessionState.optimizer.batches.flatMap(_.rules).filter(orderedRules.contains)
.containsSlice(orderedRules ++ orderedRules)) // The optimizer rules are duplicated
assert(session.sessionState.sqlParser === parser)
assert(session.sessionState.functionRegistry
.lookupFunction(MyExtensions.myFunction._1).isDefined)
assert(session.sessionState.functionRegistry
.lookupFunction(MyExtensions2.myFunction._1).isDefined)
} finally {
stop(session)
}
}
test("allow an extension to be duplicated") {
val session = SparkSession.builder()
.master("local[1]")
.config(SPARK_SESSION_EXTENSIONS.key, Seq(
classOf[MyExtensions].getCanonicalName,
classOf[MyExtensions].getCanonicalName).mkString(","))
.getOrCreate()
try {
assert(session.sessionState.planner.strategies.count(_ === MySparkStrategy(session)) === 2)
assert(session.sessionState.analyzer.extendedResolutionRules.count(_ === MyRule(session)) ===
2)
assert(session.sessionState.analyzer.postHocResolutionRules.count(_ === MyRule(session)) ===
2)
assert(session.sessionState.analyzer.extendedCheckRules.count(_ === MyCheckRule(session)) ===
2)
assert(session.sessionState.optimizer.batches.flatMap(_.rules)
.count(_ === MyRule(session)) === 4) // The optimizer rules are duplicated
val outerParser = session.sessionState.sqlParser
assert(outerParser.isInstanceOf[MyParser])
assert(outerParser.asInstanceOf[MyParser].delegate.isInstanceOf[MyParser])
assert(session.sessionState.functionRegistry
.lookupFunction(MyExtensions.myFunction._1).isDefined)
} finally {
stop(session)
}
}
test("use the last registered function name when there are duplicates") {
val session = SparkSession.builder()
.master("local[1]")
.config(SPARK_SESSION_EXTENSIONS.key, Seq(
classOf[MyExtensions2].getCanonicalName,
classOf[MyExtensions2Duplicate].getCanonicalName).mkString(","))
.getOrCreate()
try {
val lastRegistered = session.sessionState.functionRegistry
.lookupFunction(FunctionIdentifier("myFunction2"))
assert(lastRegistered.isDefined)
assert(lastRegistered.get !== MyExtensions2.myFunction._2)
assert(lastRegistered.get === MyExtensions2Duplicate.myFunction._2)
} finally {
stop(session)
}
}
test("SPARK-35380: Loading extensions from ServiceLoader") {
val builder = SparkSession.builder().master("local[1]")
Seq(None, Some(classOf[YourExtensions].getName)).foreach { ext =>
ext.foreach(builder.config(SPARK_SESSION_EXTENSIONS.key, _))
val session = builder.getOrCreate()
try {
assert(session.sql("select get_fake_app_name()").head().getString(0) === "Fake App Name")
} finally {
stop(session)
}
}
}
test("SPARK-35673: user-defined hint and unrecognized hint in subquery") {
withSession(Seq(_.injectPostHocResolutionRule(MyHintRule))) { session =>
// unrecognized hint
QueryTest.checkAnswer(
session.sql(
"""
|SELECT *
|FROM (
| SELECT /*+ some_random_hint_that_does_not_exist */ 42
|)
|""".stripMargin),
Row(42) :: Nil)
// user-defined hint
QueryTest.checkAnswer(
session.sql(
"""
|SELECT *
|FROM (
| SELECT /*+ CONVERT_TO_EMPTY */ 42
|)
|""".stripMargin),
Nil)
}
}
}
// No-op logical-plan rule: the suite only checks that it was injected into the
// analyzer/optimizer rule lists, never that it rewrites anything.
case class MyRule(spark: SparkSession) extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan
}
case class MyCheckRule(spark: SparkSession) extends (LogicalPlan => Unit) {
override def apply(plan: LogicalPlan): Unit = { }
}
case class MySparkStrategy(spark: SparkSession) extends SparkStrategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = Seq.empty
}
// Pure pass-through parser: forwards every ParserInterface method to `delegate`.
// Exists so the suite can verify parser injection and stacking (MyParser wrapping
// MyParser wrapping CatalystSqlParser) via case-class equality.
case class MyParser(spark: SparkSession, delegate: ParserInterface) extends ParserInterface {
  override def parsePlan(sqlText: String): LogicalPlan =
    delegate.parsePlan(sqlText)
  override def parseExpression(sqlText: String): Expression =
    delegate.parseExpression(sqlText)
  override def parseTableIdentifier(sqlText: String): TableIdentifier =
    delegate.parseTableIdentifier(sqlText)
  override def parseFunctionIdentifier(sqlText: String): FunctionIdentifier =
    delegate.parseFunctionIdentifier(sqlText)
  override def parseMultipartIdentifier(sqlText: String): Seq[String] =
    delegate.parseMultipartIdentifier(sqlText)
  override def parseTableSchema(sqlText: String): StructType =
    delegate.parseTableSchema(sqlText)
  override def parseDataType(sqlText: String): DataType =
    delegate.parseDataType(sqlText)
}
object MyExtensions {
val myFunction = (FunctionIdentifier("myFunction"),
new ExpressionInfo(
"noClass",
"myDb",
"myFunction",
"usage",
"extended usage",
" Examples:",
"""
note
""",
"",
"3.0.0",
"""
deprecated
""",
""),
(_: Seq[Expression]) => Literal(5, IntegerType))
}
/**
 * Iterator of [[ColumnarBatch]] that applies `f` to each upstream batch and closes
 * the previously returned batch before producing (or probing for) the next one, so
 * at most one transformed batch is live at a time.
 *
 * @param itr upstream batch iterator
 * @param f   transformation applied to each upstream batch
 */
case class CloseableColumnBatchIterator(itr: Iterator[ColumnarBatch],
    f: ColumnarBatch => ColumnarBatch) extends Iterator[ColumnarBatch] {
  // The batch most recently handed out by next(); null when none is outstanding.
  var cb: ColumnarBatch = null
  private def closeCurrentBatch(): Unit = {
    if (cb != null) {
      // Idiom fix: close() is side-effecting, so call it with parentheses.
      cb.close()
      cb = null
    }
  }

  // Release the last outstanding batch when the task completes.
  // NOTE(review): TaskContext.get() returns null off the executor task path —
  // this assumes the iterator is always constructed inside a running task;
  // confirm at call sites.
  TaskContext.get().addTaskCompletionListener[Unit]((tc: TaskContext) => {
    closeCurrentBatch()
  })

  override def hasNext: Boolean = {
    closeCurrentBatch()
    itr.hasNext
  }

  override def next(): ColumnarBatch = {
    closeCurrentBatch()
    cb = f(itr.next())
    cb
  }
}
object NoCloseColumnVector extends Logging {
  // Wraps `cv` so close() becomes a no-op; idempotent — an already-wrapped
  // vector is returned as-is rather than double-wrapped.
  def wrapIfNeeded(cv: ColumnVector): NoCloseColumnVector = cv match {
    case ref: NoCloseColumnVector =>
      ref
    case vec => NoCloseColumnVector(vec)
  }
}
/**
* Provide a ColumnVector so ColumnarExpression can close temporary values without
* having to guess what type it really is.
*/
case class NoCloseColumnVector(wrapped: ColumnVector) extends ColumnVector(wrapped.dataType) {
private var refCount = 1
/**
* Don't actually close the ColumnVector this wraps. The producer of the vector will take
* care of that.
*/
override def close(): Unit = {
// Empty
}
override def hasNull: Boolean = wrapped.hasNull
override def numNulls(): Int = wrapped.numNulls
override def isNullAt(rowId: Int): Boolean = wrapped.isNullAt(rowId)
override def getBoolean(rowId: Int): Boolean = wrapped.getBoolean(rowId)
override def getByte(rowId: Int): Byte = wrapped.getByte(rowId)
override def getShort(rowId: Int): Short = wrapped.getShort(rowId)
override def getInt(rowId: Int): Int = wrapped.getInt(rowId)
override def getLong(rowId: Int): Long = wrapped.getLong(rowId)
override def getFloat(rowId: Int): Float = wrapped.getFloat(rowId)
override def getDouble(rowId: Int): Double = wrapped.getDouble(rowId)
override def getArray(rowId: Int): ColumnarArray = wrapped.getArray(rowId)
override def getMap(ordinal: Int): ColumnarMap = wrapped.getMap(ordinal)
override def getDecimal(rowId: Int, precision: Int, scale: Int): Decimal =
wrapped.getDecimal(rowId, precision, scale)
override def getUTF8String(rowId: Int): UTF8String = wrapped.getUTF8String(rowId)
override def getBinary(rowId: Int): Array[Byte] = wrapped.getBinary(rowId)
override protected def getChild(ordinal: Int): ColumnVector = wrapped.getChild(ordinal)
}
trait ColumnarExpression extends Expression with Serializable {
/**
* Returns true if this expression supports columnar processing through [[columnarEval]].
*/
def supportsColumnar: Boolean = true
/**
* Returns the result of evaluating this expression on the entire
* [[org.apache.spark.sql.vectorized.ColumnarBatch]]. The result of
* calling this may be a single [[org.apache.spark.sql.vectorized.ColumnVector]] or a scalar
* value. Scalar values typically happen if they are a part of the expression i.e. col("a") + 100.
* In this case the 100 is a [[org.apache.spark.sql.catalyst.expressions.Literal]] that
* [[org.apache.spark.sql.catalyst.expressions.Add]] would have to be able to handle.
*
* By convention any [[org.apache.spark.sql.vectorized.ColumnVector]] returned by [[columnarEval]]
* is owned by the caller and will need to be closed by them. This can happen by putting it into
* a [[org.apache.spark.sql.vectorized.ColumnarBatch]] and closing the batch or by closing the
* vector directly if it is a temporary value.
*/
def columnarEval(batch: ColumnarBatch): Any = {
throw new IllegalStateException(s"Internal Error ${this.getClass} has column support mismatch")
}
// We need to override equals because we are subclassing a case class
override def equals(other: Any): Boolean = {
if (!super.equals(other)) {
return false
}
return other.isInstanceOf[ColumnarExpression]
}
override def hashCode(): Int = super.hashCode()
}
object ColumnarBindReferences extends Logging {
  // Mostly copied from BoundAttribute.scala so we can do columnar processing
  /**
   * Replaces every AttributeReference in `expression` with a
   * ColumnarBoundReference pointing at its ordinal in `input`. When an
   * attribute is not found: returned unchanged if `allowFailures`, otherwise
   * the bind fails with sys.error.
   */
  def bindReference[A <: ColumnarExpression](
      expression: A,
      input: AttributeSeq,
      allowFailures: Boolean = false): A = {
    expression.transform { case a: AttributeReference =>
      val ordinal = input.indexOf(a.exprId)
      if (ordinal == -1) {
        if (allowFailures) {
          a
        } else {
          sys.error(s"Couldn't find $a in ${input.attrs.mkString("[", ",", "]")}")
        }
      } else {
        new ColumnarBoundReference(ordinal, a.dataType, input(ordinal).nullable)
      }
    }.asInstanceOf[A]
  }

  /**
   * A helper function to bind given expressions to an input schema.
   */
  def bindReferences[A <: ColumnarExpression](
      expressions: Seq[A],
      input: AttributeSeq): Seq[A] = {
    expressions.map(ColumnarBindReferences.bindReference(_, input))
  }
}
class ColumnarBoundReference(ordinal: Int, dataType: DataType, nullable: Boolean)
extends BoundReference(ordinal, dataType, nullable) with ColumnarExpression {
override def columnarEval(batch: ColumnarBatch): Any = {
// Because of the convention that the returned ColumnVector must be closed by the
// caller we wrap this column vector so a close is a NOOP, and let the original source
// of the vector close it.
NoCloseColumnVector.wrapIfNeeded(batch.column(ordinal))
}
}
class ColumnarAlias(child: ColumnarExpression, name: String)(
override val exprId: ExprId = NamedExpression.newExprId,
override val qualifier: Seq[String] = Seq.empty,
override val explicitMetadata: Option[Metadata] = None,
override val nonInheritableMetadataKeys: Seq[String] = Seq.empty)
extends Alias(child, name)(exprId, qualifier, explicitMetadata, nonInheritableMetadataKeys)
with ColumnarExpression {
override def columnarEval(batch: ColumnarBatch): Any = child.columnarEval(batch)
override protected def withNewChildInternal(newChild: Expression): ColumnarAlias =
new ColumnarAlias(newChild.asInstanceOf[ColumnarExpression], name)(exprId, qualifier,
explicitMetadata, nonInheritableMetadataKeys)
}
class ColumnarAttributeReference(
name: String,
dataType: DataType,
nullable: Boolean = true,
override val metadata: Metadata = Metadata.empty)(
override val exprId: ExprId = NamedExpression.newExprId,
override val qualifier: Seq[String] = Seq.empty[String])
extends AttributeReference(name, dataType, nullable, metadata)(exprId, qualifier)
with ColumnarExpression {
// No columnar eval is needed because this must be bound before it is evaluated
}
class ColumnarLiteral (value: Any, dataType: DataType) extends Literal(value, dataType)
with ColumnarExpression {
override def columnarEval(batch: ColumnarBatch): Any = value
}
/**
 * A version of ProjectExec that adds in columnar support.
 */
class ColumnarProjectExec(projectList: Seq[NamedExpression], child: SparkPlan)
  extends ProjectExec(projectList, child) {
  // Columnar execution is only offered when every projected expression supports it.
  override def supportsColumnar: Boolean =
    projectList.forall(_.asInstanceOf[ColumnarExpression].supportsColumnar)
  // Disable code generation
  override def supportCodegen: Boolean = false
  override def doExecuteColumnar() : RDD[ColumnarBatch] = {
    // Bind attribute references in the project list against the child's output.
    val boundProjectList: Seq[Any] =
      ColumnarBindReferences.bindReferences(
        projectList.asInstanceOf[Seq[ColumnarExpression]], child.output)
    val rdd = child.executeColumnar()
    rdd.mapPartitions((itr) => CloseableColumnBatchIterator(itr,
      (cb) => {
        // Evaluate each bound expression against the incoming batch; the output batch
        // keeps the incoming row count.
        val newColumns = boundProjectList.map(
          expr => expr.asInstanceOf[ColumnarExpression].columnarEval(cb).asInstanceOf[ColumnVector]
        ).toArray
        new ColumnarBatch(newColumns, cb.numRows())
      })
    )
  }
  // We have to override equals because subclassing a case class like ProjectExec is not that clean
  // One of the issues is that the generated equals will see ColumnarProjectExec and ProjectExec
  // as being equal and this can result in the withNewChildren method not actually replacing
  // anything
  override def equals(other: Any): Boolean = {
    if (!super.equals(other)) {
      return false
    }
    return other.isInstanceOf[ColumnarProjectExec]
  }
  override def hashCode(): Int = super.hashCode()
  override def withNewChildInternal(newChild: SparkPlan): ColumnarProjectExec =
    new ColumnarProjectExec(projectList, newChild)
}
/**
 * A version of add that supports columnar processing for longs. This version is broken
 * on purpose so it adds the numbers plus 1 so that the tests can show that it was replaced.
 */
class BrokenColumnarAdd(
    left: ColumnarExpression,
    right: ColumnarExpression,
    failOnError: Boolean = false)
  extends Add(left, right, failOnError) with ColumnarExpression {
  override def supportsColumnar(): Boolean = left.supportsColumnar && right.supportsColumnar
  override def columnarEval(batch: ColumnarBatch): Any = {
    var lhs: Any = null
    var rhs: Any = null
    try {
      lhs = left.columnarEval(batch)
      rhs = right.columnarEval(batch)
      // Builds the result vector; the deliberate "+ 1" (BUG) lets tests detect that this
      // rule replaced the real Add.
      def filled(valueAt: Int => Long): OnHeapColumnVector = {
        val out = new OnHeapColumnVector(batch.numRows(), dataType)
        var i = 0
        while (i < batch.numRows()) {
          out.appendLong(valueAt(i))
          i += 1
        }
        out
      }
      (lhs, rhs) match {
        case (null, _) | (_, null) =>
          null
        case (l: ColumnVector, r: ColumnVector) =>
          filled(i => l.getLong(i) + r.getLong(i) + 1) // BUG to show we replaced Add
        case (l, r: ColumnVector) =>
          filled(i => l.asInstanceOf[Long] + r.getLong(i) + 1) // BUG to show we replaced Add
        case (l: ColumnVector, r) =>
          filled(i => l.getLong(i) + r.asInstanceOf[Long] + 1) // BUG to show we replaced Add
        case (l, r) =>
          nullSafeEval(l, r)
      }
    } finally {
      // Inputs are consumed here, so any vector operands must be closed.
      lhs match {
        case v: ColumnVector => v.close()
        case _ =>
      }
      rhs match {
        case v: ColumnVector => v.close()
        case _ =>
      }
    }
  }
  override def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): BrokenColumnarAdd =
    new BrokenColumnarAdd(
      left = newLeft.asInstanceOf[ColumnarExpression],
      right = newRight.asInstanceOf[ColumnarExpression], failOnError)
}
/** Thrown by the replacement rules below when an expression/plan cannot be made columnar. */
class CannotReplaceException(str: String) extends RuntimeException(str) {
}
/**
 * Test rule that rewrites supported plans/expressions into the columnar versions defined
 * above. Long-typed Add becomes [[BrokenColumnarAdd]] (which adds an extra 1) so the tests
 * can observe that replacement really happened; exchanges are wrapped in marker nodes.
 */
case class PreRuleReplaceAddWithBrokenVersion() extends Rule[SparkPlan] {
  // Recursively translates an expression tree into its columnar counterpart, throwing
  // CannotReplaceException for anything unsupported.
  def replaceWithColumnarExpression(exp: Expression): ColumnarExpression = exp match {
    case a: Alias =>
      new ColumnarAlias(replaceWithColumnarExpression(a.child),
        a.name)(a.exprId, a.qualifier, a.explicitMetadata, a.nonInheritableMetadataKeys)
    case att: AttributeReference =>
      new ColumnarAttributeReference(att.name, att.dataType, att.nullable,
        att.metadata)(att.exprId, att.qualifier)
    case lit: Literal =>
      new ColumnarLiteral(lit.value, lit.dataType)
    case add: Add if (add.dataType == LongType) &&
        (add.left.dataType == LongType) &&
        (add.right.dataType == LongType) =>
      // Add only supports Longs for now.
      new BrokenColumnarAdd(replaceWithColumnarExpression(add.left),
        replaceWithColumnarExpression(add.right))
    case exp =>
      throw new CannotReplaceException(s"expression " +
        s"${exp.getClass} ${exp} is not currently supported.")
  }
  def replaceWithColumnarPlan(plan: SparkPlan): SparkPlan =
    try {
      plan match {
        case e: ShuffleExchangeExec =>
          // note that this is not actually columnar but demonstrates that exchanges can
          // be replaced.
          val replaced = e.withNewChildren(e.children.map(replaceWithColumnarPlan))
          MyShuffleExchangeExec(replaced.asInstanceOf[ShuffleExchangeExec])
        case e: BroadcastExchangeExec =>
          // note that this is not actually columnar but demonstrates that exchanges can
          // be replaced.
          val replaced = e.withNewChildren(e.children.map(replaceWithColumnarPlan))
          MyBroadcastExchangeExec(replaced.asInstanceOf[BroadcastExchangeExec])
        case plan: ProjectExec =>
          new ColumnarProjectExec(plan.projectList.map((exp) =>
            replaceWithColumnarExpression(exp).asInstanceOf[NamedExpression]),
            replaceWithColumnarPlan(plan.child))
        case p =>
          logWarning(s"Columnar processing for ${p.getClass} is not currently supported.")
          p.withNewChildren(p.children.map(replaceWithColumnarPlan))
      }
    } catch {
      // An unsupported expression anywhere below leaves this plan node unchanged.
      // Fixed: the two concatenated literals previously produced "supportedbecause"
      // (missing separator between the sentences).
      case exp: CannotReplaceException =>
        logWarning(s"Columnar processing for ${plan.getClass} is not currently supported " +
          s"because ${exp.getMessage}")
        plan
    }
  override def apply(plan: SparkPlan): SparkPlan = replaceWithColumnarPlan(plan)
}
/**
 * Custom Exchange used in tests to demonstrate that shuffles can be replaced regardless of
 * whether AQE is enabled. Every operation is forwarded to the wrapped ShuffleExchangeExec.
 */
case class MyShuffleExchangeExec(delegate: ShuffleExchangeExec) extends ShuffleExchangeLike {
  override def numMappers: Int = delegate.numMappers
  override def numPartitions: Int = delegate.numPartitions
  override def shuffleOrigin: ShuffleOrigin = {
    delegate.shuffleOrigin
  }
  override def mapOutputStatisticsFuture: Future[MapOutputStatistics] =
    delegate.submitShuffleJob
  override def getShuffleRDD(partitionSpecs: Array[ShufflePartitionSpec]): RDD[_] =
    delegate.getShuffleRDD(partitionSpecs)
  override def runtimeStatistics: Statistics = delegate.runtimeStatistics
  override def child: SparkPlan = delegate.child
  override protected def doExecute(): RDD[InternalRow] = delegate.execute()
  override def outputPartitioning: Partitioning = delegate.outputPartitioning
  // Falls back to the legacy copy mechanism rather than the case-class copy.
  override protected def withNewChildInternal(newChild: SparkPlan): SparkPlan =
    super.legacyWithNewChildren(Seq(newChild))
}
/**
 * Custom Exchange used in tests to demonstrate that broadcasts can be replaced regardless of
 * whether AQE is enabled. Every operation is forwarded to the wrapped BroadcastExchangeExec.
 */
case class MyBroadcastExchangeExec(delegate: BroadcastExchangeExec) extends BroadcastExchangeLike {
  override def runId: UUID = delegate.runId
  override def relationFuture: java.util.concurrent.Future[Broadcast[Any]] =
    delegate.relationFuture
  override def completionFuture: Future[Broadcast[Any]] = delegate.submitBroadcastJob
  override def runtimeStatistics: Statistics = delegate.runtimeStatistics
  override def child: SparkPlan = delegate.child
  override protected def doPrepare(): Unit = delegate.prepare()
  override protected def doExecute(): RDD[InternalRow] = delegate.execute()
  override def doExecuteBroadcast[T](): Broadcast[T] = delegate.executeBroadcast()
  override def outputPartitioning: Partitioning = delegate.outputPartitioning
  // Falls back to the legacy copy mechanism rather than the case-class copy.
  override protected def withNewChildInternal(newChild: SparkPlan): SparkPlan =
    super.legacyWithNewChildren(Seq(newChild))
}
/** Marker subclass of RowToColumnarExec inserted by MyPostRule so tests can detect it. */
class ReplacedRowToColumnarExec(override val child: SparkPlan)
  extends RowToColumnarExec(child) {
  // We have to override equals because subclassing a case class like ProjectExec is not that clean
  // One of the issues is that the generated equals will see ColumnarProjectExec and ProjectExec
  // as being equal and this can result in the withNewChildren method not actually replacing
  // anything
  override def equals(other: Any): Boolean = {
    if (!super.equals(other)) {
      return false
    }
    return other.isInstanceOf[ReplacedRowToColumnarExec]
  }
  override def hashCode(): Int = super.hashCode()
  override def withNewChildInternal(newChild: SparkPlan): ReplacedRowToColumnarExec =
    new ReplacedRowToColumnarExec(newChild)
}
/** Post-transition rule: rewrites every RowToColumnarExec into the marker subclass above. */
case class MyPostRule() extends Rule[SparkPlan] {
  override def apply(plan: SparkPlan): SparkPlan = plan match {
    case rc: RowToColumnarExec => new ReplacedRowToColumnarExec(rc.child)
    case plan => plan.withNewChildren(plan.children.map(apply))
  }
}
/** Simple ColumnarRule exposing the given pre/post columnar-transition rules. */
case class MyColumnarRule(pre: Rule[SparkPlan], post: Rule[SparkPlan]) extends ColumnarRule {
  override def preColumnarTransitions: Rule[SparkPlan] = pre
  override def postColumnarTransitions: Rule[SparkPlan] = post
}
/**
 * Extension installer used by the suite: registers one of each injectable component
 * (planner strategy, resolution/post-hoc/check/optimizer rules, parser, function and
 * a columnar rule pairing the broken Add replacement with MyPostRule).
 */
class MyExtensions extends (SparkSessionExtensions => Unit) {
  def apply(e: SparkSessionExtensions): Unit = {
    e.injectPlannerStrategy(MySparkStrategy)
    e.injectResolutionRule(MyRule)
    e.injectPostHocResolutionRule(MyRule)
    e.injectCheckRule(MyCheckRule)
    e.injectOptimizerRule(MyRule)
    e.injectParser(MyParser)
    e.injectFunction(MyExtensions.myFunction)
    // The session parameter is unused; the same columnar rule is built for any session.
    e.injectColumnar(session => MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))
  }
}
/** Shared tag/value used to verify the AQE query-prep rule ran before stage optimization. */
object QueryPrepRuleHelper {
  val myPrepTag: TreeNodeTag[String] = TreeNodeTag[String]("myPrepTag")
  val myPrepTagValue: String = "myPrepTagValue"
}
// this rule will run during AQE query preparation and will write custom tags to each node
case class MyQueryStagePrepRule() extends Rule[SparkPlan] {
  override def apply(plan: SparkPlan): SparkPlan = plan.transformDown {
    case plan =>
      // Tag every node so MyNewQueryStageRule can later assert the prep phase ran.
      plan.setTagValue(QueryPrepRuleHelper.myPrepTag, QueryPrepRuleHelper.myPrepTagValue)
      plan
  }
}
// this rule will run during AQE query stage optimization and will verify custom tags were
// already written during query preparation phase
case class MyNewQueryStageRule() extends Rule[SparkPlan] {
  override def apply(plan: SparkPlan): SparkPlan = plan.transformDown {
    // AdaptiveSparkPlanExec nodes are skipped: only other nodes carry the prep tag.
    case plan if !plan.isInstanceOf[AdaptiveSparkPlanExec] =>
      assert(plan.getTagValue(QueryPrepRuleHelper.myPrepTag).get ==
        QueryPrepRuleHelper.myPrepTagValue)
      plan
  }
}
// No-op rule / check / strategy fixtures registered by MyExtensions2 below.
case class MyRule2(spark: SparkSession) extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan
}
case class MyCheckRule2(spark: SparkSession) extends (LogicalPlan => Unit) {
  override def apply(plan: LogicalPlan): Unit = { }
}
case class MySparkStrategy2(spark: SparkSession) extends SparkStrategy {
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = Seq.empty
}
/** Registration tuple (identifier, info, builder) for "myFunction2", which always returns 5. */
object MyExtensions2 {
  val myFunction = (FunctionIdentifier("myFunction2"),
    new ExpressionInfo(
      "noClass",
      "myDb",
      "myFunction2",
      "usage",
      "extended usage",
      " Examples:",
      """
      note
      """,
      "",
      "3.0.0",
      """
      deprecated
      """,
      ""),
    (_: Seq[Expression]) => Literal(5, IntegerType))
}
/** Second extension installer: registers the *2 fixtures plus the Catalyst SQL parser. */
class MyExtensions2 extends (SparkSessionExtensions => Unit) {
  def apply(e: SparkSessionExtensions): Unit = {
    e.injectPlannerStrategy(MySparkStrategy2)
    e.injectResolutionRule(MyRule2)
    e.injectPostHocResolutionRule(MyRule2)
    e.injectCheckRule(MyCheckRule2)
    e.injectOptimizerRule(MyRule2)
    e.injectParser((_: SparkSession, _: ParserInterface) => CatalystSqlParser)
    e.injectFunction(MyExtensions2.myFunction)
  }
}
/**
 * Function registration carrying the same FunctionIdentifier ("myFunction2") as
 * MyExtensions2.myFunction — presumably used to exercise duplicate registration handling.
 */
object MyExtensions2Duplicate {
  val myFunction = (FunctionIdentifier("myFunction2"),
    new ExpressionInfo(
      "noClass",
      "myDb",
      "myFunction2",
      "usage",
      "extended usage",
      " Examples:",
      """
      note
      """,
      "",
      "3.0.0",
      """
      deprecated
      """,
      ""),
    (_: Seq[Expression]) => Literal(5, IntegerType))
}
/** Installer that only registers the duplicate-named function above. */
class MyExtensions2Duplicate extends (SparkSessionExtensions => Unit) {
  def apply(e: SparkSessionExtensions): Unit = {
    e.injectFunction(MyExtensions2Duplicate.myFunction)
  }
}
/** SparkSessionExtensionsProvider registering get_fake_app_name, which returns a constant. */
class YourExtensions extends SparkSessionExtensionsProvider {
  val getAppName = (FunctionIdentifier("get_fake_app_name"),
    new ExpressionInfo(
      "zzz.zzz.zzz",
      "",
      "get_fake_app_name"),
    (_: Seq[Expression]) => Literal("Fake App Name"))
  override def apply(v1: SparkSessionExtensions): Unit = {
    v1.injectFunction(getAppName)
  }
}
| chuckchen/spark | sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala | Scala | apache-2.0 | 38,118 |
package is.hail.variant.vsm
import is.hail.HailSuite
import is.hail.annotations.BroadcastRow
import is.hail.expr.ir
import is.hail.expr.ir.{ExecuteContext, Interpret, MatrixAnnotateRowsTable, TableLiteral, TableRange, TableValue}
import is.hail.types._
import is.hail.types.virtual.{TInt32, TStruct}
import is.hail.rvd.RVD
import is.hail.utils.FastIndexedSeq
import org.testng.annotations.Test
class PartitioningSuite extends HailSuite {
  /**
   * Annotates a 10-partition range matrix with an empty (zero-row) keyed table and forces
   * evaluation via count(). There are no explicit assertions: passing means no exception.
   */
  @Test def testShuffleOnEmptyRDD() {
    val typ = TableType(TStruct("tidx" -> TInt32), FastIndexedSeq("tidx"), TStruct.empty)
    val t = TableLiteral(TableValue(ctx,
      typ, BroadcastRow.empty(ctx), RVD.empty(typ.canonicalRVDType)))
    val rangeReader = ir.MatrixRangeReader(100, 10, Some(10))
    Interpret(
      MatrixAnnotateRowsTable(
        ir.MatrixRead(rangeReader.fullMatrixType, false, false, rangeReader),
        t,
        "foo",
        product = false),
      ctx, optimize = false)
      .rvd.count()
  }
  /**
   * Runs left/inner ordered joins with an empty RVD on either side; count() forces
   * evaluation. No explicit assertions — passing means no exception.
   */
  @Test def testEmptyRDDOrderedJoin() {
    val tv = Interpret.apply(TableRange(100, 6), ctx)
    val nonEmptyRVD = tv.rvd
    val rvdType = nonEmptyRVD.typ
    val emptyRVD = RVD.empty(rvdType)
    // NOTE(review): the lambda's `ctx` shadows the suite-level ctx used above.
    ExecuteContext.scoped() { ctx =>
      emptyRVD.orderedJoin(nonEmptyRVD, "left", (_, it) => it.map(_._1), rvdType, ctx).count()
      emptyRVD.orderedJoin(nonEmptyRVD, "inner", (_, it) => it.map(_._1), rvdType, ctx).count()
      nonEmptyRVD.orderedJoin(emptyRVD, "left", (_, it) => it.map(_._1), rvdType, ctx).count()
      nonEmptyRVD.orderedJoin(emptyRVD, "inner", (_, it) => it.map(_._1), rvdType, ctx).count()
    }
  }
}
| danking/hail | hail/src/test/scala/is/hail/variant/vsm/PartitioningSuite.scala | Scala | mit | 1,598 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.sync.otr
import com.waz.api.Verification
import com.waz.api.impl.ErrorResponse
import com.waz.api.impl.ErrorResponse.internalError
import com.waz.cache.LocalData
import com.waz.content.{ConversationStorage, MembersStorage, UsersStorage}
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.log.LogSE._
import com.waz.model.AssetData.RemoteData
import com.waz.model._
import com.waz.model.otr.ClientId
import com.waz.service.conversation.ConversationsService
import com.waz.service.otr.OtrService
import com.waz.service.push.PushService
import com.waz.service.{ErrorsService, UserService}
import com.waz.sync.SyncResult
import com.waz.sync.SyncResult.Failure
import com.waz.sync.client.AssetClient.{Metadata, Retention, UploadResponse}
import com.waz.sync.client.OtrClient.{ClientMismatch, EncryptedContent, MessageResponse}
import com.waz.sync.client._
import com.waz.threading.CancellableFuture
import com.waz.utils.crypto.AESUtils
import scala.concurrent.Future
import scala.concurrent.Future.successful
import scala.util.control.NonFatal
/** Encrypts and posts OTR (end-to-end encrypted) messages and assets, and resets sessions. */
trait OtrSyncHandler {
  /** Encrypts `message` for the active members of `convId` (or just `recipients` if given) and posts it. */
  def postOtrMessage(convId: ConvId, message: GenericMessage, recipients: Option[Set[UserId]] = None, nativePush: Boolean = true): Future[Either[ErrorResponse, RemoteInstant]]
  /** Uploads `data` via the v3 asset endpoint, AES-encrypting it first when `key` is present. */
  def uploadAssetDataV3(data: LocalData, key: Option[AESKey], mime: Mime = Mime.Default, retention: Retention): CancellableFuture[Either[ErrorResponse, RemoteData]]
  /** Sends a SessionReset client action to one specific client of `user` in `convId`. */
  def postSessionReset(convId: ConvId, user: UserId, client: ClientId): Future[SyncResult]
  /** Encrypts `message` for all accepted/blocked contacts plus team members and broadcasts it. */
  def broadcastMessage(message: GenericMessage, retry: Int = 0, previous: EncryptedContent = EncryptedContent.Empty): Future[Either[ErrorResponse, RemoteInstant]]
}
/**
 * Default [[OtrSyncHandler]]: encrypts payloads per recipient client via OtrService,
 * posts them through OtrClient/MessagesClient, and retries (up to 3 times) when the
 * backend reports missing clients, syncing sessions for those clients in between.
 */
class OtrSyncHandlerImpl(teamId: Option[TeamId],
                         selfClientId: ClientId,
                         otrClient: OtrClient,
                         msgClient: MessagesClient,
                         assetClient: AssetClient,
                         service: OtrService,
                         convsService: ConversationsService,
                         convStorage: ConversationStorage,
                         users: UserService,
                         members: MembersStorage,
                         errors: ErrorsService,
                         clientsSyncHandler: OtrClientsSyncHandler,
                         push: PushService,
                         usersStorage: UsersStorage) extends OtrSyncHandler with DerivedLogTag {
  import OtrSyncHandler._
  import com.waz.threading.Threading.Implicits.Background
  override def postOtrMessage(convId: ConvId, message: GenericMessage, recipients: Option[Set[UserId]] = None, nativePush: Boolean = true) = {
    import com.waz.utils.{RichEither, RichFutureEither}
    // Encrypts for every recipient client and posts; recurses with an incremented retry
    // count when the backend reports missing clients (after syncing their sessions).
    def encryptAndSend(msg: GenericMessage, external: Option[Array[Byte]] = None, retries: Int = 0, previous: EncryptedContent = EncryptedContent.Empty): ErrorOr[MessageResponse] =
      for {
        _ <- push.waitProcessing
        Some(conv) <- convStorage.get(convId)
        // Refuse to post into unverified conversations (handled in the recover below).
        _ = if (conv.verified == Verification.UNVERIFIED) throw UnverifiedException
        us <- recipients.fold(members.getActiveUsers(convId).map(_.toSet))(rs => Future.successful(rs))
        content <- service.encryptForUsers(us, msg, retries > 0, previous)
        resp <-
          if (content.estimatedSize < MaxContentSize)
            msgClient.postMessage(conv.remoteId, OtrMessage(selfClientId, content, external, nativePush), ignoreMissing = retries > 1, recipients).future
          else {
            // Payload too large: AES-encrypt the real message, ship the ciphertext as the
            // `external` blob, and OTR-encrypt only the key/sha (External proto) per client.
            verbose(l"Message content too big, will post as External. Estimated size: ${content.estimatedSize}")
            val key = AESKey()
            val (sha, data) = AESUtils.encrypt(key, GenericMessage.toByteArray(msg))
            val newMessage = GenericMessage(Uid(msg.messageId), Proto.External(key, sha))
            encryptAndSend(newMessage, Some(data)) //abandon retries and previous EncryptedContent
          }
        _ <- resp.map(_.deleted).mapFuture(service.deleteClients)
        _ <- resp.map(_.missing.keySet).mapFuture(convsService.addUnexpectedMembersToConv(conv.id, _))
        retry <- resp.flatMapFuture {
          case MessageResponse.Failure(ClientMismatch(_, missing, _, _)) if retries < 3 =>
            clientsSyncHandler.syncSessions(missing).flatMap { err =>
              if (err.isDefined)
                error(l"syncSessions for missing clients failed: $err")
              encryptAndSend(msg, external, retries + 1, content)
            }
          case _: MessageResponse.Failure =>
            successful(Left(internalError(s"postEncryptedMessage/broadcastMessage failed with missing clients after several retries")))
          case resp => Future.successful(Right(resp))
        }
      } yield retry
    encryptAndSend(message).recover {
      case UnverifiedException =>
        // Calling messages don't surface an "unverified" error to the conversation.
        if (!message.hasCalling)
          errors.addConvUnverifiedError(convId, MessageId(message.messageId))
        Left(ErrorResponse.Unverified)
      case NonFatal(e) => Left(ErrorResponse.internalError(e.getMessage))
    }.mapRight(_.mismatch.time)
  }
  override def broadcastMessage(message: GenericMessage, retry: Int = 0, previous: EncryptedContent = EncryptedContent.Empty): Future[Either[ErrorResponse, RemoteInstant]] =
    push.waitProcessing.flatMap { _ =>
      // Broadcast targets: all accepted/blocked contacts plus everyone on our team.
      def broadcastRecipients = for {
        acceptedOrBlocked <- users.acceptedOrBlockedUsers.head
        myTeam <- teamId.fold(Future.successful(Set.empty[UserData]))(id => usersStorage.getByTeam(Set(id)))
        myTeamIds = myTeam.map(_.id)
      } yield acceptedOrBlocked.keySet ++ myTeamIds
      broadcastRecipients.flatMap { recp =>
        verbose(l"recipients: $recp")
        for {
          content <- service.encryptForUsers(recp, message, useFakeOnError = retry > 0, previous)
          r <- otrClient.broadcastMessage(OtrMessage(selfClientId, content), ignoreMissing = retry > 1, recp).future
          res <- loopIfMissingClients(r, retry, () => broadcastMessage(message, retry + 1, content))
        } yield res
      }
    }
  // Shared mismatch handling for broadcasts: delete removed clients, sync sessions for the
  // missing ones and call `fn` again (bounded by `retry`) before giving up.
  private def loopIfMissingClients(arg: Either[ErrorResponse, MessageResponse], retry: Int, fn: () => Future[Either[ErrorResponse, RemoteInstant]]): Future[Either[ErrorResponse, RemoteInstant]] =
    arg match {
      case Right(MessageResponse.Success(ClientMismatch(_, _, deleted, time))) =>
        // XXX: we are ignoring redundant clients, we rely on members list to encrypt messages, so if user left the conv then we won't use his clients on next message
        service.deleteClients(deleted).map(_ => Right(time))
      case Right(MessageResponse.Failure(ClientMismatch(_, missing, deleted, _))) =>
        service.deleteClients(deleted).flatMap { _ =>
          if (retry > 2)
            successful(Left(internalError(s"postEncryptedMessage/broadcastMessage failed with missing clients after several retries: $missing")))
          else
            clientsSyncHandler.syncSessions(missing).flatMap {
              case None => fn()
              case Some(err) if retry < 3 =>
                error(l"syncSessions for missing clients failed: $err")
                fn()
              case Some(err) =>
                successful(Left(err))
            }
        }
      case Left(err) =>
        error(l"postOtrMessage failed with error: $err")
        successful(Left(err))
    }
  override def uploadAssetDataV3(data: LocalData, key: Option[AESKey], mime: Mime = Mime.Default, retention: Retention = Retention.Persistent) =
    key match {
      // With a key: encrypt first, upload the ciphertext, and return the crypto details.
      case Some(k) => CancellableFuture.lift(service.encryptAssetData(k, data)) flatMap {
        case (sha, encrypted, encryptionAlg) => assetClient.uploadAsset(Metadata(retention = retention), encrypted, Mime.Default).map { //encrypted data => Default mime
          case Right(UploadResponse(rId, _, token)) => Right(RemoteData(Some(rId), token, key, Some(sha), Some(encryptionAlg)))
          case Left(err) => Left(err)
        }
      }
      // Without a key: upload as a public, unencrypted asset with its real mime type.
      case _ => assetClient.uploadAsset(Metadata(public = true), data, mime).map {
        case Right(UploadResponse(rId, _, _)) => Right(RemoteData(Some(rId)))
        case Left(err) => Left(err)
      }
    }
  override def postSessionReset(convId: ConvId, user: UserId, client: ClientId) = {
    val msg = GenericMessage(Uid(), Proto.ClientAction.SessionReset)
    // Fall back to the 1:1 conversation derived from the user id when convId is unknown.
    val convData = convStorage.get(convId).flatMap {
      case None => convStorage.get(ConvId(user.str))
      case conv => successful(conv)
    }
    // Encrypt for the single target client, syncing its session first if none exists yet.
    def msgContent = service.encryptTargetedMessage(user, client, msg).flatMap {
      case Some(ct) => successful(Some(ct))
      case None =>
        for {
          _ <- clientsSyncHandler.syncSessions(Map(user -> Seq(client)))
          content <- service.encryptTargetedMessage(user, client, msg)
        } yield content
    }
    convData.flatMap {
      case None =>
        successful(Failure(s"conv not found: $convId, for user: $user in postSessionReset"))
      case Some(conv) =>
        msgContent.flatMap {
          case None => successful(Failure(s"session not found for $user, $client"))
          case Some(content) =>
            msgClient
              .postMessage(conv.remoteId, OtrMessage(selfClientId, content), ignoreMissing = true).future
              .map(SyncResult(_))
        }
    }
  }
}
object OtrSyncHandler {
  /** Raised internally when posting into an unverified conversation; caught in postOtrMessage. */
  case object UnverifiedException extends Exception
  /** Payload for the OTR endpoints: sending client, per-client ciphertexts, optional external blob. */
  case class OtrMessage(sender: ClientId, recipients: EncryptedContent, external: Option[Array[Byte]] = None, nativePush: Boolean = true)
  val MaxInlineSize = 10 * 1024
  val MaxContentSize = 256 * 1024 // backend accepts 256KB for otr messages, but we would prefer to send less
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/sync/otr/OtrSyncHandler.scala | Scala | gpl-3.0 | 10,511 |
package scalapb.compiler
import com.google.protobuf.Descriptors._
import scala.jdk.CollectionConverters._
import com.google.protobuf.Descriptors.FieldDescriptor.Type
/**
 * Emits the `def parseFrom(_input__: CodedInputStream)` method for `message`.
 *
 * The generated method declares one local per field (see [[Field]]), loops over the
 * input's wire tags filling those locals in, tracks required fields in 64-bit bitmask
 * words, and finally invokes the message's constructor with the accumulated values.
 */
private[compiler] class ParseFromGenerator(
    implicits: DescriptorImplicits,
    generator: ProtobufGenerator,
    message: Descriptor
) {
  import implicits._
  import DescriptorImplicits.AsSymbolExtension
  import generator.{
    toBaseTypeExpr,
    toCustomTypeExpr,
    defaultValueForGet,
    defaultValueForDefaultInstance,
    toBaseFieldType,
    toCustomType
  }
  /**
   * Describes one local used by the generated parser (all members are generated-code
   * snippets, not runtime values):
   * - name: identifier of the local variable/builder
   * - targetName: the constructor parameter it is assigned to
   * - typeName: declared Scala type of the local
   * - default: initializer expression
   * - accessor: expression reading the current value off an existing `_message__`
   *   instance (not used by generateParseFrom in this file view; presumably consumed
   *   by merge-style generation — confirm against the callers)
   * - builder: expression producing the final constructor argument
   * - isRepeated: repeated fields use a `val` builder, all others a mutable `var`
   */
  case class Field(
      name: String,
      targetName: String,
      typeName: String,
      default: String,
      accessor: String,
      builder: String,
      isRepeated: Boolean
  )
  // One Field per non-oneof field, plus one per real oneof, plus (optionally) the
  // unknown-fields builder.
  val fields = message.fieldsWithoutOneofs.map { field =>
    if (usesBaseTypeInBuilder(field)) {
      // To handle custom types that have no default values, we wrap required/no-boxed messages in
      // Option during parsing. We also apply the type mapper after parsing is complete.
      if (field.isMessage)
        Field(
          s"__${field.scalaName}",
          field.scalaName.asSymbol,
          s"_root_.scala.Option[${field.baseSingleScalaTypeName}]",
          C.None,
          s"_root_.scala.Some(${toBaseTypeExpr(field)(s"_message__.${field.scalaName.asSymbol}", EnclosingType.None)})",
          toCustomTypeExpr(field)(
            s"__${field.scalaName}.getOrElse(${field.getMessageType.scalaType.fullName}.defaultInstance)",
            EnclosingType.None
          ),
          field.isRepeated
        )
      else
        Field(
          s"__${field.scalaName}",
          field.scalaName.asSymbol,
          field.baseSingleScalaTypeName,
          defaultValueForGet(field, uncustomized = true),
          toBaseTypeExpr(field)(s"_message__.${field.scalaName.asSymbol}", EnclosingType.None),
          toCustomTypeExpr(field)(s"__${field.scalaName}", EnclosingType.None),
          field.isRepeated
        )
    } else if (!field.isRepeated)
      Field(
        s"__${field.scalaName}",
        field.scalaName.asSymbol,
        field.scalaTypeName,
        defaultValueForDefaultInstance(field),
        s"_message__.${field.scalaName.asSymbol}",
        s"__${field.scalaName}",
        field.isRepeated
      )
    else {
      // Repeated fields accumulate into a collection builder; adapters (custom
      // collection types) produce an Either that is unwrapped in `builder`.
      val it =
        if (field.collection.adapter.isDefined)
          field.collection.iterator(s"_message__.${field.scalaName.asSymbol}", EnclosingType.None)
        else s"_message__.${field.scalaName.asSymbol}"
      Field(
        s"__${field.scalaName}",
        field.scalaName.asSymbol,
        field.collection.builderType,
        field.collection.newBuilder,
        s"${field.collection.newBuilder} ++= $it",
        if (field.collection.adapter.isDefined)
          s"__${field.scalaName}.result().fold(throw _, identity(_))"
        else
          s"__${field.scalaName}.result()",
        field.isRepeated
      )
    }
  } ++ message.getRealOneofs.asScala.map { oneof =>
    Field(
      s"__${oneof.scalaName.name}",
      oneof.scalaName.name.asSymbol,
      oneof.scalaType.fullName,
      oneof.empty.fullName,
      s"_message__.${oneof.scalaName.nameSymbol}",
      s"__${oneof.scalaName.name}",
      isRepeated = false
    )
  } ++ (if (message.preservesUnknownFields)
          Seq(
            Field(
              "`_unknownFields__`",
              "unknownFields",
              "_root_.scalapb.UnknownFieldSet.Builder",
              "null",
              "new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)",
              "if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result()",
              false
            )
          )
        else Seq.empty)
  private def usesBaseTypeInBuilder(field: FieldDescriptor) = field.isSingular
  // Maps each required (or no-box required) field to its bit index in the bitmask words.
  val requiredFieldMap: Map[FieldDescriptor, Int] =
    message.fields.filter(fd => fd.isRequired || fd.noBoxRequired).zipWithIndex.toMap
  val myFullScalaName = message.scalaType.fullNameWithMaybeRoot(message)
  /** Appends the full `parseFrom` method body to `printer` and returns it. */
  def generateParseFrom(printer: FunctionalPrinter): FunctionalPrinter = {
    printer
      .add(
        s"def parseFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): $myFullScalaName = {"
      )
      .indented(
        _.when(requiredFieldMap.nonEmpty) { fp =>
          // Sets the bit 0...(n-1) inclusive to 1.
          def hexBits(n: Int): String = "0x%xL".format((0 to (n - 1)).map(i => (1L << i)).sum)
          val requiredFieldCount = requiredFieldMap.size
          val fullWords = (requiredFieldCount - 1) / 64
          val bits: Seq[String] = (1 to fullWords).map(_ => hexBits(64)) :+ hexBits(
            requiredFieldCount - 64 * fullWords
          )
          // Each 64-bit word starts with one bit set per required field; bits are cleared
          // as fields are seen on the wire.
          fp.print(bits.zipWithIndex) { case (fp, (bn, index)) =>
            fp.add(s"var __requiredFields$index: _root_.scala.Long = $bn")
          }
        }
          .print(fields)((fp, f) =>
            fp.add(
              s"${if (f.isRepeated) "val" else "var"} ${f.name}: ${f.typeName} = ${f.default}"
            )
          )
          .add(s"""var _done__ = false
          |while (!_done__) {
          |  val _tag__ = _input__.readTag()
          |  _tag__ match {
          |    case 0 => _done__ = true""".stripMargin)
          .print(message.fields) { (printer, field) =>
            val p = {
              val newValBase = if (field.isMessage) {
                // In 0.10.x we can't simply call any of the new methods that relies on Builder,
                // since the references message may have been generated using an older version of
                // ScalaPB.
                val baseName = field.baseSingleScalaTypeName
                val read =
                  if (field.isRepeated)
                    s"_root_.scalapb.LiteParser.readMessage[$baseName](_input__)"
                  else if (usesBaseTypeInBuilder(field)) {
                    s"_root_.scala.Some(__${field.scalaName}.fold(_root_.scalapb.LiteParser.readMessage[$baseName](_input__))(_root_.scalapb.LiteParser.readMessage(_input__, _)))"
                  } else {
                    val expr =
                      if (field.isInOneof)
                        s"__${field.getContainingOneof.scalaName.name}.${field.scalaName.asSymbol}"
                      else s"__${field.scalaName}"
                    val mappedType = toBaseFieldType(field).apply(expr, field.enclosingType)
                    if (field.isInOneof || field.supportsPresence)
                      s"$mappedType.fold(_root_.scalapb.LiteParser.readMessage[$baseName](_input__))(_root_.scalapb.LiteParser.readMessage(_input__, _))"
                    else s"_root_.scalapb.LiteParser.readMessage[$baseName](_input__, $mappedType)"
                  }
                read
              } else if (field.isEnum)
                s"${field.getEnumType.scalaType.fullNameWithMaybeRoot(message)}.fromValue(_input__.readEnum())"
              else if (field.getType == Type.STRING) s"_input__.readStringRequireUtf8()"
              else s"_input__.read${Types.capitalizedType(field.getType)}()"
              val newVal =
                if (!usesBaseTypeInBuilder(field)) toCustomType(field)(newValBase) else newValBase
              val updateOp =
                if (field.supportsPresence) s"__${field.scalaName} = Option($newVal)"
                else if (field.isInOneof) {
                  s"__${field.getContainingOneof.scalaName.name} = ${field.oneOfTypeName.fullName}($newVal)"
                } else if (field.isRepeated) s"__${field.scalaName} += $newVal"
                else s"__${field.scalaName} = $newVal"
              printer
                .add(
                  s"""    case ${(field.getNumber << 3) + Types.wireType(field.getType)} =>
                  |      $updateOp""".stripMargin
                )
                .when(field.isRequired || field.noBoxRequired) { p =>
                  // Clear this field's bit: it has now been observed on the wire.
                  val fieldNumber = requiredFieldMap(field)
                  p.add(
                    s"      __requiredFields${fieldNumber / 64} &= 0x${"%x".format(~(1L << fieldNumber))}L"
                  )
                }
            }
            if (field.isPackable) {
              // Packed encoding: a single length-delimited blob holding many values.
              val read = {
                val tmp = s"""_input__.read${Types.capitalizedType(field.getType)}()"""
                if (field.isEnum)
                  s"${field.getEnumType.scalaType.fullName}.fromValue($tmp)"
                else tmp
              }
              val readExpr = toCustomType(field)(read)
              p.add(s"""    case ${(field.getNumber << 3) + Types.WIRETYPE_LENGTH_DELIMITED} => {
              |      val length = _input__.readRawVarint32()
              |      val oldLimit = _input__.pushLimit(length)
              |      while (_input__.getBytesUntilLimit > 0) {
              |        __${field.scalaName} += $readExpr
              |      }
              |      _input__.popLimit(oldLimit)
              |    }""".stripMargin)
            } else p
          }
          .when(!message.preservesUnknownFields)(_.add("    case tag => _input__.skipField(tag)"))
          .when(message.preservesUnknownFields)(
            _.add(
              """    case tag =>
              |      if (_unknownFields__ == null) {
              |        _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder()
              |      }
              |      _unknownFields__.parseField(tag, _input__)""".stripMargin
            )
          )
          .add("  }")
          .add("}")
          .when(requiredFieldMap.nonEmpty) { p =>
            // Any bit still set means a required field never appeared on the wire.
            val r = (0 until (requiredFieldMap.size + 63) / 64)
              .map(i => s"__requiredFields$i != 0L")
              .mkString(" || ")
            p.add(
              s"""if (${r}) { throw new _root_.com.google.protobuf.InvalidProtocolBufferException("Message missing required fields.") } """
            )
          }
          .add(s"$myFullScalaName(")
          .indented(
            _.addWithDelimiter(",")(fields.map(e => s"  ${e.targetName} = ${e.builder}"))
          )
          .add(")")
      )
      .add("}")
  }
}
/** Stateless façade: builds a [[ParseFromGenerator]] and delegates to it. */
private[compiler] object ParseFromGenerator {
  def generateParseFrom(implicits: DescriptorImplicits, pb: ProtobufGenerator, message: Descriptor)(
      fp: FunctionalPrinter
  ): FunctionalPrinter =
    new ParseFromGenerator(implicits, pb, message).generateParseFrom(fp)
}
| scalapb/ScalaPB | compiler-plugin/src/main/scala/scalapb/compiler/ParseFromGenerator.scala | Scala | apache-2.0 | 10,476 |
package test
import java.util
import scala.collection.JavaConversions
/**
 * Helper for iterating Java collections from Scala.
 *
 * @author TheTemportalist 2/1/15
 */
object Scala {
	/**
	 * Applies `callback` to every element of `collection` in the collection's
	 * iteration order.
	 *
	 * The previous implementation pattern-matched on `util.List[T]` / `util.Set[T]`
	 * (an unchecked match due to type erasure) and routed those cases through the
	 * deprecated `JavaConversions` wrappers. All three branches merely iterated, so
	 * driving the Java iterator directly is behaviorally identical for every
	 * `util.Collection` and drops the deprecated dependency.
	 *
	 * @param collection any Java collection (must not be null)
	 * @param callback   side-effecting function invoked once per element
	 */
	def foreach[T](collection: util.Collection[T], callback: (T) => Unit): Unit = {
		val iter: util.Iterator[T] = collection.iterator()
		while (iter.hasNext) callback(iter.next())
	}
}
| TheTemportalist/Test | src/main/scala/test/Scala.scala | Scala | apache-2.0 | 555 |
/*
* Copyright (C) 2012-2013 Age Mooij (http://scalapenos.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.scalapenos.riak
/**
 * Thrown when a bucket operation returns siblings but no conflict resolver was specified
 * for the bucket. (Note: the class name is missing a "t" in "Conflict"; it is kept as-is
 * because renaming would break existing callers that catch it.)
 */
class ConflicResolutionNotImplemented
  extends RuntimeException("A bucket operation resulted in siblings but no conflict resolver was specified for the bucket.")
/**
 * Fallback resolver installed when a bucket has no user-supplied conflict resolver.
 * It cannot resolve anything: any sibling set results in a
 * [[ConflicResolutionNotImplemented]] being thrown.
 */
case object DefaultConflictsResolver extends RiakConflictsResolver {
  /** Always fails — resolving siblings requires a bucket-specific resolver. */
  def resolve(values: Set[RiakValue]) =
    throw new ConflicResolutionNotImplemented()
}
| agemooij/riak-scala-client | src/main/scala/com/scalapenos/riak/DefaultConflictResolver.scala | Scala | apache-2.0 | 981 |
package com.socrata.soql.functions
import org.scalatest.MustMatchers
import org.scalatest.FunSuite
class SoQLFunctionsTest extends FunSuite with MustMatchers {
  // Every registered function must carry a unique identity, i.e. the
  // identity-keyed map loses no entries relative to the full function list.
  test("all functions have distinct identities") {
    SoQLFunctions.allFunctions.size must equal (SoQLFunctions.functionsByIdentity.size)
  }
  // A function name may be fixed-arity or variadic, but never both.
  test("fixed-arity and variable-arity functions do not share any names") {
    val fixedNames    = SoQLFunctions.nAdicFunctions.map(_.name).toSet
    val variadicNames = SoQLFunctions.variadicFunctions.map(_.name).toSet
    (fixedNames intersect variadicNames) must be (Symbol("empty"))
  }
  // No accessor on SoQLFunctions may expose a raw MonomorphicFunction.
  test("SoQLFunctions has no Monomorphic accessors") {
    SoQLFunctions.potentialAccessors.foreach { accessor =>
      accessor.getReturnType must not be (classOf[MonomorphicFunction[_]])
    }
  }
}
| socrata-platform/soql-reference | soql-stdlib/src/test/scala/com/socrata/soql/functions/SoQLFunctionsTest.scala | Scala | apache-2.0 | 774 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog
package mimir
/**
 * DAG optimization that collapses a union of two complementary filters into a
 * single conditional node.
 */
trait CondRewriter extends DAG {
  import instructions._
  import dag._
  /**
   * Traverses `node` (via `mapDown`) and rewrites every subgraph of the shape
   * `IUI(true, Filter(left, p), Filter(right, Operate(Comp, p)))` — a union of
   * one branch filtered on a predicate and the other filtered on its
   * complement — into a single `Cond` node over the shared predicate.
   * The guard `pred1 == pred2` requires the two predicate subgraphs to be
   * equal; `Comp` is presumably boolean complement — confirm against the
   * instruction set.
   */
  def rewriteConditionals(node: DepGraph): DepGraph = {
    node mapDown { recurse => {
      case peer @ IUI(true, Filter(leftJoin, left, pred1), Filter(rightJoin, right, Operate(Comp, pred2))) if pred1 == pred2 =>
        // Recurse into predicate and both branches so nested matches are also
        // rewritten; keep each branch's original join spec and source location.
        Cond(recurse(pred1), recurse(left), leftJoin, recurse(right), rightJoin)(peer.loc)
    }}
  }
}
| precog/platform | mimir/src/main/scala/com/precog/mimir/CondRewriter.scala | Scala | agpl-3.0 | 1,502 |
package eu.brosbit.opos.snippet.page
import net.liftweb.json.JsonDSL._
import eu.brosbit.opos.model.page.FlashTile
import scala.xml.Unparsed
/**
 * Lift snippet mixin that renders the "flash tiles" attached to a page.
 * Each stored FlashTile for the given page URL becomes a clickable tile with
 * a background image, an HTML info overlay and an onclick redirect.
 */
trait FlashTileSn {
  /** Builds the tile container for `url`, or an empty span when no tiles exist. */
  def appendTile(url: String) = {
    // One <div id="flashTile"> per FlashTile record stored for this page URL.
    // NOTE(review): ft.info is emitted unescaped via Unparsed — assumes the
    // stored HTML is admin-trusted; confirm it cannot contain user input.
    val tiles = FlashTile.findAll("page" -> url).map(
      ft =>
        <div id="flashTile" style={"background-image: url('" +
          ft.img + "');"}
             onclick={"redirectToPage('" + ft.link + "');"}>
          <div class="trans-box">
            {Unparsed(ft.info)}
          </div>
        </div>)
    // No tiles: render nothing visible; otherwise wrap the tiles together with
    // a close button in the container the page script hooks into.
    if (tiles.isEmpty) <span></span>
    else <div id="flashTilesContainer">
      <span id="closeFlash">
        <span class="glyphicon glyphicon-remove"></span>
      </span>
      {tiles}
    </div>
  }
}
| mikolajs/osp | src/main/scala/eu/brosbit/opos/snippet/page/FlashTileSn.scala | Scala | agpl-3.0 | 722 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.factories
import java.util
import org.apache.flink.table.sinks.StreamTableSink
/**
 * A factory to create configured table sink instances in a streaming environment based on
 * string-based properties. See also [[TableFactory]] for more information.
 *
 * @tparam T type of records that the factory consumes
 */
trait StreamTableSinkFactory[T] extends TableFactory {
  /**
   * Creates and configures a [[org.apache.flink.table.sinks.StreamTableSink]]
   * using the given properties.
   *
   * @param properties normalized properties describing a table sink; both keys
   *                   and values are plain strings.
   * @return the configured table sink.
   */
  def createStreamTableSink(properties: util.Map[String, String]): StreamTableSink[T]
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/factories/StreamTableSinkFactory.scala | Scala | apache-2.0 | 1,547 |
package io.getquill.context.cassandra
import io.getquill._
/**
 * Verifies that decoding a Cassandra NULL column into a non-Option field fails
 * loudly rather than silently substituting a default value.
 *
 * A row is written through DecodeNullTestWriteEntity (value: Option[Int] = None,
 * stored as NULL) and read back as DecodeNullTestEntity (value: Int); both the
 * sync and async contexts must raise an IllegalStateException.
 */
class DecodeNullSpec extends Spec {
  "no default values when reading null" - {
    "sync" in {
      import testSyncDB._
      // Map the write-side schema onto the same table the read uses.
      val writeEntities = quote(querySchema[DecodeNullTestWriteEntity]("DecodeNullTestEntity"))
      testSyncDB.run(writeEntities.delete)
      testSyncDB.run(writeEntities.insert(lift(insertValue)))
      // Reading the NULL `value` into a plain Int must fail.
      intercept[IllegalStateException] {
        testSyncDB.run(query[DecodeNullTestEntity])
      }
    }
    "async" in {
      import testAsyncDB._
      import scala.concurrent.ExecutionContext.Implicits.global
      val writeEntities = quote(querySchema[DecodeNullTestWriteEntity]("DecodeNullTestEntity"))
      // Same scenario, chained through Futures; the decode failure surfaces
      // only when the final Future is awaited below.
      val result =
        for {
          _ <- testAsyncDB.run(writeEntities.delete)
          _ <- testAsyncDB.run(writeEntities.insert(lift(insertValue)))
          result <- testAsyncDB.run(query[DecodeNullTestEntity])
        } yield {
          result
        }
      intercept[IllegalStateException] {
        await {
          result
        }
      }
    }
  }
  // Read-side entity: `value` is non-optional, so NULL cannot be decoded.
  case class DecodeNullTestEntity(id: Int, value: Int)
  // Write-side entity: `value` is optional so a NULL can be inserted.
  case class DecodeNullTestWriteEntity(id: Int, value: Option[Int])
  // The row under test: value = None, i.e. NULL in Cassandra.
  val insertValue = DecodeNullTestWriteEntity(0, None)
}
| getquill/quill | quill-cassandra/src/test/scala/io/getquill/context/cassandra/DecodeNullSpec.scala | Scala | apache-2.0 | 1,254 |
object Zipper {
  // A zipper for a binary tree: the subtree currently in focus plus the
  // breadcrumb trail needed to rebuild every ancestor on the way back up.
  final case class Zipper[A](focus: BinTree[A], crumbs: List[Crumb[A]])

  // One step of context: which side we descended from the parent, the
  // parent's value, and the sibling subtree we left behind.
  sealed trait Crumb[A]
  final case class LeftCrumb[A](value: A, right: Option[BinTree[A]]) extends Crumb[A]
  final case class RightCrumb[A](value: A, left: Option[BinTree[A]]) extends Crumb[A]

  // Get a zipper focussed on the root node.
  def fromTree[A](bt: BinTree[A]): Zipper[A] = Zipper(bt, Nil)

  // Get the complete tree from a zipper by walking back up to the root.
  @scala.annotation.tailrec
  def toTree[A](zipper: Zipper[A]): BinTree[A] = up(zipper) match {
    case Some(parent) => toTree(parent)
    case None         => zipper.focus
  }

  // Get the value of the focus node.
  def value[A](zipper: Zipper[A]): A = zipper.focus.value

  // Get the left child of the focus node, if any.
  def left[A](zipper: Zipper[A]): Option[Zipper[A]] =
    zipper.focus.left.map { l =>
      Zipper(l, LeftCrumb(zipper.focus.value, zipper.focus.right) :: zipper.crumbs)
    }

  // Get the right child of the focus node, if any.
  def right[A](zipper: Zipper[A]): Option[Zipper[A]] =
    zipper.focus.right.map { r =>
      Zipper(r, RightCrumb(zipper.focus.value, zipper.focus.left) :: zipper.crumbs)
    }

  // Get the parent of the focus node, if any (None at the root).
  def up[A](zipper: Zipper[A]): Option[Zipper[A]] = zipper.crumbs match {
    case Nil                      => None
    case LeftCrumb(v, r) :: rest  => Some(Zipper(BinTree(v, Some(zipper.focus), r), rest))
    case RightCrumb(v, l) :: rest => Some(Zipper(BinTree(v, l, Some(zipper.focus)), rest))
  }

  // Set the value of the focus node.
  def setValue[A](v: A, zipper: Zipper[A]): Zipper[A] =
    zipper.copy(focus = zipper.focus.copy(value = v))

  // Replace a left child tree.
  def setLeft[A](l: Option[BinTree[A]], zipper: Zipper[A]): Zipper[A] =
    zipper.copy(focus = zipper.focus.copy(left = l))

  // Replace a right child tree.
  def setRight[A](r: Option[BinTree[A]], zipper: Zipper[A]): Zipper[A] =
    zipper.copy(focus = zipper.focus.copy(right = r))
}

// A binary tree.
case class BinTree[A](value: A, left: Option[BinTree[A]], right: Option[BinTree[A]])
| daewon/til | exercism/scala/zipper/src/main/scala/Zipper.scala | Scala | mpl-2.0 | 1,099 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author cuthbertm on 10/22/14 9:23 AM
*/
package com.webtrends.harness.component.cluster.communication
import java.net.URLDecoder
import akka.actor._
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import com.webtrends.harness.component.cluster.communication.MessageProcessor.Internal.{UpdateSubscriptions, Subscription}
import com.webtrends.harness.component.cluster.communication.MessageService.{Publish, Unsubscribe, Send, Subscribe}
import com.webtrends.harness.logging.ActorLoggingAdapter
import scala.concurrent.duration.{Deadline, FiniteDuration}
object MessagingTopicActor {
  /** Props factory for [[MessagingTopicActor]]; see the class for parameter meanings. */
  def props(selfAddress: Address, trashInterval: FiniteDuration, seed: Set[Subscription]): Props = {
    Props(classOf[MessagingTopicActor], selfAddress, trashInterval, seed)
  }
}
/**
 * Actor backing a single pub/sub topic. The actor's (URL-decoded) name is the
 * topic itself. It maintains the subscriber registry, delivers Publish/Send
 * messages, and stops itself once it has had no subscribers for
 * `trashInterval`.
 *
 * @param selfAddress   address of the local node; used in `publish` to
 *                      distinguish local subscribers from remote ones
 * @param trashInterval how long the actor may live without subscribers
 *                      before stopping itself
 * @param seed          initial subscriptions installed in preStart
 */
class MessagingTopicActor(selfAddress: Address, trashInterval: FiniteDuration, seed: Set[Subscription]) extends Actor
  with ActorLoggingAdapter {
  import context.dispatcher
  // The name of this actor is actually the topic
  val topic = URLDecoder.decode(self.path.name, "utf-8")
  // Setup a recurring task that checks to see if we have no more subscribers.
  // This helps to prevent the retention of resources that are not needed.
  case object Sweep
  val sweepInterval: FiniteDuration = trashInterval / 2
  val sweepTask = context.system.scheduler.schedule(sweepInterval, sweepInterval, self, Sweep)
  // When set, the deadline after which this actor stops itself; armed whenever
  // the registry becomes empty, cleared again when a subscriber appears.
  var trashDeadline: Option[Deadline] = None
  // The list of subscribers. The Boolean value is the subscription's
  // `localOnly` flag (see `publish` for how it gates delivery to remote refs).
  var registry = Map.empty[ActorRef, Boolean]
  // Establish the routing logic
  val routingLogic = RoundRobinRoutingLogic()
  override def preStart: Unit = {
    // Take the seed subscriptions and set them up
    updateSubscriptions(seed)
    log.debug(s"Message Topic Actor Started with " +
      s"subscriptions: [${seed.map(_.subscriber.path.name).mkString(",")}]")
  }
  override def postStop(): Unit = {
    // Cancel the recurring sweep so the scheduler does not keep a dead ref.
    sweepTask.cancel()
    super.postStop()
  }
  def receive = {
    case UpdateSubscriptions(subscriptions) => updateSubscriptions(subscriptions)
    case Subscribe(_, ref, localOnly) => subscribe(ref, localOnly)
    case Unsubscribe(_, ref) => unsubscribe(ref, terminated = false)
    case Terminated(ref) => unsubscribe(ref, terminated = true)
    case Publish(_, message) => publish(message)
    case Send(_, message) => send(message)
    case Sweep if trashDeadline.isDefined && trashDeadline.get.isOverdue =>
      log.debug("The actor [{}] is stopping because there are no more subscriptions " +
        "and it was scheduled for deletion", self.path)
      context stop self
    case Sweep => // Do nothing
  }
  /**
   * Bulk update the subscriptions
   */
  private def updateSubscriptions(subscriptions: Set[Subscription]): Unit = {
    log.debug("The actor [{}] is updating it's subscriptions: {}",
      self.path.name, subscriptions.map(_.subscriber.path.name).mkString(","))
    // Unwatch all of the entries that are not in the passed in set
    val removals = registry.keySet &~ subscriptions.map(_.subscriber)
    removals foreach { sub =>
      context unwatch sub
    }
    // Copy the subscriptions
    registry = subscriptions.map(e => e.subscriber -> e.localOnly).toMap
    if (registry.isEmpty) {
      // If we have no more subscriptions then schedule the removal of this actor
      trashDeadline = Some(Deadline.now + trashInterval)
    }
    else {
      trashDeadline = None
      registry foreach { sub =>
        context watch sub._1
      }
    }
  }
  /**
   * Add the subscription
   * @param ref the actor to subscribe
   * @param localOnly when true the subscriber is skipped for publishes to
   *                  remote refs (see `publish`)
   */
  private def subscribe(ref: ActorRef, localOnly: Boolean): Unit = {
    if (!registry.contains(ref)) {
      log.info("The actor {} is subscribing to the topic {}", ref.path.toString, topic)
      // Watch so a Terminated message cleans up the registry automatically.
      context watch ref
      registry += (ref -> localOnly)
      trashDeadline = None
    }
  }
  /**
   * Remove the subscription
   * @param ref the actor to unsubscribe
   * @param terminated is the unsubscribe due to a termination
   */
  private def unsubscribe(ref: ActorRef, terminated: Boolean): Unit = {
    if (registry.contains(ref)) {
      if (terminated) {
        log.info("The actor {} is un-subscribed to the topic {} since it has been terminated", ref.path.toString, topic)
      }
      else {
        log.info("The actor {} is un-subscribing to the topic {}", ref.path.toString, topic)
      }
      context unwatch ref
      registry -= ref
      if (registry.isEmpty) {
        // If we have no more subscriptions then schedule the removal of this actor
        trashDeadline = Some(Deadline.now + trashInterval)
      }
    }
  }
  /**
   * Forward the message to all subscribers
   */
  private def publish(message: Any): Unit = {
    for {
      subscription <- registry
      // Only send the message to internal actors or external ones that are registered for external publishes
      if subscription._1.path.address.host.isEmpty || subscription._1.path.address == selfAddress || !subscription._2
    } {
      log.debug("Publishing message for '{}' to {}", topic, subscription._1.path)
      subscription._1 forward message
    }
  }
  // Deliver the message to exactly one subscriber, chosen round-robin;
  // warn (and drop the message) when there are no subscribers.
  private def send(message: Any): Unit = {
    val routees = (for {
      sub <- registry
    } yield ActorRefRoutee(sub._1)).toVector
    if (routees.nonEmpty) {
      // Forward the message through our router
      Router(routingLogic, routees).route(message, sender())
    }
    else {
      log.warn("There are no subscribers available for the topic {}", topic)
    }
  }
}
| Webtrends/wookiee-cluster | src/main/scala/com/webtrends/harness/component/cluster/communication/MessagingTopicActor.scala | Scala | apache-2.0 | 6,239 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom
/**
 * This package contains the query API traits. It contains both the purely abstract API traits as well as the
 * partial implementation traits.
 *
 * '''Generic code abstracting over yaidom element implementations should either use
 * trait `ClarkElemApi` or sub-trait `ScopedElemApi`, depending on the abstraction level.'''
 *
 * Most API traits are orthogonal, but some API traits are useful combinations of other ones. Examples include
 * the above-mentioned `ClarkElemApi` and `ScopedElemApi` traits.
 *
 * This package depends only on the core package in yaidom, but many other packages do depend on this one.
 *
 * @author Chris de Vreeze
 */
// The package object is intentionally empty; it exists only to carry the
// package-level documentation above.
package object queryapi
| EBPI/yaidom | src/main/scala/nl/ebpi/yaidom/queryapi/package.scala | Scala | apache-2.0 | 1,302 |
package client.appstate.groups.policies
import autowire._
import client.appstate.GroupPolicies
import client.services.AjaxClient
import diode.data._
import diode.{Effect, _}
import shared._
import shared.requests.groups.policies._
import boopickle.Default._
import shared.responses.groups.policies.PoliciesAssociatedToGroupInfo
import shared.utils.constants._
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.concurrent.Future
//Actions
// Group policies
// Clears the group-policy list; its handler also resets the total count and
// the selected page back to zero.
case object ResetGroupPolicies extends Action
// Requests the policies associated with a group from the server.
case class FetchGroupPolicies(request: PoliciesAssociatedToGroupRequest) extends Action
// Stores the server response: either a FoulkonError or the total policy count
// plus the list of associated policies.
case class SetGroupPolicies(groupPolicies: Either[FoulkonError, (TotalGroupPolicies, List[PoliciesAssociatedToGroupInfo])]) extends Action
// Recomputes the page count from a new total (handled below).
case class UpdateTotalGroupPoliciesAndPages(totalGroupPolicies: TotalGroupPolicies) extends Action
// Changes the currently selected page of the policy list.
case class UpdateSelectedPage(selectedPage: SelectedPage) extends Action
/**
 * Diode handler for the group-policy list (`Pot[GroupPolicies]`): resets the
 * list, fetches policies for a group over Ajax, and stores the (possibly
 * failed) server response.
 */
class GroupPolicyHandler[M](modelRW: ModelRW[M, Pot[GroupPolicies]]) extends ActionHandler(modelRW) {
  override protected def handle: PartialFunction[Any, ActionResult[M]] = {
    // Empty the list and reset total/pages and selected page to zero.
    case ResetGroupPolicies =>
      updated(
        Empty,
        Effect(Future(UpdateTotalGroupPoliciesAndPages(0)))
          >> Effect(Future(UpdateSelectedPage(0)))
      )
    // Ask the server for the group's policies; the reply is dispatched back
    // as a SetGroupPolicies action.
    case FetchGroupPolicies(request) =>
      effectOnly(
        Effect(
          AjaxClient[Api].readPoliciesAssociatedToGroup(request)
            .call
            .map(SetGroupPolicies)
        )
      )
    case SetGroupPolicies(groupPolicies) =>
      groupPolicies match {
        // Success: keep only the policy list in the model and propagate the
        // total count so the pagination handler recomputes the page count.
        case rightResult @ Right((total, _)) =>
          updated(
            Ready(GroupPolicies(rightResult.map(_._2))),
            Effect(Future(UpdateTotalGroupPoliciesAndPages(total)))
          )
        // Failure: store the FoulkonError and reset pagination.
        case leftResult @ Left(_) =>
          updated(
            Ready(GroupPolicies(leftResult.map(_._2))),
            Effect(Future(UpdateTotalGroupPoliciesAndPages(0))) >> Effect(Future(UpdateSelectedPage(0)))
          )
      }
  }
}
/**
 * Diode handler keeping the (total policies, total pages, selected page)
 * triple in sync with the pagination actions.
 */
class GroupPoliciesPagesAndTotalHandler[M](modelRW: ModelRW[M, (TotalGroupPolicies, TotalPages, SelectedPage)]) extends ActionHandler(modelRW) {
  override protected def handle: PartialFunction[Any, ActionResult[M]] = {
    case UpdateTotalGroupPoliciesAndPages(total) =>
      // Pages needed to show `total` policies at PageSize per page, rounded up.
      val pages = (total.toFloat / PageSize.toFloat).ceil.toInt
      updated(modelRW().copy(_1 = total, _2 = pages))
    case UpdateSelectedPage(page) =>
      updated(modelRW().copy(_3 = page))
  }
}
| beikern/foulkon-ui | client/src/main/scala/client/appstate/groups/policies/DiodeGroupPolicy.scala | Scala | apache-2.0 | 2,603 |
/*
* Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.scala.chart.categoryplot.plotitem
import com.twosigma.beakerx.chart.xychart.plotitem.StrokeType
import org.junit.Test
import org.scalatest.Matchers._
/** Exercises the Scala-side `style`/`styles` properties of CategoryStems. */
class CategoryStemsPropertiesTest {
  @Test
  def style(): Unit = {
    import StrokeType._
    val stems = new CategoryStems
    // Default: the scalar `style` mirrors the Java getter and no per-item
    // styles are set yet.
    stems.style shouldBe stems.getStyle
    stems.styles shouldBe empty
    // Assigning a single style is readable back through `style`.
    stems.style = LONGDASH
    stems.style shouldBe LONGDASH
    // Assigning an array populates the per-item `styles` sequence in order.
    stems.style = Array(DASH, DOT, SOLID)
    stems.styles shouldBe Seq(DASH, DOT, SOLID)
  }
}
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.transformers
import java.util.Locale
import akka.testkit.TestActorRef
import akka.util.ByteString
import com.wegtam.tensei.adt.TransformerOptions
import com.wegtam.tensei.agent.ActorSpec
import com.wegtam.tensei.agent.transformers.BaseTransformer.{
PrepareForTransformation,
ReadyToTransform,
StartTransformation,
TransformerResponse
}
class LowerOrUpperTest extends ActorSpec {

  /**
   * Spins up a fresh LowerOrUpper transformer, runs a single transformation
   * and returns the produced data. The actor is stopped afterwards so every
   * test case uses (and cleans up) its own instance — previously only the
   * locale-driven tests stopped their actors.
   *
   * @param src    the source values handed to the transformer
   * @param params transformer options; when empty the two-argument
   *               `TransformerOptions` constructor is used, exactly as the
   *               individual tests did before this helper was extracted
   * @return the data of the received `TransformerResponse`
   */
  private def transform(src: List[ByteString],
                        params: List[(String, String)] = List.empty): List[ByteString] = {
    val actor = TestActorRef(LowerOrUpper.props)
    actor ! PrepareForTransformation
    expectMsg(ReadyToTransform)
    val options =
      if (params.isEmpty) TransformerOptions(classOf[String], classOf[String])
      else TransformerOptions(classOf[String], classOf[String], params)
    actor ! StartTransformation(src, options)
    val response = expectMsgType[TransformerResponse]
    system.stop(actor)
    response.data
  }

  describe("Transformer") {
    describe("LowerOrUpper") {
      describe("without src string") {
        it("should return an empty string") {
          transform(List()) shouldEqual List(ByteString(""))
        }
      }
      describe("without `perform` parameter") {
        it("should return the string") {
          transform(List(ByteString("Foo"))) shouldEqual List(ByteString("Foo"))
        }
      }
      describe("with parameter `perform`") {
        describe("with parameter `locale`") {
          // The inputs contain no locale-sensitive letters (e.g. no 'i'/'I'),
          // so the expected values hold for every available locale.
          val locales = Locale.getAvailableLocales
          locales.foreach { locale =>
            describe(s"using locale ${locale.toLanguageTag}") {
              def localeParams(perform: String): List[(String, String)] =
                List(("perform", perform), ("locale", locale.toLanguageTag))
              describe("and `perform` = `lower`") {
                it("should return the correct value") {
                  transform(List(ByteString("Foo BAR")), localeParams("lower")) shouldEqual
                    List(ByteString("foo bar"))
                }
              }
              describe("and `perform` = `upper`") {
                it("should return the correct value") {
                  transform(List(ByteString("Foo BAR")), localeParams("upper")) shouldEqual
                    List(ByteString("FOO BAR"))
                }
              }
              describe("and `perform` = `firstupper`") {
                it("should return the correct value") {
                  transform(List(ByteString("Foo BAR")), localeParams("firstupper")) shouldEqual
                    List(ByteString("Foo BAR"))
                }
              }
              describe("and `perform` = `firstlower`") {
                it("should return the correct value") {
                  transform(List(ByteString("Foo BAR")), localeParams("firstlower")) shouldEqual
                    List(ByteString("foo BAR"))
                }
              }
            }
          }
        }
        describe("without parameter `locale`") {
          describe("`lower` value") {
            describe("without src string") {
              it("should return the unchanged string") {
                transform(List(), List(("perform", "lower"))) shouldEqual List(ByteString(""))
              }
            }
            describe("with valid src string that already has lower characters") {
              it("should return the unchanged string") {
                transform(List(ByteString("foo")), List(("perform", "lower"))) shouldEqual
                  List(ByteString("foo"))
              }
            }
            describe("with valid src string that has upper characters") {
              it("should return the changed string") {
                transform(List(ByteString("Foo BAR")), List(("perform", "lower"))) shouldEqual
                  List(ByteString("foo bar"))
              }
            }
          }
          describe("`upper` value") {
            describe("without src string") {
              it("should return the unchanged string") {
                transform(List(), List(("perform", "upper"))) shouldEqual List(ByteString(""))
              }
            }
            describe("with valid src string that already has upper characters") {
              it("should return the unchanged string") {
                transform(List(ByteString("FOO")), List(("perform", "upper"))) shouldEqual
                  List(ByteString("FOO"))
              }
            }
            describe("with valid src string that has lower characters") {
              it("should return the changed string") {
                transform(List(ByteString("Foo bar")), List(("perform", "upper"))) shouldEqual
                  List(ByteString("FOO BAR"))
              }
            }
          }
          describe("`firstlower` value") {
            describe("without src string") {
              it("should return the unchanged string") {
                transform(List(), List(("perform", "firstlower"))) shouldEqual List(ByteString(""))
              }
            }
            describe("with valid src string that already has lower first character") {
              it("should return the unchanged string") {
                transform(List(ByteString("fOO")), List(("perform", "firstlower"))) shouldEqual
                  List(ByteString("fOO"))
              }
            }
            describe("with valid src string that has upper first character") {
              it("should return the changed string") {
                transform(List(ByteString("Foo Bar")), List(("perform", "firstlower"))) shouldEqual
                  List(ByteString("foo Bar"))
              }
            }
          }
          describe("`firstupper` value") {
            describe("without src string") {
              it("should return the unchanged string") {
                transform(List(), List(("perform", "firstupper"))) shouldEqual List(ByteString(""))
              }
            }
            describe("with valid src string that already has upper first character") {
              it("should return the unchanged string") {
                transform(List(ByteString("FOO")), List(("perform", "firstupper"))) shouldEqual
                  List(ByteString("FOO"))
              }
            }
            describe("with valid src string that has lower first character") {
              it("should return the changed string") {
                transform(List(ByteString("foo Bar")), List(("perform", "firstupper"))) shouldEqual
                  List(ByteString("Foo Bar"))
              }
            }
          }
        }
      }
    }
  }
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/transformers/LowerOrUpperTest.scala | Scala | agpl-3.0 | 17,273 |
/*
* Copyright 2013 websudos ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.phantom.dsl.crud
import com.websudos.phantom.Implicits._
import com.websudos.phantom.tables._
import com.websudos.phantom.testing.PhantomCassandraTestSuite
import com.websudos.util.testing._
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.SpanSugar._
class ListOperatorsTest extends PhantomCassandraTestSuite {

  implicit val s: PatienceConfiguration.Timeout = timeout(10 seconds)

  override def beforeAll(): Unit = {
    super.beforeAll()
    Recipes.insertSchema()
  }

  /**
   * Builds (but does not execute) an INSERT of `recipe` keyed by `id`, with the
   * ingredients column overridden by `ingredients`. Callers choose the execution
   * flavour: `.future()` for Scala futures or `.execute()` for Twitter futures.
   */
  private def insertRecipe(recipe: Recipe, id: UUID, ingredients: List[String]) = {
    Recipes.insert
      .value(_.uid, id)
      .value(_.url, recipe.url)
      .value(_.description, recipe.description)
      .value(_.ingredients, ingredients)
      .value(_.last_checked_at, recipe.lastCheckedAt)
      .value(_.props, recipe.props)
  }

  it should "store items in a list in the same order" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val list = genList[String]()

    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list
      }
    }
  }

  it should "store items in a list in the same order with Twitter Futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val list = genList[String]()

    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list
      }
    }
  }

  it should "store the same list size in Cassandra as it does in Scala" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val limit = 100
    val list = genList[String](limit)

    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list
        // NOTE(review): this implies genList(limit) yields limit - 1 elements;
        // confirm against com.websudos.util.testing's generator semantics.
        items.get.size shouldEqual (limit - 1)
      }
    }
  }

  it should "store the same list size in Cassandra as it does in Scala with Twitter Futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val limit = 100
    val list = genList[String](limit)

    // Fixed to actually exercise the Twitter-future API (execute/get); this test
    // previously duplicated the Scala-future variant (future/one) despite its name.
    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list
        // NOTE(review): this implies genList(limit) yields limit - 1 elements;
        // confirm against com.websudos.util.testing's generator semantics.
        items.get.size shouldEqual (limit - 1)
      }
    }
  }

  it should "append an item to a list" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients append "test").future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual recipe.ingredients ::: List("test")
      }
    }
  }

  it should "append an item to a list with Twitter futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients append "test").execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual recipe.ingredients ::: List("test")
      }
    }
  }

  it should "append several items to a list" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val appendable = List("test", "test2")

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients appendAll appendable).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual recipe.ingredients ::: appendable
      }
    }
  }

  it should "append several items to a list with Twitter futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val appendable = List("test", "test2")

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients appendAll appendable).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual recipe.ingredients ::: appendable
      }
    }
  }

  it should "prepend an item to a list" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients prepend "test").future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual List("test") ::: recipe.ingredients
      }
    }
  }

  it should "prepend an item to a list with Twitter Futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients prepend "test").execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual List("test") ::: recipe.ingredients
      }
    }
  }

  it should "prepend several items to a list" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val appendable = List("test", "test2")

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients prependAll appendable).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        // The reversed order reflects the Cassandra prepend semantics this suite
        // was written against: elements are prepended one at a time.
        items.get shouldEqual appendable.reverse ::: recipe.ingredients
      }
    }
  }

  it should "prepend several items to a list with Twitter futures" in {
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val appendable = List("test", "test2")

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients prependAll appendable).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        // See the Scala-future variant above for why the prepended batch is reversed.
        items.get shouldEqual appendable.reverse ::: recipe.ingredients
      }
    }
  }

  it should "remove an item from a list" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients discard list.head).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list.tail
      }
    }
  }

  it should "remove an item from a list with Twitter Futures" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients discard list.head).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual list.tail
      }
    }
  }

  it should "remove multiple items from a list" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients discardAll list.tail).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual List(list.head)
      }
    }
  }

  it should "remove multiple items from a list with Twitter futures" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients discardAll list.tail).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get shouldEqual List(list.head)
      }
    }
  }

  it should "set a 0 index inside a List" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients setIdx (0, "updated")).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get(0) shouldEqual "updated"
      }
    }
  }

  it should "set an index inside a List with Twitter futures" in {
    val list = genList[String]()
    val recipe = gen[Recipe].copy(ingredients = list)
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, recipe.ingredients).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients setIdx (0, "updated")).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get(0) shouldEqual "updated"
      }
    }
  }

  it should "set the third index inside a List" in {
    val list = genList[String](100)
    val recipe = gen[Recipe]
    val id = gen[UUID]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).future()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients setIdx (3, "updated")).future()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).one
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get(3) shouldEqual "updated"
      }
    }
  }

  it should "set the third index inside a List with Twitter Futures" in {
    val list = genList[String](100)
    val recipe = gen[Recipe]
    val id = gen[UUID]
    val updated = gen[String]

    val operation = for {
      insertDone <- insertRecipe(recipe, id, list).execute()
      update <- Recipes.update.where(_.url eqs recipe.url).modify(_.ingredients setIdx (3, updated)).execute()
      select <- Recipes.select(_.ingredients).where(_.url eqs recipe.url).get
    } yield select

    operation.successful {
      items => {
        items.isDefined shouldBe true
        items.get(3) shouldEqual updated
      }
    }
  }
}
| nosheenzaza/phantom-data-centric | phantom-dsl/src/test/scala/com/websudos/phantom/dsl/crud/ListOperatorsTest.scala | Scala | gpl-2.0 | 17,894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import java.io.File
import java.net.URI
import java.nio.file.Files
import java.util.UUID
import scala.language.implicitConversions
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.catalog.SessionCatalog.DEFAULT_DATABASE
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.FilterExec
import org.apache.spark.util.{UninterruptibleThread, Utils}
/**
* Helper trait that should be extended by all SQL test suites.
*
* This allows subclasses to plugin a custom `SQLContext`. It comes with test data
* prepared in advance as well as all implicit conversions used extensively by dataframes.
* To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
* Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtils
  extends SparkFunSuite
  with BeforeAndAfterAll
  with SQLTestData { self =>

  protected def sparkContext = spark.sparkContext

  // Whether to materialize all test data before the first test is run
  private var loadTestDataBeforeTests = false

  // Shorthand for running a query using our SQLContext
  protected lazy val sql = spark.sql _

  /**
   * A helper object for importing SQL implicits.
   *
   * Note that the alternative of importing `spark.implicits._` is not possible here.
   * This is because we create the `SQLContext` immediately before the first test is run,
   * but the implicits import is needed in the constructor.
   */
  protected object testImplicits extends SQLImplicits {
    protected override def _sqlContext: SQLContext = self.spark.sqlContext
  }

  /**
   * Materialize the test data immediately after the `SQLContext` is set up.
   * This is necessary if the data is accessed by name but not through direct reference.
   */
  protected def setupTestData(): Unit = {
    loadTestDataBeforeTests = true
  }

  // Loads the shared test data (if requested via setupTestData) once the suite starts.
  protected override def beforeAll(): Unit = {
    super.beforeAll()
    if (loadTestDataBeforeTests) {
      loadTestData()
    }
  }

  /**
   * Sets all SQL configurations specified in `pairs`, calls `f`, and then restore all SQL
   * configurations.
   *
   * @todo Probably this method should be moved to a more general place
   */
  protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
    val (keys, values) = pairs.unzip
    // Remember the current value (or absence) of every key so it can be restored.
    val currentValues = keys.map { key =>
      if (spark.conf.contains(key)) {
        Some(spark.conf.get(key))
      } else {
        None
      }
    }
    (keys, values).zipped.foreach(spark.conf.set)
    try f finally {
      keys.zip(currentValues).foreach {
        case (key, Some(value)) => spark.conf.set(key, value)
        case (key, None) => spark.conf.unset(key)
      }
    }
  }

  /**
   * Generates a temporary path without creating the actual file/directory, then pass it to `f`. If
   * a file/directory is created there by `f`, it will be delete after `f` returns.
   *
   * @todo Probably this method should be moved to a more general place
   */
  protected def withTempPath(f: File => Unit): Unit = {
    val path = Utils.createTempDir()
    path.delete()
    try f(path) finally Utils.deleteRecursively(path)
  }

  /**
   * Copy file in jar's resource to a temp file, then pass it to `f`.
   * This function is used to make `f` can use the path of temp file(e.g. file:/), instead of
   * path of jar's resource which starts with 'jar:file:/'
   */
  protected def withResourceTempPath(resourcePath: String)(f: File => Unit): Unit = {
    val inputStream =
      Thread.currentThread().getContextClassLoader.getResourceAsStream(resourcePath)
    withTempDir { dir =>
      val tmpFile = new File(dir, "tmp")
      Files.copy(inputStream, tmpFile.toPath)
      f(tmpFile)
    }
  }

  /**
   * Creates a temporary directory, which is then passed to `f` and will be deleted after `f`
   * returns.
   *
   * @todo Probably this method should be moved to a more general place
   */
  protected def withTempDir(f: File => Unit): Unit = {
    val dir = Utils.createTempDir().getCanonicalFile
    try f(dir) finally Utils.deleteRecursively(dir)
  }

  /**
   * Drops functions after calling `f`. A function is represented by (functionName, isTemporary).
   */
  protected def withUserDefinedFunction(functions: (String, Boolean)*)(f: => Unit): Unit = {
    // The original `catch { case cause: Throwable => throw cause }` was a no-op
    // rethrow; `try ... finally` alone has identical behavior and matches the
    // style of the sibling helpers below.
    try f finally {
      // If the test failed part way, we don't want to mask the failure by failing to remove
      // temp tables that never got created.
      functions.foreach { case (functionName, isTemporary) =>
        val withTemporary = if (isTemporary) "TEMPORARY" else ""
        spark.sql(s"DROP $withTemporary FUNCTION IF EXISTS $functionName")
        assert(
          !spark.sessionState.catalog.functionExists(FunctionIdentifier(functionName)),
          s"Function $functionName should have been dropped. But, it still exists.")
      }
    }
  }

  /**
   * Drops temporary table `tableName` after calling `f`.
   */
  protected def withTempView(tableNames: String*)(f: => Unit): Unit = {
    try f finally {
      // If the test failed part way, we don't want to mask the failure by failing to remove
      // temp tables that never got created.
      try tableNames.foreach(spark.catalog.dropTempView) catch {
        case _: NoSuchTableException =>
      }
    }
  }

  /**
   * Drops table `tableName` after calling `f`.
   */
  protected def withTable(tableNames: String*)(f: => Unit): Unit = {
    try f finally {
      tableNames.foreach { name =>
        spark.sql(s"DROP TABLE IF EXISTS $name")
      }
    }
  }

  /**
   * Drops view `viewName` after calling `f`.
   */
  protected def withView(viewNames: String*)(f: => Unit): Unit = {
    try f finally {
      viewNames.foreach { name =>
        spark.sql(s"DROP VIEW IF EXISTS $name")
      }
    }
  }

  /**
   * Creates a temporary database and switches current database to it before executing `f`. This
   * database is dropped after `f` returns.
   *
   * Note that this method doesn't switch current database before executing `f`.
   */
  protected def withTempDatabase(f: String => Unit): Unit = {
    val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}"

    try {
      spark.sql(s"CREATE DATABASE $dbName")
    } catch { case cause: Throwable =>
      fail("Failed to create temporary database", cause)
    }

    try f(dbName) finally {
      if (spark.catalog.currentDatabase == dbName) {
        spark.sql(s"USE ${DEFAULT_DATABASE}")
      }
      spark.sql(s"DROP DATABASE $dbName CASCADE")
    }
  }

  /**
   * Activates database `db` before executing `f`, then switches back to `default` database after
   * `f` returns.
   */
  protected def activateDatabase(db: String)(f: => Unit): Unit = {
    spark.sessionState.catalog.setCurrentDatabase(db)
    // Restore using the shared DEFAULT_DATABASE constant (value "default"),
    // consistent with withTempDatabase above, instead of a hard-coded literal.
    try f finally spark.sessionState.catalog.setCurrentDatabase(DEFAULT_DATABASE)
  }

  /**
   * Strip Spark-side filtering in order to check if a datasource filters rows correctly.
   */
  protected def stripSparkFilter(df: DataFrame): DataFrame = {
    val schema = df.schema
    val withoutFilters = df.queryExecution.sparkPlan.transform {
      case FilterExec(_, child) => child
    }

    spark.internalCreateDataFrame(withoutFilters.execute(), schema)
  }

  /**
   * Turn a logical plan into a `DataFrame`. This should be removed once we have an easier
   * way to construct `DataFrame` directly out of local data without relying on implicits.
   */
  protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
    Dataset.ofRows(spark, plan)
  }

  /**
   * Disable stdout and stderr when running the test. To not output the logs to the console,
   * ConsoleAppender's `follow` should be set to `true` so that it will honors reassignments of
   * System.out or System.err. Otherwise, ConsoleAppender will still output to the console even if
   * we change System.out and System.err.
   */
  protected def testQuietly(name: String)(f: => Unit): Unit = {
    test(name) {
      quietly {
        f
      }
    }
  }

  /**
   * Run a test on a separate `UninterruptibleThread`.
   *
   * Note: the `quietly` parameter shadows the `quietly` helper imported from
   * catalyst util; it selects between `testQuietly` and plain `test`.
   */
  protected def testWithUninterruptibleThread(name: String, quietly: Boolean = false)
    (body: => Unit): Unit = {
    val timeoutMillis = 10000
    @transient var ex: Throwable = null

    def runOnThread(): Unit = {
      val thread = new UninterruptibleThread(s"Testing thread for test $name") {
        override def run(): Unit = {
          try {
            body
          } catch {
            case NonFatal(e) =>
              ex = e
          }
        }
      }
      thread.setDaemon(true)
      thread.start()
      thread.join(timeoutMillis)
      if (thread.isAlive) {
        thread.interrupt()
        // If this interrupt does not work, then this thread is most likely running something that
        // is not interruptible. There is not much point to wait for the thread to termniate, and
        // we rather let the JVM terminate the thread on exit.
        fail(
          s"Test '$name' running on o.a.s.util.UninterruptibleThread timed out after" +
            s" $timeoutMillis ms")
      } else if (ex != null) {
        throw ex
      }
    }

    if (quietly) {
      testQuietly(name) { runOnThread() }
    } else {
      test(name) { runOnThread() }
    }
  }

  /**
   * This method is used to make the given path qualified, when a path
   * does not contain a scheme, this path will not be changed after the default
   * FileSystem is changed.
   */
  def makeQualifiedPath(path: String): URI = {
    val hadoopPath = new Path(path)
    val fs = hadoopPath.getFileSystem(spark.sessionState.newHadoopConf())
    fs.makeQualified(hadoopPath).toUri
  }
}
private[sql] object SQLTestUtils {

  /**
   * Compares two answer sets and returns `None` when they match, or
   * `Some(errorMessage)` describing both sides when they differ.
   *
   * @param sparkAnswer    rows produced by Spark
   * @param expectedAnswer rows the test expects
   * @param sort           whether to sort both sides (by string form) before comparing
   */
  def compareAnswers(
      sparkAnswer: Seq[Row],
      expectedAnswer: Seq[Row],
      sort: Boolean): Option[String] = {
    def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
      // Converts data to types that we can do equality comparison using Scala collections.
      // For BigDecimal type, the Scala type has a better definition of equality test (similar to
      // Java's java.math.BigDecimal.compareTo).
      // For binary arrays, we convert it to Seq to avoid of calling java.util.Arrays.equals for
      // equality test.
      // This function is copied from Catalyst's QueryTest
      val converted: Seq[Row] = answer.map { row =>
        val normalizedCells = row.toSeq.map {
          case d: java.math.BigDecimal => BigDecimal(d)
          case b: Array[Byte] => b.toSeq
          case other => other
        }
        Row.fromSeq(normalizedCells)
      }
      if (sort) converted.sortBy(_.toString()) else converted
    }

    val expected = prepareAnswer(expectedAnswer)
    val actual = prepareAnswer(sparkAnswer)
    if (expected == actual) {
      None
    } else {
      val errorMessage =
        s"""
           | == Results ==
           | ${sideBySide(
          s"== Expected Answer - ${expectedAnswer.size} ==" +:
            expected.map(_.toString()),
          s"== Actual Answer - ${sparkAnswer.size} ==" +:
            actual.map(_.toString())).mkString("\\n")}
      """.stripMargin
      Some(errorMessage)
    }
  }
}
| JerryLead/spark | sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala | Scala | apache-2.0 | 12,474 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.playjson
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream }
import java.nio.charset.StandardCharsets
import java.util.zip.{ GZIPInputStream, GZIPOutputStream }
import akka.actor.ExtendedActorSystem
import akka.event.Logging
import akka.serialization.{ BaseSerializer, SerializerWithStringManifest }
import play.api.libs.json._
import scala.annotation.tailrec
import scala.collection.immutable
/**
* Internal API
*
* Akka serializer using the registered play-json serializers and migrations
*/
private[lagom] final class PlayJsonSerializer(val system: ExtendedActorSystem, registry: JsonSerializerRegistry)
  extends SerializerWithStringManifest
  with BaseSerializer {

  import Compression._

  private val charset = StandardCharsets.UTF_8
  private val log = Logging.getLogger(system, getClass)
  private val conf = system.settings.config.getConfig("lagom.serialization.json")
  private val isDebugEnabled = log.isDebugEnabled

  // Payloads strictly larger than this (in bytes) are gzipped, but only for
  // serializers registered as compressed — see toBinary.
  private val compressLargerThan: Long = conf.getBytes("compress-larger-than")

  /** maps a manifestClassName to a suitable play-json Format */
  private val formatters: Map[String, Format[AnyRef]] = {
    registry.serializers.map((entry: JsonSerializer[_]) =>
      (entry.entityClass.getName, entry.format.asInstanceOf[Format[AnyRef]])).toMap
  }

  /** maps a manifestClassName to the serializer provided by the user */
  private val serializers: Map[String, JsonSerializer[_]] = {
    registry.serializers.map {
      entry => entry.entityClass.getName -> entry
    }.toMap
  }

  private def migrations: Map[String, JsonMigration] = registry.migrations

  // Manifest format: "className" or, when a migration is registered for the
  // class, "className#currentVersion".
  override def manifest(o: AnyRef): String = {
    val className = o.getClass.getName
    migrations.get(className) match {
      case Some(migration) => className + "#" + migration.currentVersion
      case None => className
    }
  }

  // Serializes via the registered play-json Format, gzipping the result when the
  // serializer was registered as compressed and the payload exceeds the threshold.
  override def toBinary(o: AnyRef): Array[Byte] = {
    val startTime = if (isDebugEnabled) System.nanoTime else 0L
    val (_, manifestClassName: String) = parseManifest(manifest(o))

    val format = formatters.getOrElse(
      manifestClassName,
      throw new RuntimeException(s"Missing play-json serializer for [$manifestClassName]")
    )

    val json = format.writes(o)
    val bytes: Array[Byte] = Json.stringify(json).getBytes(charset)

    val result = serializers(manifestClassName) match {
      case JsonSerializer.CompressedJsonSerializerImpl(_, _) if bytes.length > compressLargerThan => compress(bytes)
      case _ => bytes
    }

    if (isDebugEnabled) {
      val durationMicros = (System.nanoTime - startTime) / 1000

      log.debug(
        "Serialization of [{}] took [{}] µs, size [{}] bytes",
        o.getClass.getName, durationMicros, result.length
      )
    }
    result
  }

  // Deserializes, applying class-rename and value-transform migrations when the
  // stored version is older than the migration's current version.
  override def fromBinary(storedBytes: Array[Byte], manifest: String): AnyRef = {
    val startTime = if (isDebugEnabled) System.nanoTime else 0L
    val (fromVersion: Int, manifestClassName: String) = parseManifest(manifest)

    val renameMigration = migrations.get(manifestClassName)

    val migratedManifest = renameMigration match {
      case Some(migration) if migration.currentVersion > fromVersion =>
        migration.transformClassName(fromVersion, manifestClassName)
      case Some(migration) if migration.currentVersion < fromVersion =>
        throw new IllegalStateException(s"Migration version ${migration.currentVersion} is " +
          s"behind version $fromVersion of deserialized type [$manifestClassName]")
      case _ => manifestClassName
    }

    val transformMigration = migrations.get(migratedManifest)

    val format = formatters.getOrElse(
      migratedManifest,
      throw new RuntimeException(s"Missing play-json serializer for [$migratedManifest], " +
        s"defined are [${formatters.keys.mkString(", ")}]")
    )

    val bytes =
      if (isGZipped(storedBytes))
        decompress(storedBytes)
      else
        storedBytes

    // Json.parse always returns a JsValue and throws on malformed input, so the
    // previous `match` with an unreachable fallback branch has been removed.
    val json = Json.parse(bytes)

    val migratedJson = (transformMigration, json) match {
      case (Some(migration), js: JsObject) if migration.currentVersion > fromVersion =>
        migration.transform(fromVersion, js)
      case (Some(migration), js: JsValue) if migration.currentVersion > fromVersion =>
        migration.transformValue(fromVersion, js)
      case _ => json
    }

    val result = format.reads(migratedJson) match {
      case JsSuccess(obj, _) => obj
      case JsError(errors) =>
        throw new JsonSerializationFailed(
          s"Failed to de-serialize bytes with manifest [$migratedManifest]",
          errors,
          migratedJson
        )
    }

    if (isDebugEnabled) {
      val durationMicros = (System.nanoTime - startTime) / 1000

      log.debug(
        "Deserialization of [{}] took [{}] µs, size [{}] bytes",
        manifest, durationMicros, bytes.length
      )
    }
    result
  }

  // Splits "className#version" into (version, className); version defaults to 1
  // when no '#' is present.
  private def parseManifest(manifest: String) = {
    val i = manifest.lastIndexOf('#')
    val fromVersion = if (i == -1) 1 else manifest.substring(i + 1).toInt
    val manifestClassName = if (i == -1) manifest else manifest.substring(0, i)
    (fromVersion, manifestClassName)
  }
}
// This code is copied from JacksonJsonSerializer
private[lagom] object Compression {
  // Chunk size for streaming buffers (4 KiB).
  private final val BufferSize = 1024 * 4

  /** GZIP-compresses `bytes` and returns the compressed payload. */
  def compress(bytes: Array[Byte]): Array[Byte] = {
    val sink = new ByteArrayOutputStream(BufferSize)
    val gzip = new GZIPOutputStream(sink)
    try gzip.write(bytes)
    finally gzip.close()
    sink.toByteArray
  }

  /** Inflates a GZIP payload such as one produced by [[compress]]. */
  def decompress(bytes: Array[Byte]): Array[Byte] = {
    val source = new GZIPInputStream(new ByteArrayInputStream(bytes))
    val sink = new ByteArrayOutputStream()
    val chunk = new Array[Byte](BufferSize)
    try {
      var read = source.read(chunk)
      while (read != -1) {
        sink.write(chunk, 0, read)
        read = source.read(chunk)
      }
    } finally source.close()
    sink.toByteArray
  }

  /** True when `bytes` starts with the two-byte GZIP magic header (null-safe). */
  def isGZipped(bytes: Array[Byte]): Boolean = {
    if (bytes == null || bytes.length < 2) false
    else {
      bytes(0) == GZIPInputStream.GZIP_MAGIC.toByte &&
        bytes(1) == (GZIPInputStream.GZIP_MAGIC >> 8).toByte
    }
  }
}
| rstento/lagom | play-json/src/main/scala/com/lightbend/lagom/scaladsl/playjson/PlayJsonSerializer.scala | Scala | apache-2.0 | 6,501 |
package net.n12n.momo.couchbase
import akka.actor.{ Actor, ActorLogging, Cancellable, Props }
import net.n12n.momo.util.RichConfig._
object MetricSampleActor {
  /**
   * Reports that one invocation of the action identified by `name` took `time`.
   * The unit is presumably milliseconds, given the `_ms` series suffix used when
   * the aggregated samples are published — TODO confirm.
   */
  case class Action(name: String, time: Long)

  /** Internal timer message that triggers publication of aggregated samples. */
  case object Tick

  /** Props factory for [[MetricSampleActor]]. */
  def props(): Props = Props[MetricSampleActor]
}
/**
 * Aggregates [[MetricSampleActor.Action]] samples per action name and, on each
 * [[MetricSampleActor.Tick]], publishes the aggregates as `MetricActor.Save`
 * messages to its parent, then starts a fresh aggregation window.
 */
class MetricSampleActor extends Actor with ActorMonitoring with ActorLogging {
  import MetricSampleActor._
  import context.system
  import context.dispatcher

  // Parent actor path, sanitised so it can be embedded in a metric series name.
  val parentName = context.parent.path.toStringWithoutAddress.replaceAll("[/$.]", "_")
  // seriesKeyPrefix is provided by the ActorMonitoring mixin.
  val prefix = s"${seriesKeyPrefix}.${parentName}"
  var metrics = scala.collection.mutable.HashMap[String, Metric]()
  // Handle to the periodic Tick so it can be cancelled when the actor stops.
  // Previously the Cancellable was discarded; since preStart runs again on every
  // restart, each restart leaked an additional never-cancelled timer.
  private var tickTask: Option[Cancellable] = None

  override def preStart(): Unit = {
    // tickInterval is provided by the ActorMonitoring mixin.
    tickTask = Some(system.scheduler.schedule(tickInterval, tickInterval, self, Tick))
  }

  override def postStop(): Unit = {
    tickTask.foreach(_.cancel())
    tickTask = None
  }

  override def receive = {
    case Action(name, time) =>
      // Fold the sample into the running aggregate for this action name.
      metrics.get(name) match {
        case Some(m) =>
          m.count += 1
          m.totalTime += time
        case None =>
          metrics.put(name, new Metric(1, time))
      }
    case Tick =>
      // Publish all aggregates to the parent and reset the window.
      metrics.foreach { e =>
        e._2.toMetricPoints(s"${prefix}.${e._1}").foreach {
          context.parent ! MetricActor.Save(_)
        }
      }
      metrics = metrics.empty
  }

  /** Running call count and cumulative duration for a single action name. */
  class Metric(var count: Long, var totalTime: Long) {
    // Emits a gauge of the call count and, when any calls were seen, the mean
    // processing time per call.
    def toMetricPoints(prefix: String): Seq[MetricPoint] = {
      val now = System.currentTimeMillis()
      val c = MetricPoint(s"${prefix}-count_g", now, count)
      if (count > 0)
        Seq(c, MetricPoint(s"${prefix}-processing-time_ms", now, totalTime / count))
      else
        Seq(c)
    }
  }
}
| ngrossmann/momo | src/main/scala/net/n12n/momo/couchbase/MetricSampleActor.scala | Scala | apache-2.0 | 1,549 |
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright 2016-2020 Daniel Urban and contributors listed in NOTICE.txt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.choam
package bench
package util
/** Tiny xorshift pseudo-random number generator (shift triple 6 / 21 / 7).
 *  Not thread-safe: state advances on every call.
 */
class XorShift private (private[this] var state: Int) {

  /** Advances the generator and returns the next pseudo-random Int. */
  def nextInt(): Int = {
    var s = state
    s ^= s << 6
    s ^= s >>> 21
    s ^= s << 7
    state = s
    s
  }

  /** Combines two consecutive Int draws into one pseudo-random Long
   *  (first draw in the low half, second shifted into the high half). */
  def nextLong(): Long = {
    val lo = nextInt().toLong
    val hi = nextInt().toLong
    lo + (hi << 32)
  }
}

object XorShift {
  /** Generator seeded from the current thread's ThreadLocalRandom. */
  def apply(): XorShift =
    apply(java.util.concurrent.ThreadLocalRandom.current().nextInt())

  /** Generator with an explicit seed (deterministic sequence). */
  def apply(seed: Int): XorShift =
    new XorShift(seed)
}
| durban/exp-reagents | bench/src/main/scala/dev/tauri/choam/bench/util/XorShift.scala | Scala | apache-2.0 | 1,200 |
package specs.fast
import org.scalacheck.{Arbitrary, Gen}
import scala.util.Random
import org.json4s.ast.fast._
object Generators {
  /** Generator for JSON numbers backed by arbitrary BigInt values. */
  def jIntGenerator = Arbitrary.arbitrary[BigInt].map(JNumber.apply)

  /** Generator for JSON strings. */
  def jStringGenerator = Arbitrary.arbitrary[String].map(JString.apply)

  /** Generator for JSON booleans. */
  def jBooleanGenerator = Arbitrary.arbitrary[Boolean].map(JBoolean.apply)

  /** Generator for a single name/value field of a JSON object. */
  def jFieldGenerator: Gen[JField] = for {
    string <- Arbitrary.arbitrary[String]
    jValue <- jValueGenerator
  } yield JField(string, jValue)

  /** Generator for JSON arrays of arbitrary values. */
  def jArrayGenerator: Gen[JArray] =
    Gen.containerOf[Array, JValue](jValueGenerator).map(JArray.apply)

  /** Generator for JSON objects with arbitrary fields. */
  def jObjectGenerator: Gen[JObject] =
    Gen.containerOf[Array, JField](jFieldGenerator).map(JObject.apply)

  /** Generator for arbitrary JSON values.
   *
   *  Fixes two defects in the previous implementation:
   *  - it eagerly evaluated `jArrayGenerator`, which eagerly evaluated
   *    `jValueGenerator` again, recursing without bound before a single
   *    sample was drawn; the recursive branches are now wrapped in `Gen.lzy`;
   *  - it picked the value kind with `scala.util.Random`, which is not driven
   *    by ScalaCheck's seed and so made failures non-reproducible (and forced
   *    a sample of all five kinds just to keep one). `Gen.oneOf` keeps the
   *    uniform choice but stays deterministic per seed.
   */
  def jValueGenerator: Gen[JValue] =
    Gen.oneOf(
      jIntGenerator,
      jStringGenerator,
      jBooleanGenerator,
      Gen.lzy(jArrayGenerator),
      Gen.lzy(jObjectGenerator)
    )
}
| json4s/json4s-ast | jvm/src/test/scala/specs/fast/Generators.scala | Scala | apache-2.0 | 1,029 |
/*******************************************************************************
Copyright (c) 2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMCore
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
/** Abstract-heap model of the DOM `DOMLocator` interface for the static
 *  analyzer: it registers the constructor object, its prototype and a global
 *  binding. No callable API is modeled, so all semantic/def/use maps are empty.
 */
object DOMLocator extends DOM {
  private val name = "DOMLocator"
  /* predefined locations */
  val loc_cons = newSystemRecentLoc(name + "Cons")
  val loc_proto = newSystemRecentLoc(name + "Proto")
  val loc_ins = newSystemRecentLoc(name + "Ins")
  /* constructor or object*/
  private val prop_cons: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue))),
    ("@hasinstance", AbsConstValue(PropValueNullTop)),
    ("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
    ("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
  )
  /* prototype */
  private val prop_proto: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue)))
  )
  /* global */
  private val prop_global: List[(String, AbsProperty)] = List(
    (name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
  )
  // (location, property list) pairs installed into the initial abstract heap.
  def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
    (loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global)
  )
  def getSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }
  def getPreSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }
  def getDefMap(): Map[String, AccessFun] = {
    Map()
  }
  def getUseMap(): Map[String, AccessFun] = {
    Map()
  }
  /* semantics */
  // no function
  /* instance */
  //def instantiate() = Unit // not yet implemented
  // instance of DOMTypeInfo should have 'lineNumber', 'columnNumber', 'byteOffset', 'utf16Offset', 'relatedNode', 'uri' property
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMCore/DOMLocator.scala | Scala | bsd-3-clause | 2,573 |
package i0
package i1 {}
object i2 {
val i3: Any
}
abstract class i4 extends i0 {
override type i1 = Nothing
type i3 <: i2
type i4 <: i2
type i5 <: i3
type i6 <: i2
type i7 = { val i8: i4 }
val i9 = { val i1: i2 }
type i11 = List[this, i7]
trait i12 {
type i16 <: i11;
class i16 extends i1 {
type i2 <: i1
type i7 <: i5.i4 {
type i8
type i13 <: this.type
def i12(i13: i0) = i16 match {
case _: i7 =>
}
}
}
package i14 { abstract class i1 {
type i2 <: Singleton with i4 {
def i5(i9: i6): i6 = new i4(i5)
}
val i10: i8 = ???
implicit def i10 = i9
}
class i16 extends i4 {
type i1
val i10: Int = new i6
val i9 = new
val i12 = ''
}
class i16 extends i0 {}
class i2() extends i10
object i9 {
def i10(i11: i4): String =
super.i2 {}
} | lampepfl/dotty | tests/fuzzy/597a43d8b9d210fe6fb04bf3059811776e7f20d9.scala | Scala | apache-2.0 | 727 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.util
/** Runtime evaluation of Scala source snippets via the reflection ToolBox. */
object ScriptingUtils {
  import scala.reflect.runtime._
  import scala.tools.reflect.ToolBox

  /** Compiles and runs `code` with the given `imports` in scope, returning the
   *  value of the final expression cast to `T`.
   *  Note: the cast is unchecked — a type mismatch surfaces as a
   *  ClassCastException at the call site.
   */
  def eval[T](code: String, imports: Seq[String] = Seq("cc.factorie._")): T = {
    val mirror = universe.runtimeMirror(getClass.getClassLoader)
    val toolbox = mirror.mkToolBox()
    val importHeader = imports.map("import " + _).mkString("\n")
    val wrapped = importHeader + "\n{\n" + code + "\n}"
    toolbox.eval(toolbox.parse(wrapped)).asInstanceOf[T]
  }

  /** Small demonstration of `eval`, with inline and explicit imports. */
  def main(args: Array[String]): Unit = {
    val stringResult = eval[String](
      """
        val tmp = "asd" + "foo"
        tmp + "!"
      """)
    println(stringResult)
    val optimizer = eval[cc.factorie.optimize.GradientOptimizer]("import cc.factorie.optimize._; new AdaGrad(rate = 0.1)")
    println(optimizer)
    val optimizerViaImports = eval[cc.factorie.optimize.GradientOptimizer]("new AdaGrad(rate = 0.1)", Seq("cc.factorie.optimize._"))
    println(optimizerViaImports)
  }
}
| hlin117/factorie | src/main/scala/cc/factorie/util/ScriptingUtils.scala | Scala | apache-2.0 | 1,591 |
package unfiltered.response
import unfiltered.Cookie
/** Responder that attaches the given cookies to the outgoing response. */
case class ResponseCookies(cookies: Cookie*) extends Responder[Any] {
  def respond(res: HttpResponse[Any]) = res.cookies(cookies)
}
| softprops/Unfiltered | library/src/main/scala/response/cookies.scala | Scala | mit | 188 |
package org.scalex
package object model extends instances {
  // Domain-level aliases: the model keeps these as raw strings rather than
  // dedicated wrapper types.
  type TypeEntity = String
  type Flag = String
  type Variance = String
  type Expression = String
  type QualifiedName = String
}
| ornicar/scalex | src/main/scala/model/package.scala | Scala | mit | 194 |
package chandu0101.scalajs.react.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.Reusability
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom
import org.scalajs.dom._
import scala.scalajs.js
import scalacss.Defaults._
import scalacss.ScalaCssReact._
/** A 2-D point in pixels. */
case class RPoint(x: Double, y: Double)
/** Width/height pair, used both for grid snapping and min/max size constraints. */
case class RGrid(width: Double, height: Double)
/** A DOM element together with its edge offsets in pixels. */
case class RElementPosition(element: Element, top: Double = 0, left: Double = 0, right: Double = 0, bottom: Double = 0)
/** Top/left of an element's bounding rect, adjusted for page scroll. */
case class ClientRect(top: Double,left: Double)
object ReactDraggable {
  object Style extends StyleSheet.Inline {
    import dsl._
    // The dragged element is positioned via top/left (or a CSS transform),
    // so it must establish a positioning context.
    val draggable = style(position.relative)
    // Disable text selection while a drag is in progress.
    val draggableActive = style(userSelect := "none")
  }
  import RCustomStyles._
  // Map of CSS class name -> enabled flag.
  type CssClassType = Map[String, Boolean]
  object DomUtil {
    // Element's page coordinates: bounding rect plus current scroll offset.
    def offset(element: TopNode) = {
      val rect = element.getBoundingClientRect()
      var scrollTop = 0.0
      var scrollLeft = 0.0
      if(dom.document.body.scrollTop > 0) {
        scrollTop = dom.document.body.scrollTop.toInt
        scrollLeft = dom.document.body.scrollLeft.toInt
      } else if(dom.document.documentElement.scrollTop >0) { // for firefox
        scrollTop = dom.document.documentElement.scrollTop
        scrollLeft = dom.document.documentElement.scrollLeft
      }
      // NOTE(review): both branches are gated on scrollTop only, so a purely
      // horizontal scroll yields no scrollLeft correction — confirm intended.
      ClientRect(rect.top + scrollTop, rect.left + scrollLeft )
    }
    /**
     * https://developer.mozilla.org/en-US/docs/Web/API/Element.matches#Browser_compatibility
     * @param element dom element
     * @param selector css selector
     */
    def matchesSelector(element: js.Dynamic)(selector: String): Boolean = {
      // Pick the first matches-style method this browser actually defines.
      val funcName = Stream("matches",
                             "webkitMatchesSelector",
                             "mozMatchesSelector",
                             "msMatchesSelector",
                             "oMatchesSelector").filter(name => !js.isUndefined(element.selectDynamic(name))).head
      element.selectDynamic(funcName).call(element,selector).asInstanceOf[Boolean]
    }
    /**
     * onTouchStart - / works on most browsers
     * onmsgesturechange - works on ie10 on ms surface
     * @return whether device is touch enabled or not
     */
    def isTouchDevice = dom.window.hasOwnProperty("ontouchstart") || dom.window.hasOwnProperty("onmsgesturechange")
    // Translates the abstract phase name to the concrete DOM event name for
    // the input modality (touch vs mouse) of the triggering event.
    def dragEventFor(e:Event, name: String) = name match {
      case "start" => if (e.`type`.contains("touch")) "touchstart" else "mousedown"
      case "move" => if (e.`type`.contains("touch")) "touchmove" else "mousemove"
      case "end" => if (e.`type`.contains("touch")) "touchend" else "mouseup"
    }
    // Pointer position in client coordinates; for touch events the first touch is used.
    def getControlPosition(e: Event): RPoint =
      if (e.`type`.contains("touch")) {
        val position = e.asInstanceOf[TouchEvent].touches(0)
        RPoint(position.clientX, position.clientY)
      } else {
        val position = e.asInstanceOf[MouseEvent]
        RPoint(position.clientX, position.clientY)
      }
    // Touch starts count as "left click"; for mice only the primary button does.
    def isLeftClick(e: Event) =
      e.`type` == "touchstart" || e.asInstanceOf[MouseEvent].button == 0
  }
  /** Component props; see [[ReactDraggable.apply]] for per-field documentation. */
  case class Props(cancel: js.UndefOr[String],
                   onDrag: js.UndefOr[(Event, RElementPosition) => Callback],
                   useCSSTransforms: Boolean,
                   clsNames: CssClassType,
                   ref: js.UndefOr[String],
                   moveOnStartChange: Boolean,
                   grid: js.UndefOr[RGrid],
                   key: js.Any,
                   zIndex: Int,
                   axis: String,
                   onStop: js.UndefOr[(Event, RElementPosition) => Callback],
                   start: RPoint,
                   onStart: js.UndefOr[(Event, RElementPosition) => Callback],
                   onMouseDown: js.UndefOr[Event => Callback],
                   handle: js.UndefOr[String],
                   minConstraints: js.UndefOr[RGrid],
                   maxConstraints: js.UndefOr[RGrid])
  /**
   * @param dragging whether or not currently dragging
   * @param startX Start left of t.getDOmNode()
   * @param startY Start top of t.getDOmNode()
   * @param offsetX Offset between start left and mouse left
   * @param offsetY Offset between start top and mouse top
   * @param clientX Current left of this.getDOMNode()
   * @param clientY Current top of this.getDOMNode()
   * @param stopListening callback that unregisters the window-level move/end
   *                      listeners installed when a drag started
   */
  case class State(
                    dragging: Boolean,
                    startX: Int,
                    startY: Int,
                    offsetX: Int,
                    offsetY: Int,
                    clientX: Int,
                    clientY: Int,
                    stopListening: js.UndefOr[Callback]
                    )
  // Reference-equality reusability: re-render only when the props/state
  // instances themselves change.
  implicit val r0 = Reusability.byRef[Props]
  implicit val r1 = Reusability.byRef[State]
class Backend(t: BackendScope[Props, State]) {
def pos(S: State) =
RElementPosition(t.getDOMNode(), top = S.clientY, left = S.clientX)
def handleDragStart(P: Props)(e: Event): Callback = {
val moveEventType = DomUtil.dragEventFor(e, "move")
val endEventType = DomUtil.dragEventFor(e, "end")
val dragPoint = DomUtil.getControlPosition(e)
val mouseDown: Callback =
P.onMouseDown.asCbo(e)
val onStart: Callback =
t.state.flatMap(S => P.onStart.asCbo(e, pos(S)))
val startDrag = t.modState { S =>
val u1 = Events.register(dom.window, moveEventType, handleDrag(P))
val u2 = Events.register(dom.window, endEventType, handleDragEnd(P))
S.copy(
dragging = true,
offsetX = dragPoint.x.toInt - S.clientX,
offsetY = dragPoint.y.toInt - S.clientY,
stopListening = u1 >> u2
)
}
val matches: Boolean = {
val matchesTarget = DomUtil.matchesSelector(e.target.asInstanceOf[js.Dynamic]) _
P.handle.fold(true)(matchesTarget) && P.cancel.fold(true)(matchesTarget)
}
mouseDown << (onStart >> startDrag).conditionally(DomUtil.isLeftClick(e) && matches).void
}
def handleDrag(P: Props)(e: Event): Callback = {
val dragPoint = DomUtil.getControlPosition(e)
val c1 = t.modState{ S =>
// calculate top and left
var clientX = S.startX + (dragPoint.x - S.offsetX)
var clientY = S.startY + (dragPoint.y - S.offsetY)
// Snap to grid if prop has been provided
P.grid.foreach{ (grid: RGrid) =>
val directionX = if (clientX < S.clientX) -1 else 1
val directionY = if (clientY < S.clientY) -1 else 1
clientX =
if (math.abs(clientX - S.clientX) >= grid.width)
S.clientX + (grid.width * directionX)
else S.clientX
clientY =
if (math.abs(clientY - S.clientY) >= grid.height)
S.clientY + (grid.height * directionY)
else S.clientY
}
//min/max contraints
P.minConstraints.foreach{ (min: RGrid) =>
clientX = math.max(min.width, clientX)
clientY = math.max(min.height, clientY)
}
P.maxConstraints.foreach{ (max: RGrid) =>
clientX = math.min(max.width, clientX)
clientY = math.min(max.height, clientY)
}
// Update top and left
S.copy(clientX = clientX.toInt, clientY = clientY.toInt)
}
//call event handler
val c2 = t.state.flatMap(S => P.onDrag.asCbo(e, pos(S)))
c1 >> c2
}
def handleDragEnd(P: Props)(e: Event): Callback = {
val unregister: Callback =
t.state.flatMap(_.stopListening.asCbo)
val onStop: Callback =
t.state.flatMap(S => P.onStop.asCbo(e, pos(S)))
val stopDragging: Callback =
t.modState(_.copy(dragging = false, stopListening = js.undefined))
unregister >> onStop >> stopDragging
}
def canDragY(P: Props): Boolean =
P.axis == "both" || P.axis == "y"
def canDragX(P: Props): Boolean =
P.axis == "both" || P.axis == "x"
private val transforms = Seq(^.transform, mozTransform, WebkitTransform, msTransform)
def positionToCSSTransform(left: Int, top: Int): TagMod =
transforms map (_ := s"translate(${left}px, ${top}px)")
def render(P: Props, S: State, C: PropsChildren) = {
val topValue: Int =
if (canDragY(P)) S.clientY else S.startY
val leftValue: Int =
if (canDragX(P)) S.clientX else S.startX
val stl: TagMod =
if (P.useCSSTransforms) positionToCSSTransform(leftValue, topValue)
else Seq(^.top := topValue, ^.left := leftValue)
<.div(
Style.draggable,
S.dragging ?= Style.draggableActive,
stl,
^.onMouseDown ==> handleDragStart(P),
^.onTouchStart ==> handleDragStart(P),
^.onMouseUp ==> handleDragEnd(P),
^.onTouchEnd ==> handleDragEnd(P)
)(C)
}
}
def newStateFrom(P: Props): State =
State(
dragging = false,
startX = 0,
startY = 0,
clientX = P.start.x.toInt,
clientY = P.start.y.toInt,
offsetX = 0,
offsetY = 0,
stopListening = js.undefined
)
  // Component definition: state is re-derived from incoming props when
  // `moveOnStartChange` is set, and the window listeners held in
  // `stopListening` are cleaned up on unmount.
  val component = ReactComponentB[Props]("ReactDraggable")
    .initialState_P(newStateFrom)
    .renderBackend[Backend]
    .componentWillReceiveProps{
      case ComponentWillReceiveProps(_$, nextProps) =>
        _$.setState(newStateFrom(nextProps)).conditionally(nextProps.moveOnStartChange).void
    }
    .configure(Reusability.shouldComponentUpdate)
    .componentWillUnmount($ => $.state.stopListening.getOrElse(Callback.empty))
    .build
  /**
   *
   * @param cancel specifies a selector to be used to prevent drag initialization.
   * @param onDrag Called while dragging
   * @param useCSSTransforms if true will place the element using translate(x, y)
   *                         rather than CSS top/left.
   *                         This generally gives better performance, and is useful in combination with
   *                         other layout systems that use translate(), such as react-grid-layout.
   * @param clsNames css class names map
   * @param ref ref for this component
   * @param moveOnStartChange tells the Draggable element to reset its position
   *                          if the `start` parameters are changed. By default, if the `start`
   *                          parameters change, the Draggable element still remains where it started
   *                          or was dragged to.
   * @param grid specifies the x and y that dragging should snap to.
   * @param key key for this react component
   * @param zIndex specifies the zIndex to use while dragging.
   * @param axis determines which axis the draggable can move.(both,x,y)
   * @param onStop Called when dragging stops
   * @param start specifies the x and y that the dragged item should start at
   * @param onStart Called when dragging starts.
   * @param onMouseDown * A workaround option which can be passed if onMouseDown needs to be accessed,
   *                    since it'll always be blocked (due to that there's internal use of onMouseDown)
   * @param handle specifies a selector to be used as the handle that initiates drag.
   * @param children element(s) rendered inside, and moved with, the draggable div
   * @return the configured draggable component wrapping `children`
   */
  def apply(cancel: js.UndefOr[String] = js.undefined,
            onDrag: js.UndefOr[(Event, RElementPosition) => Callback] = js.undefined,
            useCSSTransforms: Boolean = false,
            clsNames: CssClassType = Map(),
            ref: js.UndefOr[String] = js.undefined,
            moveOnStartChange: Boolean = false,
            grid: js.UndefOr[RGrid] = js.undefined,
            key: js.Any = {},
            zIndex: Int = 0,
            axis: String = "both",
            onStop: js.UndefOr[(Event, RElementPosition) => Callback] = js.undefined,
            start: RPoint = RPoint(0, 0),
            onStart: js.UndefOr[(Event, RElementPosition) => Callback] = js.undefined,
            onMouseDown: js.UndefOr[Event => Callback] = js.undefined,
            handle: js.UndefOr[String] = js.undefined,
            minConstraints: js.UndefOr[RGrid] = js.undefined,
            maxConstraints: js.UndefOr[RGrid] = js.undefined)
           (children: ReactNode) =
    component.set(key, ref)(
      Props(
        cancel = cancel,
        onDrag = onDrag,
        useCSSTransforms = useCSSTransforms,
        clsNames = clsNames,
        ref = ref,
        moveOnStartChange = moveOnStartChange,
        grid = grid,
        key = key,
        zIndex = zIndex,
        axis = axis,
        onStop = onStop,
        start = start,
        onStart = onStart,
        onMouseDown = onMouseDown,
        handle = handle,
        minConstraints = minConstraints,
        maxConstraints = maxConstraints
      ),
      children
    )
}
| elacin/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/ReactDraggable.scala | Scala | apache-2.0 | 12,573 |
package pl.msitko.xml.dsl
trait Examples {
val inputWithComments =
"""<?xml version="1.0" encoding="UTF-8"?>
|<a>
| <c1>
| <f>item1</f>
| <g>item2</g>
| </c1>
| <c1>
| <!--
|something
|something more
|even more
|
|-
|-->
| <f>item1</f>
| <h>item2</h>
| </c1>
|</a>""".stripMargin
val outputWithComments =
"""<?xml version="1.0" encoding="UTF-8"?>
|<a><c1><f>item1</f><g>item2</g></c1><c1><!--
|something
|something more
|even more
|
|-
|--><f>item1</f><h>item2</h></c1></a>""".stripMargin
}
| note/xml-lens | optics/shared/src/test/scala/pl/msitko/xml/dsl/Examples.scala | Scala | mit | 681 |
// @SOURCE:E:/GitHub/Backgammon-Web-master_for_2.1.5/Backgammon-Web-master/conf/routes
// @HASH:353440427f3d1a0ce23433309259e3c33c6493ec
// @DATE:Mon Jan 13 10:56:46 CET 2014
import play.core._
import play.core.Router._
import play.core.j._
import play.api.mvc._
import play.libs.F
import Router.queryString
// NOTE(review): generated Play router (see the @SOURCE/@HASH/@DATE header
// above) — change conf/routes and regenerate instead of editing by hand.
object Routes extends Router.Routes {
private var _prefix = "/"
def setPrefix(prefix: String) {
  _prefix = prefix
  List[(String,Routes)]().foreach {
    case (p, router) => router.setPrefix(prefix + (if(prefix.endsWith("/")) "" else "/") + p)
  }
}
def prefix = _prefix
lazy val defaultPrefix = { if(Routes.prefix.endsWith("/")) "" else "/" }
// @LINE:5
private[this] lazy val controllers_Application_index0 = Route("GET", PathPattern(List(StaticPart(Routes.prefix))))
// @LINE:6
private[this] lazy val controllers_Application_click1 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("game"))))
// @LINE:7
private[this] lazy val controllers_Application_json2 = Route("POST", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("json"))))
// @LINE:10
private[this] lazy val controllers_Assets_at3 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("assets/"),DynamicPart("file", """.+""",false))))
// (method, path, handler) triples used by Play's documentation endpoints.
def documentation = List(("""GET""", prefix,"""controllers.Application.index()"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """game""","""@controllers.Application@.click()"""),("""POST""", prefix + (if(prefix.endsWith("/")) "" else "/") + """json""","""@controllers.Application@.json()"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """assets/$file<.+>""","""controllers.Assets.at(path:String = "/public", file:String)""")).foldLeft(List.empty[(String,String,String)]) { (s,e) => e.asInstanceOf[Any] match {
  case r @ (_,_,_) => s :+ r.asInstanceOf[(String,String,String)]
  case l => s ++ l.asInstanceOf[List[(String,String,String)]]
}}
// Dispatch table: one case per route declared in conf/routes.
def routes:PartialFunction[RequestHeader,Handler] = {
// @LINE:5
case controllers_Application_index0(params) => {
   call { 
        invokeHandler(controllers.Application.index(), HandlerDef(this, "controllers.Application", "index", Nil,"GET", """""", Routes.prefix + """"""))
   }
}
// @LINE:6
case controllers_Application_click1(params) => {
   call { 
        invokeHandler(play.api.Play.maybeApplication.map(_.global).getOrElse(play.api.DefaultGlobal).getControllerInstance(classOf[controllers.Application]).click(), HandlerDef(this, "controllers.Application", "click", Nil,"GET", """""", Routes.prefix + """game"""))
   }
}
// @LINE:7
case controllers_Application_json2(params) => {
   call { 
        invokeHandler(play.api.Play.maybeApplication.map(_.global).getOrElse(play.api.DefaultGlobal).getControllerInstance(classOf[controllers.Application]).json(), HandlerDef(this, "controllers.Application", "json", Nil,"POST", """""", Routes.prefix + """json"""))
   }
}
// @LINE:10
case controllers_Assets_at3(params) => {
   call(Param[String]("path", Right("/public")), params.fromPath[String]("file", None)) { (path, file) =>
        invokeHandler(controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]),"GET", """ Map static resources from the /public folder to the /assets URL path""", Routes.prefix + """assets/$file<.+>"""))
   }
}
}
}
| bfcmyxa/Backgammon-Web-master_for_2.1.5 | target/scala-2.10/src_managed/main/routes_routing.scala | Scala | gpl-2.0 | 3,559 |
package dotty.tools.dotc
package transform
import core._
import Contexts._, Symbols._, Types._, Flags._, Decorators._, StdNames._, Constants._
import SymDenotations.SymDenotation
import TreeTransforms._
import SymUtils._
import ast.untpd
import ast.Trees._
/** Expand SAM closures that cannot be represented by the JVM as lambdas to anonymous classes.
* These fall into five categories
*
* 1. Partial function closures, we need to generate a isDefinedAt method for these.
* 2. Closures implementing non-trait classes.
* 3. Closures implementing classes that inherit from a class other than Object
* (a lambda cannot not be a run-time subtype of such a class)
* 4. Closures that implement traits which run initialization code.
* 5. Closures that get synthesized abstract methods in the transformation pipeline. These methods can be
* (1) superaccessors, (2) outer references, (3) accessors for fields.
*/
class ExpandSAMs extends MiniPhaseTransform { thisTransformer =>
  override def phaseName = "expandSAMs"
  import ast.tpd._
  /** Is SAMType `cls` also a SAM under the rules of the JVM? */
  def isJvmSam(cls: ClassSymbol)(implicit ctx: Context): Boolean =
    cls.is(NoInitsTrait) &&
    cls.superClass == defn.ObjectClass &&
    cls.directlyInheritedTraits.forall(_.is(NoInits)) &&
    !ExplicitOuter.needsOuterIfReferenced(cls) &&
    cls.typeRef.fields.isEmpty // Superaccessors already show up as abstract methods here, so no test necessary
  // Rewrites closure blocks of the shape `{ def $anonfun = ...; closure($anonfun) }`:
  // plain functions and JVM-representable SAMs are left as lambdas
  // (PartialFunction additionally gets a synthesized isDefinedAt); anything
  // else is expanded into an anonymous class implementing the SAM type.
  override def transformBlock(tree: Block)(implicit ctx: Context, info: TransformerInfo): Tree = tree match {
    case Block(stats @ (fn: DefDef) :: Nil, Closure(_, fnRef, tpt)) if fnRef.symbol == fn.symbol =>
      tpt.tpe match {
        case NoType => tree // it's a plain function
        case tpe @ SAMType(_) if isJvmSam(tpe.classSymbol.asClass) =>
          if (tpe isRef defn.PartialFunctionClass) toPartialFunction(tree)
          else tree
        case tpe =>
          cpy.Block(tree)(stats,
            AnonClass(tpe :: Nil, fn.symbol.asTerm :: Nil, nme.apply :: Nil))
      }
    case _ =>
      tree
  }
  // Expands a PartialFunction closure into an anonymous class with both an
  // `apply` (the original body) and a synthesized `isDefinedAt` that mirrors
  // the match, answering true for every original case and false by default.
  private def toPartialFunction(tree: Block)(implicit ctx: Context, info: TransformerInfo): Tree = {
    val Block(
          (applyDef @ DefDef(nme.ANON_FUN, Nil, List(List(param)), _, _)) :: Nil,
          Closure(_, _, tpt)) = tree
    val applyRhs: Tree = applyDef.rhs
    val applyFn = applyDef.symbol.asTerm
    val MethodType(paramNames, paramTypes) = applyFn.info
    // `isDefinedAt` shares the parameter list of `apply` but returns Boolean.
    val isDefinedAtFn = applyFn.copy(
        name  = nme.isDefinedAtImpl,
        flags = Synthetic | Method,
        info = MethodType(paramNames, paramTypes, defn.BooleanType)).asTerm
    val tru = Literal(Constant(true))
    def isDefinedAtRhs(paramRefss: List[List[Tree]]) = applyRhs match {
      case Match(selector, cases) =>
        assert(selector.symbol == param.symbol)
        val paramRef = paramRefss.head.head
        // Again, the alternative
        //
        //     val List(List(paramRef)) = paramRefs
        //
        // fails with a similar self instantiation error
        def translateCase(cdef: CaseDef): CaseDef =
          cpy.CaseDef(cdef)(body = tru).changeOwner(applyFn, isDefinedAtFn)
        val defaultSym = ctx.newSymbol(isDefinedAtFn, nme.WILDCARD, Synthetic, selector.tpe.widen)
        val defaultCase =
          CaseDef(
            Bind(defaultSym, Underscore(selector.tpe.widen)),
            EmptyTree,
            Literal(Constant(false)))
        cpy.Match(applyRhs)(paramRef, cases.map(translateCase) :+ defaultCase)
      case _ =>
        // Body is not a match, so the function is total: always defined.
        tru
    }
    val isDefinedAtDef = transformFollowingDeep(DefDef(isDefinedAtFn, isDefinedAtRhs(_)))
    val anonCls = AnonClass(tpt.tpe :: Nil, List(applyFn, isDefinedAtFn), List(nme.apply, nme.isDefinedAt))
    cpy.Block(tree)(List(applyDef, isDefinedAtDef), anonCls)
  }
}
| folone/dotty | src/dotty/tools/dotc/transform/ExpandSAMs.scala | Scala | bsd-3-clause | 3,821 |
/*
* Copyright 2016 by Simba Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.spark.sql.simba.execution.join
import org.apache.spark.sql.simba.execution.SimbaPlan
import org.apache.spark.sql.simba.partitioner.MapDPartition
import org.apache.spark.sql.simba.spatial.Point
import org.apache.spark.sql.simba.util.{NumberUtil, ShapeUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, JoinedRow, Literal}
import org.apache.spark.sql.execution.SparkPlan
import scala.collection.mutable
import scala.util.Random
/**
* Created by dongx on 11/11/16.
*/
/** Distance join executed with plain Spark shuffles: rows are replicated so
 *  that every (left bucket, right bucket) pair meets in exactly one of
 *  num_partitions^2 partitions, where a nested-loop scan emits all pairs
 *  whose point distance is <= r.
 */
case class BDJSpark(left_key: Expression, right_key: Expression, l: Literal,
                    left: SparkPlan, right: SparkPlan) extends SimbaPlan {
  override def output: Seq[Attribute] = left.output ++ right.output

  // Buckets per side; the shuffle below uses num_partitions^2 partitions.
  final val num_partitions = simbaSessionState.simbaConf.joinPartitions
  // Join distance threshold extracted from the literal.
  final val r = NumberUtil.literalToDouble(l)

  override protected def doExecute(): RDD[InternalRow] = {
    // Tag each row with its side: 0 = left, 1 = right.
    val tot_rdd = left.execute().map((0, _)).union(right.execute().map((1, _)))
    val tot_dup_rdd = tot_rdd.flatMap {x =>
      val rand_no = new Random().nextInt(num_partitions)
      var ans = mutable.ListBuffer[(Int, (Int, InternalRow))]()
      if (x._1 == 0) {
        // Left row lands in random bucket `rand_no` and is copied to every
        // partition in that bucket's row of the bucket grid.
        val base = rand_no * num_partitions
        for (i <- 0 until num_partitions)
          ans += ((base + i, x))
      } else {
        // Right row lands in random bucket `rand_no` and is copied to that
        // bucket's column, so it meets every left bucket exactly once.
        for (i <- 0 until num_partitions)
          ans += ((i * num_partitions + rand_no, x))
      }
      ans
    }
    val tot_dup_partitioned = MapDPartition(tot_dup_rdd, num_partitions * num_partitions)
    tot_dup_partitioned.mapPartitions {iter =>
      // Materialize both sides of this partition, extracting the join points.
      var left_data = mutable.ListBuffer[(Point, InternalRow)]()
      var right_data = mutable.ListBuffer[(Point, InternalRow)]()
      while (iter.hasNext) {
        val data = iter.next()
        if (data._2._1 == 0) {
          val tmp_point = ShapeUtils.getShape(left_key, left.output, data._2._2).asInstanceOf[Point]
          left_data += ((tmp_point, data._2._2))
        } else {
          val tmp_point = ShapeUtils.getShape(right_key, right.output, data._2._2).asInstanceOf[Point]
          right_data += ((tmp_point, data._2._2))
        }
      }
      // Nested-loop distance test within the partition.
      val joined_ans = mutable.ListBuffer[InternalRow]()
      left_data.foreach {left =>
        right_data.foreach {right =>
          if (left._1.minDist(right._1) <= r) {
            joined_ans += new JoinedRow(left._2, right._2)
          }
        }
      }
      joined_ans.iterator
    }
  }

  override def children: Seq[SparkPlan] = Seq(left, right)
} | InitialDLab/Simba | src/main/scala/org/apache/spark/sql/simba/execution/join/BDJSpark.scala | Scala | apache-2.0 | 3,189 |
package models
import play.api.mvc.PathBindable
import uk.gov.hmrc.domain.EmpRef
import scala.util.{Failure, Success, Try}
/** Play path binder that (de)serialises an EmpRef of the form
 *  "taxOfficeNumber/taxOfficeReference" in URL path segments. */
object EmprefBindable extends PathBindable[EmpRef] {
  // Left carries the parse-failure message so Play can render a 400 response.
  override def bind(key: String, value: String): Either[String, EmpRef] = Try(EmpRef.fromIdentifiers(value)) match {
    case Success(e) => Right(e)
    case Failure(t) => Left(t.getMessage)
  }
  override def unbind(key: String, e: EmpRef): String = s"${e.taxOfficeNumber}/${e.taxOfficeReference}"
}
/** Implicit binders to be imported by the routes compiler. */
object PlayBindings {
  implicit val emprefBindable = EmprefBindable
} | UKGovernmentBEIS/das-alpha-client-mock | src/main/scala/models/PlayBindings.scala | Scala | mit | 552 |
package breeze.linalg.support.codegen
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.linalg.operators._
import java.io.{File, FileOutputStream, PrintStream}
/**
* This class (and all classes in this package) are ugly spaghetti code for
* generating the customized loops for all the operators. I'm sorry it's so messy.
* @author dlwh
*/
object GenOperators {
def genBinaryAdaptor(name: String, typeA: String, typeB: String, op: OpType, typeR: String, from: String) = (
"""implicit val %s: BinaryOp[%s, %s, %s, %s] = %s""".format(
name,
typeA,
typeB,
op.getClass.getName.replaceAll("[$]",""),
typeR,
from
)
)
  // Renders an implicit BinaryRegistry val. Unlike genBinaryAdaptor this uses
  // the op's *simple* class name — presumably the operator types are imported
  // at the generated code's use site; TODO confirm.
  def genBinaryRegistryAdaptor(name: String, typeA: String, typeB: String, op: OpType, typeR: String, from: String) = (
    """implicit val %s: BinaryRegistry[%s, %s, %s, %s] = %s""".format(
      name,
      typeA,
      typeB,
      op.getClass.getSimpleName.replaceAll("[$]",""),
      typeR,
      from
    )
  )

  // Renders a registry val plus an implicit def that narrows it to subtypes of
  // the operand types. Template placeholders (Name/TypeA/...) are substituted
  // textually; note replaceAll treats the arguments as regex/replacement
  // patterns, so names containing '$' or '\' would be mangled.
  def genBinaryRegistryAdaptorDef(name: String, typeA: String, typeB: String, op: OpType, typeR: String, from: String) = (
    """val Name: BinaryRegistry[TypeA, TypeB, TypeOp, TypeR] = From
  implicit def Name_def[A <: TypeA, B <: TypeB]:BinaryOp[A, B, TypeOp, TypeR] = From.asInstanceOf[BinaryOp[A, B, TypeOp, TypeR]]
  """.replaceAll("Name",name)
      .replaceAll("TypeA", typeA)
      .replaceAll("TypeB", typeB)
      .replaceAll("TypeOp", op.getClass.getSimpleName.replaceAll("[$]",""))
      .replaceAll("TypeR", typeR)
      .replaceAll("From",from)
  )

  // Renders an in-place update operator class (apply returns Unit) with the
  // supplied LOOP body, plus an implicit instance. Uses the op's fully
  // qualified name.
  def genBinaryUpdateOperator(name: String, typeA: String, typeB: String, op: OpType)(loop: String) = {
    """
class Name private[linalg] () extends BinaryUpdateOp[TypeA, TypeB, TypeOp] {
  def apply(a: TypeA, b: TypeB) {
    LOOP
  }
}
implicit val Name = new Name ()
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
  }

  // Like genBinaryUpdateOperator, but exposes the instance through an implicit
  // def covariant in the operand subtypes; uses the op's simple name.
  def genBinaryUpdateOperatorDef(name: String, typeA: String, typeB: String, op: OpType)(loop: String) = {
    """
class Name private[linalg] () extends BinaryUpdateOp[TypeA, TypeB, TypeOp] {
  def apply(a: TypeA, b: TypeB) {
    LOOP
  }
}
val Name = new Name ()
implicit def Name_def[A <: TypeA, B <: TypeB]:BinaryUpdateOp[A, B, TypeOp] = (
  Name.asInstanceOf[BinaryUpdateOp[A, B, TypeOp]]
)
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getSimpleName.replaceAll("[$]","")).replaceAll("LOOP",loop)
  }

  // Renders a value-returning binary operator class (apply returns Result)
  // with the supplied LOOP body, plus an implicit instance.
  def genBinaryOperator(name: String, typeA: String, typeB: String, op: OpType, result: String)(loop: String) = {
    """
class Name private[linalg] () extends BinaryOp[TypeA, TypeB, TypeOp, Result] {
  def apply(a: TypeA, b: TypeB) = {
    LOOP
  }
}; implicit val Name = new Name ()
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("Result",result).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
  }
def genBinaryUpdateRegistryDef(name: String, typeA: String, typeB: String, op: OpType)(loop: String) = {
"""
class Name private[linalg] () extends BinaryUpdateRegistry[TypeA, TypeB, TypeOp] {
override def bindingMissing(a: TypeA, b: TypeB) {
LOOP
}
}
val Name = new Name ()
implicit def Name_def[A <: TypeA, B <: TypeB]:BinaryUpdateOp[A, B, TypeOp] = (
Name.asInstanceOf[BinaryUpdateOp[A, B, TypeOp]]
)""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
}
def genBinaryUpdateRegistry(name: String, typeA: String, typeB: String, op: OpType)(loop: String) = {
"""
class Name private[linalg] () extends BinaryUpdateRegistry[TypeA, TypeB, TypeOp] {
override def bindingMissing(a: TypeA, b: TypeB) {
LOOP
}
}; implicit val Name = new Name ()
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
}
def genBinaryRegistry(name: String, typeA: String, typeB: String, op: OpType, result: String)(loop: String) = {
"""
class Name private[linalg] () extends BinaryRegistry[TypeA, TypeB, TypeOp, Result] {
override def bindingMissing(a: TypeA, b: TypeB) = {
LOOP
}
}; implicit val Name = new Name()
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("Result",result).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
}
def genBinaryRegistryDef(name: String, typeA: String, typeB: String, op: OpType, result: String)(loop: String) = {
"""
class Name private[linalg] () extends BinaryRegistry[TypeA, TypeB, TypeOp, Result] {
override def bindingMissing(a: TypeA, b: TypeB) = {
LOOP
}
};
val Name = new Name()
implicit def Name_def[A <: TypeA, B <: TypeB]:BinaryOp[A, B, TypeOp, Result] = (
Name.asInstanceOf[BinaryOp[A, B, TypeOp, Result]]
)
""".replaceAll("TypeA",typeA).replaceAll("Name",name).replaceAll("Result",result).replaceAll("TypeB", typeB).replaceAll("TypeOp", op.getClass.getName.replaceAll("[$]","")).replaceAll("LOOP",loop)
}
def register(owner: String, genName: String, myName: String) = {
"%s.%s.register(%s)".format(owner, genName, myName)
}
def binaryUpdateDV_scalar_loop(op: (String,String)=>String):String = {
"""val ad = a.data
var i = 0
var aoff = a.offset
while(i < a.length) {
ad(aoff) = %s
aoff += a.stride
i += 1
}
""".format(op("ad(aoff)","b")).replaceAll(" "," ")
}
def binaryUpdateDV_DV_loop(op: (String,String)=>String):String = {
"""require(b.length == a.length, "Vectors must be the same length!")
val ad = a.data
val bd = b.data
var aoff = a.offset
var boff = b.offset
var i = 0
while(i < a.length) {
ad(aoff) = %s
aoff += a.stride
boff += b.stride
i += 1
}
""".format(op("ad(aoff)","bd(boff)")).replaceAll(" "," ")
}
def binaryUpdateDV_V_loop(op: (String,String)=>String, zeroIsIdempotent: Boolean):String = {
"""require(b.length == a.length, "Vectors must be the same length!")
for( (i,v) <- b.%s) {
a(i) = %s
}
""".format(if(zeroIsIdempotent) "activeIterator" else "iterator", op("a(i)","v")).replaceAll(" "," ")
}
def binaryUpdateV_S_loop(op: (String,String)=>String, zeroIsIdempotent: Boolean):String = {
"""
for( (i,v) <- a.%s) {
a(i) = %s
}
""".format(if(zeroIsIdempotent) "activeIterator" else "iterator", op("v","b")).replaceAll(" "," ")
}
def binaryUpdateDM_scalar_loop(op: (String, String)=>String):String = {
"""
val ad = a.data
var c = 0
while(c < a.cols) {
var r = 0
while(r < a.rows) {
ad(a.linearIndex(r, c)) = %s
r += 1
}
c += 1
}
""".format(op("ad(a.linearIndex(r,c))","b"))
}
def binaryUpdateDM_DM_loop(op: (String, String)=>String):String = {
"""
| require(a.rows == b.rows, "Matrices must have same number of rows!")
| require(a.cols == b.cols, "Matrices must have same number of cols!")
| val ad = a.data
| val bd = b.data
| var c = 0
| while(c < a.cols) {
| var r = 0
| while(r < a.rows) {
| ad(a.linearIndex(r, c)) = %s
| r += 1
| }
| c += 1
| }
""".stripMargin.format(op("ad(a.linearIndex(r,c))","bd(b.linearIndex(r,c))"))
}
def binaryUpdateDM_M_loop(op: (String, String)=>String, ignored: Boolean):String = {
"""
| require(a.rows == b.rows, "Matrices must have same number of rows!")
| require(a.cols == b.cols, "Matrices must have same number of cols!")
| val ad = a.data
| var c = 0
| while(c < a.cols) {
| var r = 0
| while(r < a.rows) {
| ad(a.linearIndex(r, c)) = %s
| r += 1
| }
| c += 1
| }
""".stripMargin.format(op("ad(a.linearIndex(r,c))","b(r,c)"))
}
val ops = Map(
"Double" -> Map[OpType,(String,String)=>String](OpAdd -> {_ + " + " + _},
OpSub -> {_ + " - " + _},
OpMulScalar -> {_ + " * " + _},
OpMulMatrix -> {_ + " * " + _},
OpDiv -> {_ + " / " + _},
OpMod -> {_ + " % " + _},
OpSet -> {(a,b) => b},
OpPow -> {"scala.math.pow("+ _ + ", " + _ + ")"}
),
"Float" -> Map[OpType,(String,String)=>String](OpAdd -> {_ + " + " + _},
OpSub -> {_ + " - " + _},
OpMulScalar -> {_ + " * " + _},
OpMulMatrix -> {_ + " * " + _},
OpDiv -> {_ + " / " + _},
OpMod -> {_ + " % " + _},
OpSet -> {(a,b) => b},
OpPow -> {"scala.math.pow("+ _ + ", " + _ + ").toFloat"}
),
"Int" -> Map[OpType,(String,String)=>String](OpAdd -> {_ + " + " + _},
OpSub -> {_ + " - " + _},
OpMulScalar -> {_ + " * " + _},
OpMulMatrix -> {_ + " * " + _},
OpDiv -> {_ + " / " + _},
OpMod -> {_ + " % " + _},
OpSet -> {(a,b) => b},
OpPow -> {"IntMath.ipow("+ _ + ", " + _ + ")"}
)
)
}
/**
 * Generator `App`: when run, writes DenseVectorOps.scala and
 * DenseMatrixOps.scala (operator traits for DenseVector / DenseMatrix)
 * under math/src/main/scala/breeze/linalg/.
 */
object GenDenseOps extends App {

  // Operator names to skip per target type — presumably these are
  // hand-written elsewhere (e.g. BLAS-backed for Double); confirm before editing.
  val blacklist = Map("DenseVector" -> Set("canMulScalarInto_DV_S_Double", "canSetInto_DV_DV_Double",
    "canAddInto_DV_DV_Double", "canSubInto_DV_DV_Double")).withDefaultValue(Set.empty)

  /**
   * Writes one operators file for type `tpe` into `f`: for each scalar in
   * GenOperators.ops, a concrete trait (via generateDVDVTrait) and a generic
   * fallback trait (via generateGenericOpsTrait).
   *
   * @param loop  template for tpe-op-tpe in-place loops
   * @param loopS template for tpe-op-scalar in-place loops
   * @param loopG template for tpe-op-generic loops (flag = zero-idempotent op)
   */
  def genHomogeneous(tpe: String,
                     generic: String,
                     parentTrait: String,
                     pckg: String,
                     f: File)
                    (loop: ((String, String)=>String)=>String,
                     loopS: ((String,String)=>String)=>String,
                     loopG: (((String,String)=>String),Boolean)=>String) {
    val out = new FileOutputStream(f)
    val print = new PrintStream(out)
    import print.println
    println("package " + pckg)
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    for( (scalar,ops) <- GenOperators.ops) {
      val genericClassName = tpe+"Ops_" +scalar +"_Generic"
      val vector = "%s[%s]".format(tpe,scalar)
      val gvector = "%s[%s]".format(generic,scalar)
      generateDVDVTrait(print, tpe, scalar, genericClassName, generic, vector, ops, loop, loopS, generic == "Vector")
      generateGenericOpsTrait(print, tpe, scalar, genericClassName, parentTrait, generic, vector, gvector, ops, loopG, generic == "Vector")
    }
    print.close()
  }

  /**
   * Emits the trait holding tpe-op-tpe and tpe-op-scalar operators
   * (both in-place and pure forms) for one scalar type. Pure operators are
   * derived from the in-place ones via the emitted `pureFromUpdate_*` helper.
   * Registrations with the generic Vector registries are emitted only when
   * `genRegistries` is set.
   */
  def generateDVDVTrait(out: PrintStream,
                        tpe: String, scalar: String,
                        genericClassName: String,
                        generic: String,
                        vector: String,
                        ops: Map[OpType, (String, String) => String],
                        loop: ((String, String) => String) => String,
                        loopS: ((String, String) => String) => String,
                        genRegistries: Boolean) {
    import out.println
    println("/** This is an auto-generated trait providing operators for " + tpe + ". */")
    println("trait " + tpe + "Ops_" + scalar + " extends " + genericClassName + " { this: " + tpe + ".type =>")
    println(
      """
  def pureFromUpdate_%s[Other,Op<:OpType](op: BinaryUpdateOp[%s, Other, Op])(implicit copy: CanCopy[%s]):BinaryOp[%s, Other, Op, %s] = {
    new BinaryOp[%s, Other, Op, %s] {
      override def apply(a : %s, b : Other) = {
        val c = copy(a)
        op(c, b)
        c
      }
    }
  }""".format(scalar, vector, vector, vector, vector, vector, vector, vector))
    for ((op, fn) <- ops) {
      import GenOperators._
      val name = "can" + op.getClass.getSimpleName.drop(2).dropRight(1) + "Into_DV_DV_" + scalar
      if (!blacklist(tpe)(name) && op != OpMulMatrix) { // don't generate OpMulMatrix for DV_DV
        println(genBinaryUpdateOperator(name, vector, vector, op)(loop(fn)))
        if (genRegistries)
          println(" " + register(generic, GenVectorRegistries.getVVIntoName(op, scalar), name))
        println()
        println(" " + genBinaryAdaptor(name.replace("Into", ""), vector, vector, op, vector, "pureFromUpdate_" + scalar + "(" + name + ")"))
        if (genRegistries)
          println(" " + register(generic, GenVectorRegistries.getVVName(op, scalar), name.replace("Into", "")))
        println()
      }
      val names = "can" + op.getClass.getSimpleName.drop(2).dropRight(1) + "Into_DV_S_" + scalar
      if (!blacklist(tpe)(names)) {
        println(genBinaryUpdateOperator(names, vector, scalar, op)(loopS(fn)))
        if (genRegistries)
          println(" " + register(generic, GenVectorRegistries.getVSIntoName(op, scalar), names))
        println()
        println(" " + genBinaryAdaptor(names.replace("Into", ""), vector, scalar, op, vector, "pureFromUpdate_" + scalar + "(" + names + ")"))
        if (genRegistries)
          println(" " + register(generic, GenVectorRegistries.getVSName(op, scalar), names.replaceAll("Into", "")))
        println()
      }
    }
    println("}")
  }

  /**
   * Emits the generic-fallback trait: tpe-op-generic registry operators
   * (in-place and pure), excluding OpMulMatrix. Registrations happen only
   * when `generic == "Vector"`.
   */
  def generateGenericOpsTrait(out: PrintStream,
                              tpe: String, scalar: String,
                              genericClassName: String,
                              parent: String,
                              generic: String,
                              vector: String,
                              gvector: String,
                              ops: Map[OpType, (String, String) => String],
                              loopG: ((String, String) => String, Boolean) => String,
                              genRegistries: Boolean) {
    import out.println
    println("/** This is an auto-generated trait providing operators for " + tpe + ". */")
    println("trait " + genericClassName + " extends " + parent + "{ this: " + tpe + ".type =>")
    println(
      """
  def pureRegistryFromUpdate_%s[Other,Op<:OpType](op: BinaryUpdateRegistry[%s, Other, Op])(implicit copy: CanCopy[%s]):BinaryRegistry[%s, Other, Op, %s] = {
    new BinaryRegistry[%s, Other, Op, %s] {
      override def bindingMissing(a : %s, b : Other) = {
        val c = copy(a)
        op(c, b)
        c
      }
    }
  }
  """.format(scalar, vector, vector, vector, vector, vector, vector, vector))
    for ((op, fn) <- ops if op != OpMulMatrix) {
      import GenOperators._
      val namegen = "can" + op.getClass.getSimpleName.drop(2).dropRight(1) + "Into_DV_V_" + scalar
      println(genBinaryUpdateRegistryDef(namegen, vector, gvector, op)(loopG(fn, op == OpAdd || op == OpSub)))
      if (generic == "Vector")
        println(" " + register(generic, GenVectorRegistries.getVVIntoName(op, scalar), namegen))
      println()
      println(" " + genBinaryRegistryAdaptorDef(namegen.replace("Into", ""), vector, gvector, op, vector, "pureRegistryFromUpdate_" + scalar + "(" + namegen + ")"))
      if (generic == "Vector")
        println(" " + register(generic, GenVectorRegistries.getVVName(op, scalar), namegen.replace("Into", "")))
      println()
    }
    println("}")
  }

  // Entry-point side effects: generate both files when the App runs.
  val out = new File("math/src/main/scala/breeze/linalg/DenseVectorOps.scala")
  genHomogeneous("DenseVector", "Vector", "AnyRef", "breeze.linalg", out)(
    GenOperators.binaryUpdateDV_DV_loop _,
    GenOperators.binaryUpdateDV_scalar_loop _,
    GenOperators.binaryUpdateDV_V_loop _
  )
  val outM = new File("math/src/main/scala/breeze/linalg/DenseMatrixOps.scala")
  genHomogeneous("DenseMatrix", "Matrix", "LowPriorityDenseMatrix", "breeze.linalg", outM)(
    GenOperators.binaryUpdateDM_DM_loop _,
    GenOperators.binaryUpdateDM_scalar_loop _,
    GenOperators.binaryUpdateDM_M_loop _)
}
/**
 * Generator `App`: writes DenseVectorSVOps.scala — specialized
 * DenseVector-op-SparseVector operators plus a dot product, per scalar type.
 */
object GenDVSVSpecialOps extends App {
  import GenOperators._

  /**
   * Loop visiting only the sparse operand's active entries — valid only for
   * ops where skipping zeros is correct (used for OpAdd / OpSub).
   */
  def fastLoop(op: (String,String)=>String):String = {
    """require(b.length == a.length, "Vectors must be the same length!")
    val adata = a.data
    val aoff = a.offset
    val astride = a.stride
    val bd = b.data
    val bi = b.index
    val bsize = b.iterableSize
    var i = 0
    while(i < bsize) {
      if(b.isActive(i)) {
        val j = aoff + bi(i) * astride
        adata(j) = %s
      }
      i += 1
    }
    """.format(op("adata(j)","bd(i)")).replaceAll(" "," ")
  }

  /** Dense loop over every index of `b` — needed when zeros are not idempotent. */
  def slowLoop(op: (String,String)=>String):String = {
    """require(b.length == a.length, "Vectors must be the same length!")
    val adata = a.data
    var j = a.offset
    val astride = a.stride
    var i = 0
    while(i < b.length) {
      adata(j) = %s
      i += 1
      j += astride
    }
    """.format(op("adata(j)","b(i)")).replaceAll(" "," ")
  }

  /** Emits one trait per scalar with DV-op-SV operators and the dot product. */
  def gen(sparseType: String, out: PrintStream) {
    import out._
    println("package breeze.linalg")
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    for( (scalar,ops) <- GenOperators.ops) {
      println()
      val vector = "%s[%s]".format("DenseVector",scalar)
      val svector = "%s[%s]".format(sparseType,scalar)
      println("/** This is an auto-generated trait providing operators for DenseVector and " + sparseType + "*/")
      println("trait DenseVectorOps_"+sparseType+"_"+scalar +" { this: DenseVector.type =>")
      for( (op,fn) <- ops if op != OpMulMatrix) {
        val name = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_DV_" + sparseType + "_" + scalar
        // Add/Sub may skip inactive (zero) entries; everything else must not.
        val loop = if(op == OpSub || op == OpAdd) fastLoop _ else slowLoop _
        println(genBinaryUpdateOperator(name, vector, svector, op)(loop(fn)))
        println(" " + register("Vector", GenVectorRegistries.getVVIntoName(op, scalar), name))
        println()
        println(" " +genBinaryAdaptor(name.replace("Into",""), vector, svector, op, vector, "pureFromUpdate_"+scalar+ "(" + name+ ")"))
        println(" " + register("Vector", GenVectorRegistries.getVVName(op, scalar), name.replace("Into","")))
        println()
      }
      // dot product: iterate the sparse operand's active entries only.
      val dotName = "canDotProductDV_SV_" + scalar
      println(genBinaryOperator(dotName, vector, svector, OpMulInner, scalar){
        """require(b.length == a.length, "Vectors must be the same length!")
        var result: """ + scalar + """ = 0
        val bd = b.data
        val bi = b.index
        val bsize = b.iterableSize
        val adata = a.data
        val aoff = a.offset
        val stride = a.stride
        var i = 0
        while(i < bsize) {
          if(b.isActive(i)) result += adata(aoff + bi(i) * stride) * bd(i)
          i += 1
        }
        result""".replaceAll(" "," ")
      })
      println(" " + register("Vector", GenVectorRegistries.getDotName(scalar), dotName))
      println("}")
    }
  }

  // Entry-point side effect: write the SparseVector specializations file.
  val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/DenseVectorSVOps.scala")))
  gen("SparseVector", out)
  out.close()
}
/*
object GenCounterOps extends App {
import GenOperators._
def plusSubIntoLoop(op: (String,String)=>String):String = {
"""
for( (k,v) <- b.active.pairs) {
a(k) = %s
}
""".format(op("a(k)","v")).replaceAll(" "," ")
}
def timesLoop(op: (String,String)=>String):String = {
"""
val zero = implicitly[Semiring[V]].zero
val result = Counter[K, V]()
for( (k,v) <- b.active.pairs) {
val va =
r(k) = %s
}
r
""".format(op("a(k)","v")).replaceAll(" "," ")
}
def slowLoop(op: (String,String)=>String):String = {
"""require(b.length == a.length, "Vectors must be the same length!")
var i = 0
while(i < b.length) {
a(i) = %s
i += 1
}
""".format(op("a(i)","b(i)")).replaceAll(" "," ")
}
def gen(sparseType: String, out: PrintStream) {
import out._
println("package breeze.linalg")
println("import breeze.linalg.operators._")
println("import breeze.linalg.support._")
println("import breeze.numerics._")
for( (scalar,ops) <- GenOperators.ops) {
println()
val vector = "%s[%s]".format("DenseVector",scalar)
val svector = "%s[%s]".format(sparseType,scalar)
println("/** This is an auto-generated trait providing operators for DenseVector and " + sparseType + "*/")
println("trait DenseVectorOps_"+sparseType+"_"+scalar +" { this: DenseVector.type =>")
for( (op,fn) <- ops) {
val name = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_DV_" + sparseType + "_" + scalar
val loop = if(op == OpSub || op == OpAdd) fastLoop _ else slowLoop _
println(genBinaryUpdateOperator(name, vector, svector, op)(loop(fn)))
println()
println(" " +genBinaryAdaptor(name.replace("Into",""), vector, svector, op, vector, "pureFromUpdate_"+scalar+ "(" + name+ ")"))
println()
}
// dot product
val dotName = "canDotProductDV_SV_" + scalar
println(genBinaryOperator(dotName, vector, svector, OpMulInner, scalar){
"""require(b.length == a.length, "Vectors must be the same length!")
var result: """ + scalar + """ = 0
val bd = b.data
val bi = b.index
val bsize = b.iterableSize
var i = 0
while(i < b.size) {
if(b.isActive(i)) result += a(bi(i)) * bd(i)
i += 1
}
result""".replaceAll(" "," ")
})
println("}")
}
}
val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/DenseVectorSVOps.scala")))
gen("SparseVector", out)
out.close()
}
*/
/**
 * Generator `App`: writes SparseVectorOps.scala — SparseVector-op-SparseVector
 * and SparseVector-op-scalar operators, per scalar type.
 */
object GenSVOps extends App {
  import GenOperators._

  /**
   * Add/Sub in-place loop that merges `b`'s active entries into `a`:
   * entries already present in `a` are updated in place; new ones are
   * buffered and then merged (two disjoint sorted index lists) into fresh
   * arrays handed to `a.use`. If `b` is much denser than `a`, it instead
   * applies the op the other way onto a copy of `b` (fixed up by
   * `postProcessCopy`, e.g. negation for subtraction) and adopts that copy.
   */
  def plusIntoLoop(tpe: String, op: (String,String)=>String, postProcessCopy: String=>String):String = {
    """require(b.length == a.length, "Vectors must be the same length!")
    // TODO: decide the appropriate value of 3 and 30 here.
    if(b.activeSize > a.activeSize * 3 && b.activeSize > 30) {
      val c = copy(b)
      apply(c, a)
      %s
      a.use(c.index, c.data, c.activeSize)
      return
    }
    var buf:Array[Type] = null
    var bufi:Array[Int] = null
    var nactiveSize = 0
    val bd = b.data
    val bi = b.index
    val bsize = b.iterableSize
    var i = 0
    while(i < bsize) {
      if (a.contains(bi(i))) {
        // just add it in if it's there
        a(bi(i)) = %s
      } else { // not there
        if(buf eq null) {
          buf = new Array[Type](b.activeSize - i)
          bufi = new Array[Int](b.activeSize - i)
        } else if(buf.length == nactiveSize) {
          buf = Arrays.copyOf(buf, nactiveSize + b.activeSize - i)
          bufi = Arrays.copyOf(bufi, nactiveSize + b.activeSize - i)
        }
        // append to buffer to merged in later
        buf(nactiveSize) = %s
        bufi(nactiveSize) = bi(i)
        nactiveSize += 1
      }
      i += 1
    }
    // merge two disjoint sorted lists
    if(buf != null) {
      val result = new Array[Type](a.activeSize + nactiveSize)
      val resultI = new Array[Int](a.activeSize + nactiveSize)
      var ni = 0
      var ai = 0
      var out = 0
      while(ni < nactiveSize) {
        while(ai < a.activeSize && a.index(ai) < bufi(ni) ) {
          result(out) = a.data(ai)
          resultI(out) = a.index(ai)
          ai += 1
          out += 1
        }
        result(out) = buf(ni)
        resultI(out) = bufi(ni)
        out += 1
        ni += 1
      }
      System.arraycopy(a.data, ai, result, out, result.length - out)
      System.arraycopy(a.index, ai, resultI, out, result.length - out)
      out = result.length
      a.use(resultI, result, out)
    }
    """.replaceAll("Type",tpe).format(postProcessCopy("c"),op("a(bi(i))","bd(i)"), op("buf(nactiveSize)","bd(i)")).replaceAll(" "," ")
  }

  /**
   * Sparse elementwise-product loop skeleton: iterates the operand with
   * fewer active entries, keeps only nonzero products, and hands the result
   * arrays to `finish` (which decides in-place vs. new vector).
   */
  def timesLoopTemplate(tpe: String, zero: String, finish: (String, String, String)=>String) = {
    """require(b.length == a.length, "Vectors must be the same length!")
    val outD = new Array[Type](a.activeSize min b.activeSize)
    val outI = new Array[Int](a.activeSize min b.activeSize)
    var out = 0
    val looper = if(a.activeSize < b.activeSize) a else b
    val other = if(a.activeSize < b.activeSize) b else a
    var i = 0
    val bd = looper.data
    val bi = looper.index
    val bsize = looper.iterableSize
    while(i < bsize) {
      if(looper.isActive(i)) {
        val p = other(bi(i)) * bd(i)
        if (p != Zero) {
          outD(out) = p
          outI(out) = bi(i)
          out += 1
        }
      }
      i += 1
    }
    %s
    """.replaceAll("Type",tpe).replaceAll("Zero", zero).format(finish("outD", "outI", "out"))
  }

  /** In-place product: result arrays are adopted by `a` via `use`. */
  def timesIntoLoop(tpe: String, zero: String) = timesLoopTemplate(tpe, zero, {(data, index, activeSize) =>
    "a.use(%s, %s, %s)".format(index, data, activeSize)
  })

  /** Pure product: result arrays wrapped in a new SparseVector. */
  def timesLoop(tpe: String, zero: String) = timesLoopTemplate(tpe, zero, {(data, index, activeSize) =>
    "new SparseVector(%s, %s, %s, a.length)".format(index, data, activeSize)
  })

  /**
   * OpSet loop: adopt copies of b's arrays.
   * FIX: `java.util.Arrays.copyOf` requires (array, newLength); the previous
   * template emitted single-argument calls, which would not compile in the
   * generated file. We copy exactly `b.activeSize` elements, matching the
   * active size passed to `use`.
   */
  def setLoop = """require(b.length == a.length, "Vectors must be the same length!")
    a.use(Arrays.copyOf(b.index, b.activeSize), Arrays.copyOf(b.data, b.activeSize), b.activeSize)"""

  /** Dense loop over every index of `b`. */
  def slowLoop(op: (String,String)=>String):String = {
    """require(b.length == a.length, "Vectors must be the same length!")
    var i = 0
    while(i < b.length) {
      a(i) = %s
      i += 1
    }
    """.format(op("a(i)","b(i)")).replaceAll(" "," ")
  }

  /** Dense loop applying a scalar op at every index of `a`. */
  def scalarLoop(op: (String,String)=>String):String = {
    """
    var i = 0
    while(i < a.length) {
      a(i) = %s
      i += 1
    }
    """.format(op("a(i)","b")).replaceAll(" "," ")
  }

  /** Active-entries-only scalar loop — valid when op(0, b) == 0 (mul, div). */
  def scalarMultLoop(op: (String,String)=>String) = {
    """
    var i = 0
    while(i < a.activeSize) {
      a.data(i) = %s
      i += 1
    }
    """.format(op("a.data(i)","b"))
  }

  /** Emits one trait per scalar with SV-op-SV / SV-op-scalar ops and the dot product. */
  def gen(out: PrintStream) {
    import out._
    println("package breeze.linalg")
    println("import java.util._")
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    for( (scalar,ops) <- GenOperators.ops) {
      println()
      val vector = "SparseVector[%s]" format scalar
      println("/** This is an auto-generated trait providing operators for SparseVector */")
      println("trait SparseVectorOps_"+scalar +" { this: SparseVector.type =>")
      println(
        """
  def pureFromUpdate_%s[Other,Op<:OpType](op: BinaryUpdateOp[%s, Other, Op])(implicit copy: CanCopy[%s]):BinaryOp[%s, Other, Op, %s] = {
    new BinaryOp[%s, Other, Op, %s] {
      override def apply(a : %s, b : Other) = {
        val c = copy(a)
        op(c, b)
        c
      }
    }
  }
  """.format(scalar,vector, vector, vector, vector, vector, vector, vector))
      for( (op,fn) <- ops if op != OpMulScalar && op != OpMulMatrix) {
        val name = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_VV_" + scalar
        // Only reached for Add/Sub (plusIntoLoop's fast path): nothing to fix
        // up for +=, negate the copy for -=.
        def postProcesscopy(c: String) = if(op == OpAdd) "" else if(op == OpSub) c + "*= (-1).to"+scalar else sys.error(":(")
        val loop = if(op == OpSub || op == OpAdd) plusIntoLoop(scalar, (_:(String,String)=>String), postProcesscopy _) else slowLoop _
        println(genBinaryUpdateOperator(name, vector, vector, op)(loop(fn)))
        println(" " + register("Vector", GenVectorRegistries.getVVIntoName(op, scalar), name))
        println()
        println(" " +genBinaryAdaptor(name.replace("Into",""), vector, vector, op, vector, "pureFromUpdate_"+scalar+ "(" + name+ ")"))
        println(" " + register("Vector", GenVectorRegistries.getVVName(op, scalar), name.replace("Into", "")))
        println()
        val names = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_SV_S_"+scalar
        val loopS = if(op == OpDiv) scalarMultLoop(fn) else scalarLoop(fn)
        println(genBinaryUpdateOperator(names, vector, scalar, op)(loopS))
        println(" " + register("Vector", GenVectorRegistries.getVSIntoName(op, scalar), names))
        println()
        println(" " +genBinaryAdaptor(names.replace("Into",""), vector, scalar, op, vector, "pureFromUpdate_"+scalar+ "(" + names+ ")"))
        println(" " + register("Vector", GenVectorRegistries.getVSName(op, scalar), names.replace("Into", "")))
        println()
      };
      { //mul: elementwise product needs its own sparse-aware loops.
        val op = OpMulScalar
        val name = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_VV_" + scalar
        val loop = timesIntoLoop(scalar, "0")
        println(genBinaryUpdateOperator(name, vector, vector, op)(loop))
        println()
        val nonupdate = name.replace("Into","")
        println(genBinaryOperator(nonupdate, vector, vector, OpMulScalar, vector){timesLoop(scalar, "0")})
        println()
        val names = "can"+op.getClass.getSimpleName.drop(2).dropRight(1)+"Into_SV_S_"+scalar
        println(genBinaryUpdateOperator(names, vector, scalar, op)(scalarMultLoop(ops(OpMulScalar))))
        println()
        println(" " +genBinaryAdaptor(names.replace("Into",""), vector, scalar, op, vector, "pureFromUpdate_"+scalar+ "(" + names+ ")"))
        println()
        val namesm = "canMulMatrixInto_SV_S_"+scalar
        println(genBinaryUpdateOperator(namesm, vector, scalar, OpMulMatrix)(scalarMultLoop(ops(OpMulMatrix))))
        println()
        println(" " +genBinaryAdaptor(namesm.replace("Into",""), vector, scalar, OpMulMatrix, vector, "pureFromUpdate_"+scalar+ "(" + namesm+ ")"))
        println()
      }
      // dot product: binary-search each of b's indices in a's index array,
      // bracketing from the left with the last hit.
      val dotName = "canDotProductSV_" + scalar
      println(genBinaryOperator(dotName, vector, vector, OpMulInner, scalar){
        """require(b.length == a.length, "Vectors must be the same length!")
        if (a.activeSize < b.activeSize) {
          apply(b, a)
        } else {
          var result: """ + scalar + """ = 0
          val ad = a.data
          val bd = b.data
          val ai = a.index
          val bi = b.index
          val bsize = b.iterableSize
          val asize = a.iterableSize
          var i = 0
          // TODO: this can be made faster by using the last index to bracket the search as well.
          var lastOff = 0
          while(i < bsize) {
            val aoff = Arrays.binarySearch(ai, lastOff, asize, bi(i))
            if(aoff >= 0) {
              lastOff = aoff
              result += ad(aoff) * bd(i)
            } else {
              lastOff = ~aoff
            }
            i += 1
          }
          result
        }""".replaceAll(" "," ")
      })
      println("}")
    }
  }

  // Entry-point side effect: write the SparseVector operators file.
  val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/SparseVectorOps.scala")))
  gen(out)
  out.close()
}
/**
 * Generator `App`: writes VectorOps.scala — generic Vector registries that
 * concrete implementations register into. Also centralizes the naming scheme
 * (getVVIntoName etc.) used by the other generators for registration calls.
 */
object GenVectorRegistries extends App {

  /** Writes the generic Vector registry traits (one per scalar) into `f`. */
  def genHomogeneous(tpe: String, pckg: String, f: File)(loop: (((String,String)=>String), Boolean)=>String,
                                                         loopS: (((String,String)=>String), Boolean)=>String) {
    val out = new FileOutputStream(f)
    val print = new PrintStream(out)
    import print.println
    println("package " + pckg)
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    import GenOperators._
    for( (scalar,ops) <- GenOperators.ops) {
      val vector = "%s[%s]".format(tpe,scalar)
      println("/** This is an auto-generated trait providing operators for " + tpe + ". */")
      println("trait "+tpe+"Ops_"+scalar +" { this: "+tpe+".type =>")
      println(
        """
  def pureFromUpdate_%s[Other,Op<:OpType](op: BinaryUpdateOp[%s, Other, Op])(implicit copy: CanCopy[%s]):BinaryRegistry[%s, Other, Op, %s] = {
    new BinaryRegistry[%s, Other, Op, %s] {
      override def bindingMissing(a : %s, b : Other) = {
        val c = copy(a)
        op(c, b)
        c
      }
    }
  }
  """.format(scalar,vector, vector, vector, vector, vector, vector, vector))
      for( (op,fn) <- ops) {
        if(op != OpMulMatrix) {
          val name = getVVIntoName(op, scalar)
          println(genBinaryUpdateRegistryDef(name, vector, vector, op)(loop(fn, op == OpAdd || op == OpSub)))
          println()
          println(" " +genBinaryRegistryAdaptor(getVVName(op, scalar), vector, vector, op, vector, "pureFromUpdate_"+scalar+ "(" + name+ ")"))
          println()
        }
        val names: String = getVSIntoName(op, scalar)
        println(genBinaryUpdateRegistryDef(names, vector, scalar, op)(loopS(fn, op == OpAdd || op == OpSub)))
        println()
        println(" " +genBinaryRegistryAdaptor(getVSName(op, scalar), vector, scalar, op, vector, "pureFromUpdate_"+scalar+ "(" + names+ ")"))
        println()
      }
      // generic dot product: iterate b's active entries.
      val dotName: String = getDotName(scalar)
      println(genBinaryRegistryDef(dotName, vector, vector, OpMulInner, scalar){
        """require(b.length == a.length, "Vectors must be the same length!")
        var result: """ + scalar + """ = 0
        for( (i, v) <- b.activeIterator) {
          result += a(i) * v
        }
        result""".replaceAll(" "," ")
      })
      println("}")
    }
    print.close()
  }

  /** Name of the generic dot-product registry for `scalar`. */
  def getDotName(scalar: String): String = {
    val dotName = "canDotProductV_" + scalar
    dotName
  }

  /** Name of the in-place vector-scalar registry for (`op`, `scalar`). */
  def getVSIntoName(op: OpType, scalar: String): String = {
    val names = "can" + op.getClass.getSimpleName.drop(2).dropRight(1) + "Into_V_S_" + scalar
    names
  }

  /** Name of the in-place vector-vector registry for (`op`, `scalar`). */
  def getVVIntoName(op: OpType, scalar: String): String = {
    "can" + op.getClass.getSimpleName.drop(2).dropRight(1) + "Into_V_V_" + scalar
  }

  /** Pure (non-Into) variant of [[getVSIntoName]]. */
  def getVSName(op: OpType, scalar: String): String = {
    getVSIntoName(op, scalar).replaceAll("Into", "")
  }

  /** Pure (non-Into) variant of [[getVVIntoName]]. */
  def getVVName(op: OpType, scalar: String): String = {
    getVVIntoName(op, scalar).replaceAll("Into", "")
  }

  // Entry-point side effect: write the generic Vector registries file.
  val out = new File("math/src/main/scala/breeze/linalg/VectorOps.scala")
  genHomogeneous("Vector", "breeze.linalg", out)(
    GenOperators.binaryUpdateDV_V_loop _,
    GenOperators.binaryUpdateV_S_loop _
  )
}
/**
 * Generator `App`: writes CSCMatrixOps.scala — multiplication operators for
 * compressed-sparse-column matrices (CSC * Vector, CSC * DenseMatrix,
 * DenseMatrix * CSC, CSC * CSC), per scalar type.
 */
object GenCSCOps extends App {
  import GenOperators._

  /** Emits one CSCMatrixOps trait per scalar type to `out`. */
  def gen(out: PrintStream) {
    import out._
    println("package breeze.linalg")
    println("import java.util._")
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    for( (scalar,ops) <- GenOperators.ops) {
      println()
      val matrix = "CSCMatrix[%s]" format scalar
      val dmatrix = "DenseMatrix[%s]" format scalar
      val gvector = "Vector[%s]" format scalar
      println("/** This is an auto-generated trait providing operators for CSCMatrix */")
      println("trait CSCMatrixOps_"+scalar +" { this: CSCMatrix.type =>")
      // CSC * Vector: walk each column's stored entries via colPtrs/rowIndices.
      // NOTE(review): unlike the other templates below, this one emits no
      // dimension check (a.cols vs b.length) — confirm whether that is intended.
      println(genBinaryRegistryDef("canMulM_V_" + scalar, matrix, gvector, OpMulMatrix, gvector){"""
      val res = DenseVector.zeros[Scalar](a.rows)
      var c = 0
      while(c < a.cols) {
        var rr = a.colPtrs(c)
        val rrlast = a.colPtrs(c+1)
        while (rr < rrlast) {
          val r = a.rowIndices(rr)
          res(r) += a.data(rr) * b(c)
          rr += 1
        }
        c += 1
      }
      res""".replaceAll("Scalar", scalar)
      })
      // CSC * DenseMatrix: accumulate each sparse column's contribution per output column.
      println(genBinaryOperator("canMulM_DM_" + scalar, matrix, dmatrix, OpMulMatrix, dmatrix){"""
      if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
      val res = new DenseMatrix[Scalar](a.rows, b.cols)
      var i = 0
      while (i < b.cols) {
        var j = 0
        while (j < a.cols) {
          val v = b(j, i)
          var k = a.colPtrs(j)
          while (k < a.colPtrs(j+1)) {
            res(a.rowIndices(k), i) += v * a.data(k)
            k += 1
          }
          j += 1
        }
        i += 1
      }
      res""".replaceAll("Scalar", scalar)
      })
      // DenseMatrix * CSC: iterate b's stored entries, scaling columns of a.
      println(genBinaryOperator("canMulDM_M_" + scalar, dmatrix, matrix, OpMulMatrix, dmatrix){"""
      if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
      val res = new DenseMatrix[Scalar](a.rows, b.cols)
      var i = 0
      while (i < b.cols) {
        var j = b.colPtrs(i)
        while (j < b.colPtrs(i+1)) {
          val dval = b.data(j)
          val ival = b.rowIndices(j)
          var k = 0
          while (k < a.rows) {
            res(k,i) += a(k,ival)*dval
            k += 1
          }
          j += 1
        }
        i += 1
      }
      res""".replaceAll("Scalar", scalar)
      })
      // CSC * CSC: first pass sizes an upper bound on nonzeros, second pass
      // accumulates products into a CSCMatrix.Builder.
      println(genBinaryOperator("canMulM_M_" + scalar, matrix, matrix, OpMulMatrix, matrix){"""
      if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
      var numnz = 0
      var i = 0
      while (i < b.cols) {
        var j = b.colPtrs(i)
        while (j < b.colPtrs(i+1)) {
          numnz += a.colPtrs(b.rowIndices(j)+1) - a.colPtrs(b.rowIndices(j))
          j += 1
        }
        i += 1
      }
      val res = new CSCMatrix.Builder[Scalar](a.rows, b.cols, numnz)
      i = 0
      while (i < b.cols) {
        var j = b.colPtrs(i)
        while (j < b.colPtrs(i+1)) {
          val dval = b.data(j)
          var k = a.colPtrs(b.rowIndices(j))
          while (k < a.colPtrs(b.rowIndices(j)+1)) {
            res.add(a.rowIndices(k), i, a.data(k) * dval)
            k += 1
          }
          j += 1
        }
        i += 1
      }
      res.result()""".replaceAll("Scalar", scalar)
      })
      println("}")
    }
  }

  // Entry-point side effect: write the CSCMatrix operators file.
  val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/CSCMatrixOps.scala")))
  gen(out)
  out.close()
}
/**
 * Generator `App`: writes DenseMatrixMulOps.scala — naive triple-loop
 * multiplication registries for DenseMatrix * Vector and DenseMatrix * Matrix,
 * per scalar type.
 */
object GenDMMultOps extends App {
  import GenOperators._

  /** Emits one DenseMatrixMultOps trait per scalar type to `out`. */
  def gen(out: PrintStream) {
    import out._
    println("package breeze.linalg")
    println("import java.util._")
    println("import breeze.linalg.operators._")
    println("import breeze.linalg.support._")
    println("import breeze.numerics._")
    for( (scalar,ops) <- GenOperators.ops) {
      println()
      val matrix = "DenseMatrix[%s]" format scalar
      val vector = "DenseVector[%s]" format scalar
      val gvector = "Vector[%s]" format scalar
      val gmatrix = "Matrix[%s]" format scalar
      println("/** This is an auto-generated trait providing multiplication for DenseMatrix */")
      println("trait DenseMatrixMultOps_"+scalar +" extends DenseMatrixOps_" + scalar + " { this: DenseMatrix.type =>")
      // DenseMatrix * Vector fallback (registry bindingMissing).
      println(genBinaryRegistryDef("canMulM_V_" + scalar, matrix, gvector, OpMulMatrix, vector){"""
      // TODO: this could probably be much faster?
      require(a.cols == b.length)
      val res = DenseVector.zeros[Scalar](a.rows)
      var c = 0
      while(c < a.cols) {
        var r = 0
        while (r < a.rows) {
          val v = a(r, c)
          res(r) += v * b(c)
          r += 1
        }
        c += 1
      }
      res """.replaceAll("Scalar", scalar)
      })
      // DenseMatrix * Matrix fallback (registry bindingMissing).
      println(genBinaryRegistryDef("canMulM_M_" + scalar, matrix, gmatrix, OpMulMatrix, matrix){"""
      // TODO: this could probably be much faster
      val res = DenseMatrix.zeros[Scalar](a.rows, b.cols)
      require(a.cols == b.rows)
      var c = 0
      while(c < a.cols) {
        var r = 0
        while (r < a.rows) {
          val v = a(r, c)
          var j = 0
          while(j < b.cols) {
            res(r, j) += v * b(c, j)
            j += 1
          }
          r += 1
        }
        c += 1
      }
      res """.replaceAll("Scalar", scalar)
      })
      println("}")
    }
  }

  // Entry-point side effect: write the DenseMatrix multiplication file.
  val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/DenseMatrixMulOps.scala")))
  gen(out)
  out.close()
}
// Code generator (run as an App): emits the auto-generated trait
// MatrixMultOps_<scalar>, providing multiplication for the generic Matrix
// type, for every scalar type registered in GenOperators.ops, and writes it
// to math/src/main/scala/breeze/linalg/MatrixMulOps.scala.
object GenMatrixMultOps extends App {
import GenOperators._
// Writes the complete generated source file (package clause, imports, and one
// multiplication trait per scalar type) to `out`.
def gen(out: PrintStream) {
import out._
println("package breeze.linalg")
println("import java.util._")
println("import breeze.linalg.operators._")
println("import breeze.linalg.support._")
println("import breeze.numerics._")
for( (scalar,ops) <- GenOperators.ops) {
println()
// Concrete type names substituted into the generated signatures
val matrix = "Matrix[%s]" format scalar
val vector = "Vector[%s]" format scalar
val gvector = "Vector[%s]" format scalar
val gmatrix = "Matrix[%s]" format scalar
println("/** This is an auto-generated trait providing multiplication for Matrix */")
println("trait MatrixMultOps_"+scalar +" { this: Matrix.type =>")
// Matrix * Vector registry entry. The quoted block below is the *generated*
// method body; "Scalar" inside it is replaced with the concrete scalar type.
println(genBinaryRegistryDef("canMulM_V_" + scalar, matrix, gvector, OpMulMatrix, vector){"""
// TODO: this could probably be much faster?
require(a.cols == b.length)
val res = DenseVector.zeros[Scalar](a.rows)
var c = 0
while(c < a.cols) {
var r = 0
while (r < a.rows) {
val v = a(r, c)
res(r) += v * b(c)
r += 1
}
c += 1
}
res """.replaceAll("Scalar", scalar)
})
// Matrix * Matrix registry entry (the generated code is a naive triple loop).
println(genBinaryRegistryDef("canMulM_M_" + scalar, matrix, gmatrix, OpMulMatrix, matrix){"""
// TODO: this could probably be much faster
val res = DenseMatrix.zeros[Scalar](a.rows, b.cols)
require(a.cols == b.rows)
var c = 0
while(c < a.cols) {
var r = 0
while (r < a.rows) {
val v = a(r, c)
var j = 0
while(j < b.cols) {
res(r, j) += v * b(c, j)
j += 1
}
r += 1
}
c += 1
}
res """.replaceAll("Scalar", scalar)
})
println("}")
}
}
// Generate directly into the source tree, then release the file handle.
val out = new PrintStream(new FileOutputStream(new File("math/src/main/scala/breeze/linalg/MatrixMulOps.scala")))
gen(out)
out.close()
}
/** Convenience entry point that runs every Breeze operator code generator in sequence. */
object GenAll extends App {
  // Each generator is itself an App; passing an empty argument array triggers its body.
  private val noArgs = Array.empty[String]
  GenDenseOps.main(noArgs)
  GenDVSVSpecialOps.main(noArgs)
  GenSVOps.main(noArgs)
  GenVectorRegistries.main(noArgs)
  GenCSCOps.main(noArgs)
  GenDMMultOps.main(noArgs)
  GenMatrixMultOps.main(noArgs)
}
/* *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\* */
package squants
import org.scalatest.{ FlatSpec, Matchers }
/**
* @author garyKeorkunian
* @since 0.1
*
*/
class RatioSpec extends FlatSpec with Matchers {

  behavior of "Ratio"

  case class TestRatio(base: Mass, counter: Length) extends Ratio[Mass, Length]

  // Shared fixture: 10 kg related to 5 m (a 2 kg-per-m relationship).
  // Declared as a def so every test works on a fresh instance.
  private def massToLength = TestRatio(Kilograms(10), Meters(5))

  it should "create a ratio with the correct base and counter values" in {
    massToLength.base should be(Kilograms(10))
    massToLength.counter should be(Meters(5))
  }

  it should "convert a value of the base type to a value of the counter type" in {
    massToLength.convertToCounter(Kilograms(25)) should be(Meters(12.5))
  }

  it should "convert a value of the counter type to a value of the base type" in {
    massToLength.convertToBase(Meters(25)) should be(Kilograms(50))
  }

  behavior of "LikeRatio"

  case class TestLikeRatio(base: Mass, counter: Mass) extends LikeRatio[Mass]

  // Shared fixture: 10 kg to 5 kg, a like-for-like ratio of 2.
  private def massToMass = TestLikeRatio(Kilograms(10), Kilograms(5))

  it should "create a ratio with the correct base, counter and ratio values" in {
    massToMass.base should be(Kilograms(10))
    massToMass.counter should be(Kilograms(5))
    massToMass.ratio should be(2)
    massToMass.inverseRatio should be(.5)
  }

  it should "convert a value of the base type to a value of the counter type" in {
    massToMass.convertToCounter(Kilograms(25)) should be(Kilograms(12.5))
  }

  it should "convert a value of the counter type to a value of the base type" in {
    massToMass.convertToBase(Kilograms(25)) should be(Kilograms(50))
  }
}
| derekmorr/squants | shared/src/test/scala/squants/RatioSpec.scala | Scala | apache-2.0 | 2,242 |
// -----------------------------------------------------------------------------
//
// Scalax - The Scala Community Library
// Copyright (c) 2005-8 The Scalax Project. All rights reserved.
//
// The primary distribution site is http://scalax.scalaforge.org/
//
// This software is released under the terms of the Revised BSD License.
// There is NO WARRANTY. See the file LICENSE for the full text.
//
// -----------------------------------------------------------------------------
package scala.tools.scalap
package scalax
package rules
/** Mixin that gives a rule a human-readable name, reported via `toString`. */
trait Name {
  def name : String
  override def toString = name
}
/** A factory for rules.
 *
 *  @author Andrew Foggin
 *
 *  Inspired by the Scala parser combinator.
 */
trait Rules {
  /** Lifts a plain function into a Rule. */
  implicit def rule[In, Out, A, X](f : In => Result[Out, A, X]) : Rule[In, Out, A, X] = new DefaultRule(f)
  implicit def inRule[In, Out, A, X](rule : Rule[In, Out, A, X]) : InRule[In, Out, A, X] = new InRule(rule)
  implicit def seqRule[In, A, X](rule : Rule[In, In, A, X]) : SeqRule[In, A, X] = new SeqRule(rule)

  /** Pins the input type so the remaining type parameters can be inferred from `f`. */
  def from[In] = new {
    def apply[Out, A, X](f : In => Result[Out, A, X]) = rule(f)
  }

  /** Creates a StateRules factory whose state type is `s`, backed by this factory. */
  def state[s] = new StateRules {
    type S = s
    val factory = Rules.this
  }

  /** A rule that always succeeds with the given output state and value. */
  def success[Out, A](out : Out, a : A) = rule { in : Any => Success(out, a) }

  /** A rule that always fails. */
  def failure = rule { in : Any => Failure }

  /** A rule that raises its own input as the error value. */
  def error[In] = rule { in : In => Error(in) }

  /** A rule that always raises the given error value. */
  def error[X](err : X) = rule { in : Any => Error(err) }

  /** A rule that chooses among the given alternatives (delegates to Choice). */
  def oneOf[In, Out, A, X](rules : Rule[In, Out, A, X] *) : Rule[In, Out, A, X] = new Choice[In, Out, A, X] {
    val factory = Rules.this
    val choices = rules.toList
  }

  /** Creates a rule carrying a diagnostic name (see the Name mixin). */
  def ruleWithName[In, Out, A, X](_name : String, f : In => Result[Out, A, X]) : Rule[In, Out, A, X] with Name =
    new DefaultRule(f) with Name {
      val name = _name
    }

  /** The default Rule implementation: simply applies the wrapped function. */
  class DefaultRule[In, Out, A, X](f : In => Result[Out, A, X]) extends Rule[In, Out, A, X] {
    val factory = Rules.this
    def apply(in : In) = f(in)
  }

  /** Converts a rule into a function that throws an Exception on failure.
   *  NOTE: the error type parameter was previously named `Any`, shadowing
   *  `scala.Any`; renamed to `X` for clarity (type parameter names are not
   *  part of the binary or source-call interface).
   */
  def expect[In, Out, A, X](rule : Rule[In, Out, A, X]) : In => A = (in) => rule(in) match {
    case Success(_, a) => a
    case Failure => throw new ScalaSigParserError("Unexpected failure")
    case Error(x) => throw new ScalaSigParserError("Unexpected error: " + x)
  }
}
/** A factory for rules that apply to a particular context.
 *
 *  @requires S the context to which rules apply.
 *
 *  @author Andrew Foggin
 *
 *  Inspired by the Scala parser combinator.
 */
trait StateRules {
  type S
  type Rule[+A, +X] = rules.Rule[S, S, A, X]

  val factory : Rules
  import factory._

  def apply[A, X](f : S => Result[S, A, X]) = rule(f)

  /** Succeeds with `a`, leaving the state unchanged. */
  def unit[A](a : => A) = apply { s => Success(s, a) }
  /** Succeeds with a projection of the state, leaving the state unchanged. */
  def read[A](f : S => A) = apply { s => Success(s, f(s)) }
  /** Succeeds with the current state as the result value. */
  def get = apply { s => Success(s, s) }
  /** Replaces the state with `s`, returning the previous state as the result. */
  def set(s : => S) = apply { oldS => Success(s, oldS) }
  /** Transforms the state with `f`, returning the previous state as the result.
   *  BUG FIX: this was written as `Success(s, f(s))`, which left the state
   *  untouched and made `update` behave identically to `read`; the new state
   *  must be `f(s)` (matching upstream scalap).
   */
  def update(f : S => S) = apply { s => Success(f(s), s) }

  def nil = unit(Nil)
  def none = unit(None)

  /** Create a rule that succeeds (with the state as its value) if f(in) is true. */
  def cond(f : S => Boolean) = get filter f

  /** Create a rule that succeeds if all of the given rules succeed.
      @param rules the rules to apply in sequence.
  */
  def allOf[A, X](rules : Seq[Rule[A, X]]) = {
    // Tail-recursive fold over the rules, threading the state and collecting results.
    def rep(in : S, rules : List[Rule[A, X]], results : List[A]) : Result[S, List[A], X] = {
      rules match {
        case Nil => Success(in, results.reverse)
        case rule::tl => rule(in) match {
          case Failure => Failure
          case Error(x) => Error(x)
          case Success(out, v) => rep(out, tl, v::results)
        }
      }
    }
    in : S => rep(in, rules.toList, Nil)
  }

  /** Create a rule that succeeds with a list of all the provided rules that succeed.
      @param rules the rules to apply in sequence.
  */
  def anyOf[A, X](rules : Seq[Rule[A, X]]) = allOf(rules.map(_ ?)) ^^ { opts => opts.flatMap(x => x) }

  /** Repeatedly apply a rule from initial value until finished condition is met. */
  def repeatUntil[T, X](rule : Rule[T => T, X])(finished : T => Boolean)(initial : T) = apply {
    // more compact using HoF but written this way so it's tail-recursive
    def rep(in : S, t : T) : Result[S, T, X] = {
      if (finished(t)) Success(in, t)
      else rule(in) match {
        case Success(out, f) => rep(out, f(t))
        case Failure => Failure
        case Error(x) => Error(x)
      }
    }
    in => rep(in, initial)
  }
}
/** Convenience mixin: a Rules factory that also serves as its own StateRules factory. */
trait RulesWithState extends Rules with StateRules {
  val factory = this
}
| LPTK/intellij-scala | scalap/src/scalap/scalax/rules/Rules.scala | Scala | apache-2.0 | 4,596 |
package coursera.usgs
import retrofit.http.GET
import retrofit.{RetrofitError, RestAdapter, Callback}
import rx.lang.scala.Observable
import rx.lang.scala.subjects.AsyncSubject
import retrofit.client.Response
object Usgs {

  private val restAdapter = new RestAdapter.Builder().setServer("http://earthquake.usgs.gov").build()

  /** Fetches the last day's earthquake feed and emits one Feature per element. */
  def apply(): Observable[Feature] = {
    // AsyncSubject replays the final value to subscribers once completed.
    val collections = AsyncSubject[FeatureCollection]()
    val handler = new Callback[FeatureCollection] {
      def success(collection: FeatureCollection, response: Response): Unit = {
        collections.onNext(collection)
        collections.onCompleted()
      }
      def failure(error: RetrofitError): Unit = {
        collections.onError(error.getCause)
      }
    }
    restAdapter.create(classOf[Usgs]).get(handler)
    collections.flatMap(fc => Observable(fc.features : _*))
  }
}
// Retrofit service definition: GET the "all earthquakes, past day" GeoJSON feed.
private trait Usgs {
  @GET("/earthquakes/feed/geojson/all/day")
  def get(callback: Callback[FeatureCollection])
}
| mitochon/hexercise | src/mooc/reactive/week4.codesamples/src/test/scala/coursera/usgs/Usgs.scala | Scala | mit | 937 |
package dao.sitedata
import scala.concurrent.Future
import javax.inject.Inject
import play.api.db.slick.DatabaseConfigProvider
import play.api.db.slick.HasDatabaseConfigProvider
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import slick.driver.JdbcProfile
import slick.jdbc.GetResult
import models.sitedata.EquipmentModel
import play.db.NamedDatabase
import play.api.Logger
import org.joda.time.DateTime
import java.sql.Timestamp
import com.github.tototoshi.slick.PostgresJodaSupport._
/** Data-access operations for [[models.sitedata.EquipmentModel]]. */
trait IEquipmentModelDao extends BaseDao[EquipmentModel]{
  def findAll(): Future[Seq[EquipmentModel]]             // every equipment model
  def findById(id: Long): Future[Option[EquipmentModel]] // None when the id is absent
  def remove(id: Long): Future[Int]                      // presumably the deleted row count — TODO confirm (impl is a stub)
  def insert(p: EquipmentModel): Future[Unit]
  def update(p2: EquipmentModel): Future[Unit]
}
/**
 * Slick-backed [[IEquipmentModelDao]] reading from the "SiteData" database.
 *
 * NOTE(review): remove/insert/update are currently stubs that ignore their
 * arguments and complete immediately — the real queries are commented out.
 */
class EquipmentModelDao @Inject()(@NamedDatabase("SiteData") protected val dbConfigProvider: DatabaseConfigProvider)
  extends HasDatabaseConfigProvider[JdbcProfile] with IEquipmentModelDao {

  // import driver.api._
  import com.typesafe.slick.driver.ms.SQLServerDriver.api._

  // Slick row mapping; the physical table name comes from
  // models.sitedata.EquipmentModelDef.toTable.
  class EquipmentModelTable(tag: Tag)
    extends Table[EquipmentModel](tag, models.sitedata.EquipmentModelDef.toTable) {
    def id = column[Long]("EquipmentModelID", O.PrimaryKey)
    def name = column[String]("EquipmentModelName")
    def equipmentname = column[Int]("EquipmentNameID")
    def displayindex = column[Int]("DisplayIndex")
    // def lastmodifiedtime = column[DateTime]("LastModifiedTime")
    // def lastmodifier = column[String]("LastModifier")
    // def modifiedtimestamp = column[Timestamp]("modifiedtimestamp")

    def * = (
      id,
      name,
      equipmentname,
      displayindex
      // lastmodifiedtime,
      // lastmodifier
      // modifiedtimestamp
    ) <> (EquipmentModel.tupled, EquipmentModel.unapply _)
  }

  // Source-file name, used purely as a logging prefix below.
  lazy val sourcefilename = new Exception().getStackTrace.head.getFileName

  override def toTable = TableQuery[EquipmentModelTable]

  private val Sites = toTable()

  override def findAll(): Future[Seq[EquipmentModel]] = {
    Logger.info(sourcefilename + " findAll called.")
    db.run(Sites.result)
  }

  override def findById(id: Long): Future[Option[EquipmentModel]] = {
    Logger.info("(" + sourcefilename + ")" + " findById(" + id + ") called.")
    db.run(Sites.filter( _.id === id).result.headOption)
  }

  // NOTE(review): stub — always completes with 0 and deletes nothing.
  override def remove(id: Long): Future[Int] = {
    /* db.run(Sites.filter( _.siteid === id).delete) */
    Future(0) // dummy
  }

  // NOTE(review): stub — ignores `p` and inserts nothing.
  override def insert(p: EquipmentModel): Future[Unit] = {
    /* db.run(Sites += p).map { _ => () } */
    Future(()) // dummy
  }

  // NOTE(review): stub — ignores `p2` and updates nothing. The inner Future(())
  // created inside Future[Unit] { ... } is discarded; presumably a leftover of
  // the commented-out query — confirm before re-enabling.
  override def update(p2: EquipmentModel) = Future[Unit] {
    /* db.run(
     * Sites.filter(_.siteid === p2.siteid)
     * // .map(p => (p.name,p.details, p.price))
     * .map(p => (p.sitename))
     * // .update((p2.name,p2.details,p2.price))
     * .update((p2.sitename))
     * ) */
    Future(()) // dummy
  }
}
| tnddn/iv-web | portal/rest-portal/app/dao/sitedata/EquipmentModelDao.scala | Scala | apache-2.0 | 2,974 |
// Import library
import Jama._
/**
 * Benchmarks a dense N x N matrix multiplication using the JAMA library.
 *
 * Two matrices are filled from a fixed-seed RNG (reproducible runs),
 * multiplied, and the elapsed wall-clock time is printed.
 */
object DoMultiplyMatrices_jama {
  def main(args: Array[String]) {
    val N = 2000

    // Create the operand matrices
    val A = new Matrix(N, N)
    val B = new Matrix(N, N)

    // Fill the matrices with pseudo-random numbers (seed 0 for reproducibility)
    val r = new scala.util.Random(0)
    for (i <- 0 until A.getRowDimension(); j <- 0 until A.getColumnDimension())
      A.set(i, j, r.nextDouble())
    for (i <- 0 until B.getRowDimension(); j <- 0 until B.getColumnDimension())
      B.set(i, j, r.nextDouble())

    println("---------------------------------------")
    println("JAMA-Perform Matrix Multiplication: " + N)
    println("---------------------------------------")

    val startTime = System.currentTimeMillis
    // Matrix product C = A * B
    // (for C = A'B use A.transpose().times(B) instead — the old comment
    // claimed A'B while the code computes A*B)
    val C = A.times(B)
    val stopTime = System.currentTimeMillis

    // BUG FIX: the elapsed millis are divided by 1000.0 (i.e. converted to
    // seconds), but the old label said "milliseconds"; label now matches.
    println("Elapsed time in seconds: " + (stopTime - startTime) / 1000.0)
    println("---------------------------------------")
  }
}
| scienceopen/numba-examples | NASA_JulesKouatchou/Problem2/DoMultiplyMatrices_jama.scala | Scala | mit | 1,108 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactRedux
import scala.scalajs.js
/** Scala.js facade for the react-redux `connect` options object. */
@js.native
trait Options extends js.Any {
  /** If true, implements shouldComponentUpdate and shallowly compares the result of mergeProps,
   *  preventing unnecessary updates, assuming that the component is a pure component
   *  and does not rely on any input or state other than its props and the selected Redux store's state.
   *  Defaults to true.
   */
  val pure: Boolean = js.native
  /** If true, stores a ref to the wrapped component instance and makes it available via
   *  getWrappedInstance() method. Defaults to false.
   */
  val withRef: Boolean = js.native
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactRedux/Options.scala | Scala | apache-2.0 | 1,334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{InputStream, NotSerializableException}
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.collection.Map
import scala.collection.mutable.Queue
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import akka.actor.{Props, SupervisorStrategy}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark._
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.input.FixedLengthBinaryInputFormat
import org.apache.spark.rdd.{RDD, RDDOperationScope}
import org.apache.spark.serializer.SerializationDebugger
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContextState._
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.{ActorReceiver, ActorSupervisorStrategy, Receiver}
import org.apache.spark.streaming.scheduler.{JobScheduler, StreamingListener}
import org.apache.spark.streaming.ui.{StreamingJobProgressListener, StreamingTab}
import org.apache.spark.util.{CallSite, ShutdownHookManager, ThreadUtils, Utils}
/**
* Main entry point for Spark Streaming functionality. It provides methods used to create
* [[org.apache.spark.streaming.dstream.DStream]]s from various input sources. It can be either
* created by providing a Spark master URL and an appName, or from a org.apache.spark.SparkConf
* configuration (see core Spark documentation), or from an existing org.apache.spark.SparkContext.
* The associated SparkContext can be accessed using `context.sparkContext`. After
* creating and transforming DStreams, the streaming computation can be started and stopped
* using `context.start()` and `context.stop()`, respectively.
* `context.awaitTermination()` allows the current thread to wait for the termination
* of the context by `stop()` or by an exception.
*/
class StreamingContext private[streaming] (
sc_ : SparkContext,
cp_ : Checkpoint,//检查点
batchDur_ : Duration //设定Streaming每个批次的积累时间,时间范围
) extends Logging {
  /**
   * Create a StreamingContext using an existing SparkContext (no checkpoint).
   * @param sparkContext existing SparkContext
   * @param batchDuration the time interval at which streaming data will be divided into batches
   */
  def this(sparkContext: SparkContext, batchDuration: Duration) = {
    this(sparkContext, null, batchDuration)
  }
  /**
   * Create a StreamingContext by providing the configuration necessary for a new SparkContext.
   * @param conf a org.apache.spark.SparkConf object specifying Spark parameters
   * @param batchDuration the time interval at which streaming data will be divided into batches
   */
  def this(conf: SparkConf, batchDuration: Duration) = {
    this(StreamingContext.createNewSparkContext(conf), null, batchDuration)
  }
/**
* Create a StreamingContext by providing the details necessary for creating a new SparkContext.
* @param master cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName a name for your job, to display on the cluster web UI
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(
master: String,
appName: String,
batchDuration: Duration,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(StreamingContext.createNewSparkContext(master, appName, sparkHome, jars, environment),
null, batchDuration)
}
  /**
   * Recreate a StreamingContext from a checkpoint file.
   * @param path Path to the directory that was specified as the checkpoint directory
   * @param hadoopConf Optional, configuration object if necessary for reading from
   *                   HDFS compatible filesystems
   */
  def this(path: String, hadoopConf: Configuration) =
    this(null, CheckpointReader.read(path, new SparkConf(), hadoopConf).get, null)
  /**
   * Recreate a StreamingContext from a checkpoint file, using the default Hadoop configuration.
   * @param path Path to the directory that was specified as the checkpoint directory
   */
  def this(path: String) = this(path, SparkHadoopUtil.get.conf)
  /**
   * Recreate a StreamingContext from a checkpoint file using an existing SparkContext.
   * @param path Path to the directory that was specified as the checkpoint directory
   * @param sparkContext Existing SparkContext
   */
  def this(path: String, sparkContext: SparkContext) = {
    this(
      sparkContext,
      CheckpointReader.read(path, sparkContext.conf, sparkContext.hadoopConfiguration).get,
      null)
  }
  if (sc_ == null && cp_ == null) {
    throw new Exception("Spark Streaming cannot be initialized with " +
      "both SparkContext and checkpoint as null")
  }

  // True when this context is being recreated from a checkpoint
  private[streaming] val isCheckpointPresent = (cp_ != null)

  // Use the supplied SparkContext, or rebuild one from the checkpoint's conf
  private[streaming] val sc: SparkContext = {
    if (sc_ != null) {
      sc_
    } else if (isCheckpointPresent) {
      SparkContext.getOrCreate(cp_.createSparkConf())
    } else {
      throw new SparkException("Cannot create StreamingContext without a SparkContext")
    }
  }

  if (sc.conf.get("spark.master") == "local" || sc.conf.get("spark.master") == "local[1]") {
    logWarning("spark.master should be set as local[n], n > 1 in local mode if you have receivers" +
      " to get data, otherwise Spark jobs will not get resources to process the received data.")
  }

  private[streaming] val conf = sc.conf

  private[streaming] val env = sc.env

  // DStream graph: restored from the checkpoint when present, otherwise fresh
  private[streaming] val graph: DStreamGraph = {
    if (isCheckpointPresent) {
      cp_.graph.setContext(this)
      cp_.graph.restoreCheckpointData()
      cp_.graph
    } else {
      require(batchDur_ != null, "Batch duration for StreamingContext cannot be null")
      val newGraph = new DStreamGraph()
      newGraph.setBatchDuration(batchDur_)
      newGraph
    }
  }

  private val nextInputStreamId = new AtomicInteger(0)

  private[streaming] var checkpointDir: String = {
    if (isCheckpointPresent) {
      sc.setCheckpointDir(cp_.checkpointDir)
      cp_.checkpointDir
    } else {
      null
    }
  }

  private[streaming] val checkpointDuration: Duration = {
    if (isCheckpointPresent) cp_.checkpointDuration else graph.batchDuration
  }

  private[streaming] val scheduler = new JobScheduler(this) // periodically generates Spark jobs
  private[streaming] val waiter = new ContextWaiter // used to block until the context terminates
  private[streaming] val progressListener = new StreamingJobProgressListener(this) // listens to streaming jobs, feeds the StreamingTab
  // Streaming tab rendered in the Spark UI (only when the UI is enabled)
  private[streaming] val uiTab: Option[StreamingTab] =
    if (conf.getBoolean("spark.ui.enabled", true)) {
      Some(new StreamingTab(this))
    } else {
      None
    }

  /* Initializing a streamingSource to register metrics */
  // Metrics source for this streaming context
  private val streamingSource = new StreamingSource(this)

  private var state: StreamingContextState = INITIALIZED

  private val startSite = new AtomicReference[CallSite](null)

  private[streaming] def getStartSite(): CallSite = startSite.get()

  private var shutdownHookRef: AnyRef = _

  // The streaming scheduler and other threads started by the StreamingContext
  // should not inherit jobs group and job descriptions from the thread that
  // start the context. This configuration allows jobs group and job description
  // to be cleared in threads related to streaming. See SPARK-10649.
  sparkContext.conf.set("spark.localProperties.clone", "true")

  conf.getOption("spark.streaming.checkpoint.directory").foreach(checkpoint)
  /**
   * Return the associated Spark context.
   */
  def sparkContext: SparkContext = sc
/**
* Set each DStreams in this context to remember RDDs it generated in the last given duration.
* DStreams remember RDDs only for a limited duration of time and releases them for garbage
* collection. This method allows the developer to specify how long to remember the RDDs (
* if the developer wishes to query old data outside the DStream computation).
* @param duration Minimum duration that each DStream should remember its RDDs
*/
def remember(duration: Duration) {
graph.remember(duration)
}
  /**
   * Set the context to periodically checkpoint the DStream operations for driver
   * fault-tolerance.
   * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored.
   *                  Note that this must be a fault-tolerant file system like HDFS.
   *                  Passing null disables checkpointing.
   */
  def checkpoint(directory: String) {
    if (directory != null) {
      val path = new Path(directory)
      val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
      fs.mkdirs(path)
      // Record the fully-qualified path so both Spark core and this context agree
      val fullPath = fs.getFileStatus(path).getPath().toString
      sc.setCheckpointDir(fullPath)
      checkpointDir = fullPath
    } else {
      checkpointDir = null
    }
  }
private[streaming] def isCheckpointingEnabled: Boolean = {
checkpointDir != null
}
private[streaming] def initialCheckpoint: Checkpoint = {
if (isCheckpointPresent) cp_ else null
}
private[streaming] def getNewInputStreamId() = nextInputStreamId.getAndIncrement()
  /**
   * Execute a block of code in a scope such that all new DStreams created in this body will
   * be part of the same scope. For more detail, see the comments in `doCompute`.
   *
   * Note: Return statements are NOT allowed in the given body.
   */
  private[streaming] def withScope[U](body: => U): U = sparkContext.withScope(body)
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withNamedScope[U](name: String)(body: => U): U = {
RDDOperationScope.withScope(sc, name, allowNesting = false, ignoreParent = false)(body)
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html *
* @param receiver Custom implementation of Receiver
*
* @deprecated As of 1.0.0", replaced by `receiverStream`.
*/
@deprecated("Use receiverStream", "1.0.0")
def networkStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("network stream") {
receiverStream(receiver)
}
}
  /**
   * Create an input stream with any arbitrary user implemented receiver.
   * Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
   * @param receiver Custom implementation of Receiver
   */
  def receiverStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
    withNamedScope("receiver stream") {
      new PluggableInputDStream[T](this, receiver)
    }
  }
/**
* Create an input stream with any arbitrary user implemented actor receiver.
* 创建一个输入流与任何任意用户实现的接收器,
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param props Props object defining creation of the actor
* @param name Name of the actor
* @param storageLevel RDD storage level (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*
* @note An important point to note:
* Since Actor may exist outside the spark framework, It is thus user's responsibility
* to ensure the type safety, i.e parametrized type of data received and actorStream
* should be same.
*/
def actorStream[T: ClassTag](
props: Props,
name: String,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2,
supervisorStrategy: SupervisorStrategy = ActorSupervisorStrategy.defaultStrategy
): ReceiverInputDStream[T] = withNamedScope("actor stream") {
receiverStream(new ActorReceiver[T](props, name, storageLevel, supervisorStrategy))
}
/**
* Create a input stream from TCP source hostname:port. Data is received using
* a TCP socket and the receive bytes is interpreted as UTF8 encoded `\\n` delimited
* lines.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*/
def socketTextStream(
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = withNamedScope("socket text stream") {
socketStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel)
}
/**
* Create a input stream from TCP source hostname:port. Data is received using
* a TCP socket and the receive bytes it interepreted as object using the given
* converter.
* 数据接收使用TCP套接字和接收的字节interepreted为对象使用给定的转换器
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param converter Function to convert the byte stream to objects
* @param storageLevel Storage level to use for storing the received objects
* @tparam T Type of the objects received (after converting bytes to objects)
*/
def socketStream[T: ClassTag](
hostname: String,
port: Int,
converter: (InputStream) => Iterator[T],
storageLevel: StorageLevel
): ReceiverInputDStream[T] = {
new SocketInputDStream[T](this, hostname, port, converter, storageLevel)
}
/**
* Create a input stream from network source hostname:port, where data is received
* as serialized blocks (serialized using the Spark's serializer) that can be directly
* pushed into the block manager without deserializing them. This is the most efficient
* way to receive data.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @tparam T Type of the objects in the received blocks
*/
def rawSocketStream[T: ClassTag](
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[T] = withNamedScope("raw socket stream") {
new RawInputDStream[T](this, hostname, port, storageLevel)
}
/**
* Create a input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory)
}
/**
* Create a input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String, filter: Path => Boolean, newFilesOnly: Boolean): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly)
}
/**
* Create a input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @param conf Hadoop configuration
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String,
filter: Path => Boolean,
newFilesOnly: Boolean,
conf: Configuration): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly, Option(conf))
}
/**
* Create a input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as text files (using key as LongWritable, value
* as Text and input format as TextInputFormat). Files must be written to the
* monitored directory by "moving" them from another location within the same
* file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
*/
def textFileStream(directory: String): DStream[String] = withNamedScope("text file stream") {
fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString)
}
/**
* :: Experimental ::
*
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as flat binary files, assuming a fixed length per record,
* generating one byte array per record. Files must be written to the monitored directory
* by "moving" them from another location within the same file system. File names
* starting with . are ignored.
*
* '''Note:''' We ensure that the byte array for each record in the
* resulting RDDs of the DStream has the provided record length.
*
* @param directory HDFS directory to monitor for new file
* @param recordLength length of each record in bytes
*/
@Experimental
  def binaryRecordsStream(
      directory: String,
      recordLength: Int): DStream[Array[Byte]] = withNamedScope("binary records stream") {
    // NOTE(review): this mutates the SparkContext's *shared* Hadoop configuration, so any
    // concurrent reader of RECORD_LENGTH_PROPERTY would observe this value — confirm intended.
    val conf = sc_.hadoopConfiguration
    conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
    val br = fileStream[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](
      directory, FileInputDStream.defaultFilter: Path => Boolean, newFilesOnly = true, conf)
    val data = br.map { case (k, v) =>
      // Defensive check that the input format really produced fixed-size records.
      // NOTE(review): BytesWritable.getBytes may return a padded backing array — confirm
      // FixedLengthBinaryInputFormat always sizes it exactly to recordLength.
      val bytes = v.getBytes
      require(bytes.length == recordLength, "Byte array does not have correct length. " +
        s"${bytes.length} did not equal recordLength: $recordLength")
      bytes
    }
    data
  }
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
   * (Creates an input stream from a queue of RDDs; each batch processes one or all of the queued RDDs.)
* NOTE: Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @tparam T Type of objects in the RDD
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean = true
): InputDStream[T] = {
queueStream(queue, oneAtATime, sc.makeRDD(Seq[T](), 1))
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
   * (Creates an input stream from a queue of RDDs.)
* NOTE: Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @param defaultRDD Default RDD is returned by the DStream when the queue is empty.
* Set as null if no RDD should be returned when empty
* @tparam T Type of objects in the RDD
*/
  def queueStream[T: ClassTag](
      queue: Queue[RDD[T]],
      oneAtATime: Boolean,
      defaultRDD: RDD[T]
    ): InputDStream[T] = {
    // The caller-supplied queue is consumed by QueueInputDStream in each batch; defaultRDD
    // (which may be null, per the scaladoc above) is used when the queue is empty.
    new QueueInputDStream(this, queue, oneAtATime, defaultRDD)
  }
/**
* Create a unified DStream from multiple DStreams of the same type and same slide duration.
*/
  def union[T: ClassTag](streams: Seq[DStream[T]]): DStream[T] = withScope {
    // Combine all the given streams into a single DStream backed by UnionDStream.
    new UnionDStream[T](streams.toArray)
  }
/**
* Create a new DStream in which each RDD is generated by applying a function on RDDs of
* the DStreams.
   * (Creates a new DStream in which each RDD is generated by applying a function to the RDDs of the source DStreams.)
*/
def transform[T: ClassTag](
dstreams: Seq[DStream[_]],
transformFunc: (Seq[RDD[_]], Time) => RDD[T]
): DStream[T] = withScope {
new TransformedDStream[T](dstreams, sparkContext.clean(transformFunc))
}
/** Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
* receiving system events related to streaming.
*/
  def addStreamingListener(streamingListener: StreamingListener) {
    // Register on the scheduler's listener bus so the listener receives streaming events.
    scheduler.listenerBus.addListener(streamingListener)
  }
  // Fail-fast sanity checks run from start() before the scheduler is launched.
  private def validate() {
    assert(graph != null, "Graph is null")
    graph.validate()
    // A checkpoint directory without a checkpoint interval is a configuration error.
    require(
      !isCheckpointingEnabled || checkpointDuration != null,
      "Checkpoint directory has been set, but the graph checkpointing interval has " +
        "not been set. Please use StreamingContext.checkpoint() to set the interval."
    )
    // Verify whether the DStream checkpoint is serializable: do a trial serialization now
    // so the user gets a clear error instead of a later runtime failure.
    if (isCheckpointingEnabled) {
      val checkpoint = new Checkpoint(this, Time.apply(0))
      try {
        Checkpoint.serialize(checkpoint, conf)
      } catch {
        case e: NotSerializableException =>
          // Re-throw with an improved, more actionable message.
          throw new NotSerializableException(
            "DStream checkpointing has been enabled but the DStreams with their functions " +
              "are not serializable\n" +
              SerializationDebugger.improveException(checkpoint, e).getMessage()
          )
      }
    }
    // Warn (but do not fail) when dynamic allocation is on, since it can cause data loss
    // for non-replayable sources without a write-ahead log.
    if (Utils.isDynamicAllocationEnabled(sc.conf)) {
      logWarning("Dynamic Allocation is enabled for this application. " +
        "Enabling Dynamic allocation for Spark Streaming applications can cause data loss if " +
        "Write Ahead Log is not enabled for non-replayable sources like Flume. " +
        "See the programming guide for details on how to enable the Write Ahead Log")
    }
  }
/**
* :: DeveloperApi ::
*
* Return the current state of the context. The context can be in three possible states -
* - StreamingContextState.INTIALIZED - The context has been created, but not been started yet.
* Input DStreams, transformations and output operations can be created on the context.
* - StreamingContextState.ACTIVE - The context has been started, and been not stopped.
* Input DStreams, transformations and output operations cannot be created on the context.
* - StreamingContextState.STOPPED - The context has been stopped and cannot be used any more.
*/
@DeveloperApi
  def getState(): StreamingContextState = synchronized {
    // synchronized so callers observe the latest transition made by start()/stop().
    state
  }
/**
* Start the execution of the streams.
   * (Internally this launches the JobScheduler's start() on a dedicated thread.)
* @throws IllegalStateException if the StreamingContext is already stopped.
*/
  def start(): Unit = synchronized {
    state match {
      case INITIALIZED =>
        // Remember where the user started the context (reported if a second context starts).
        startSite.set(DStream.getCreationSite())
        // ACTIVATION_LOCK enforces the one-active-context-per-JVM invariant.
        StreamingContext.ACTIVATION_LOCK.synchronized {
          StreamingContext.assertNoOtherContextIsActive()
          try {
            validate()
            // Start the streaming scheduler in a new thread, so that thread local properties
            // like call sites and job groups can be reset without affecting those of the
            // current thread.
            ThreadUtils.runInNewThread("streaming-start") {
              sparkContext.setCallSite(startSite.get)
              sparkContext.clearJobGroup()
              sparkContext.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false")
              scheduler.start()
            }
            state = StreamingContextState.ACTIVE
          } catch {
            // NonFatal only: let OutOfMemoryError and friends propagate untouched.
            case NonFatal(e) =>
              logError("Error starting the context, marking it as stopped", e)
              scheduler.stop(false)
              state = StreamingContextState.STOPPED
              throw e
          }
          StreamingContext.setActiveContext(this)
        }
        // Ensure stop() runs on JVM shutdown; the hook reference is removed again in stop().
        shutdownHookRef = ShutdownHookManager.addShutdownHook(
          StreamingContext.SHUTDOWN_HOOK_PRIORITY)(stopOnShutdown)
        // Registering Streaming Metrics at the start of the StreamingContext
        assert(env.metricsSystem != null)
        env.metricsSystem.registerSource(streamingSource)
        uiTab.foreach(_.attach())
        logInfo("StreamingContext started")
      case ACTIVE =>
        // Idempotent: starting twice only warns.
        logWarning("StreamingContext has already been started")
      case STOPPED =>
        throw new IllegalStateException("StreamingContext has already been stopped")
    }
  }
/**
* Wait for the execution to stop. Any exceptions that occurs during the execution
* will be thrown in this thread.
   * (Blocks the calling thread; any exception raised during execution is rethrown here.)
*/
  def awaitTermination() {
    // Block the calling thread until stop() calls waiter.notifyStop() or the scheduler
    // reports an error (which is rethrown here).
    waiter.waitForStopOrError()
  }
/**
* Wait for the execution to stop. Any exceptions that occurs during the execution
* will be thrown in this thread.
* @param timeout time to wait in milliseconds
*
* @deprecated As of 1.3.0, replaced by `awaitTerminationOrTimeout(Long)`.
*/
@deprecated("Use awaitTerminationOrTimeout(Long) instead", "1.3.0")
  def awaitTermination(timeout: Long) {
    // Deprecated: same behavior as awaitTerminationOrTimeout but discards the Boolean result.
    waiter.waitForStopOrError(timeout)
  }
/**
* Wait for the execution to stop. Any exceptions that occurs during the execution
* will be thrown in this thread.
   * @param timeout time to wait in milliseconds
   * @return `true` if it's stopped; or throw the reported error during the execution; or `false`
   * if the waiting time elapsed before returning from the method.
*/
  def awaitTerminationOrTimeout(timeout: Long): Boolean = {
    // true if the context stopped within the timeout, false on timeout; errors reported
    // during execution are rethrown in this thread.
    waiter.waitForStopOrError(timeout)
  }
/**
* Stop the execution of the streams immediately (does not wait for all received data
* to be processed). By default, if `stopSparkContext` is not specified, the underlying
* SparkContext will also be stopped. This implicit behavior can be configured using the
* SparkConf configuration spark.streaming.stopSparkContextByDefault.
*
* @param stopSparkContext If true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
*/
def stop(
stopSparkContext: Boolean = conf.getBoolean("spark.streaming.stopSparkContextByDefault", true)
): Unit = synchronized {
stop(stopSparkContext, false)
}
/**
* Stop the execution of the streams, with option of ensuring all received data
* has been processed.
*
* @param stopSparkContext if true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
* @param stopGracefully if true, stops gracefully by waiting for the processing of all
* received data to be completed
*/
  def stop(stopSparkContext: Boolean, stopGracefully: Boolean): Unit = {
    // The shutdown-hook handle is captured inside the lock but removed outside it —
    // presumably to avoid holding this context's monitor while calling into
    // ShutdownHookManager (TODO confirm).
    var shutdownHookRefToRemove: AnyRef = null
    synchronized {
      try {
        state match {
          case INITIALIZED =>
            logWarning("StreamingContext has not been started yet")
          case STOPPED =>
            logWarning("StreamingContext has already been stopped")
          case ACTIVE =>
            scheduler.stop(stopGracefully)
            // Removing the streamingSource to de-register the metrics on stop()
            env.metricsSystem.removeSource(streamingSource)
            uiTab.foreach(_.detach())
            StreamingContext.setActiveContext(null)
            // Wake up any threads blocked in awaitTermination*.
            waiter.notifyStop()
            if (shutdownHookRef != null) {
              shutdownHookRefToRemove = shutdownHookRef
              shutdownHookRef = null
            }
            logInfo("StreamingContext stopped successfully")
        }
      } finally {
        // The state should always be Stopped after calling `stop()`, even if we haven't started yet
        state = STOPPED
      }
    }
    if (shutdownHookRefToRemove != null) {
      ShutdownHookManager.removeShutdownHook(shutdownHookRefToRemove)
    }
    // Even if we have already stopped, we still need to attempt to stop the SparkContext because
    // a user might stop(stopSparkContext = false) and then call stop(stopSparkContext = true).
    if (stopSparkContext) sc.stop()
  }
  // Registered as a JVM shutdown hook in start(); graceful stop is opt-in via configuration.
  private def stopOnShutdown(): Unit = {
    val stopGracefully = conf.getBoolean("spark.streaming.stopGracefullyOnShutdown", false)
    logInfo(s"Invoking stop(stopGracefully=$stopGracefully) from shutdown hook")
    // Do not stop SparkContext, let its own shutdown hook stop it
    stop(stopSparkContext = false, stopGracefully = stopGracefully)
  }
}
/**
* StreamingContext object contains a number of utility functions related to the
* StreamingContext class.
*/
object StreamingContext extends Logging {

  /**
   * Lock that guards activation of a StreamingContext as well as access to the singleton active
   * StreamingContext in getActiveOrCreate().
   */
  private val ACTIVATION_LOCK = new Object()

  /** Run the streaming shutdown hook just before the SparkContext's own shutdown hook. */
  private val SHUTDOWN_HOOK_PRIORITY = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY + 1

  /** The single active (started but not stopped) StreamingContext in this JVM, or null. */
  private val activeContext = new AtomicReference[StreamingContext](null)

  /**
   * Throws IllegalStateException if another StreamingContext is already active, naming the
   * call site that started it so users can locate the conflicting context.
   */
  private def assertNoOtherContextIsActive(): Unit = {
    ACTIVATION_LOCK.synchronized {
      if (activeContext.get() != null) {
        throw new IllegalStateException(
          "Only one StreamingContext may be started in this JVM. " +
            // Fixed: a space was missing between "at" and the appended call-site text.
            "Currently running StreamingContext was started at " +
            activeContext.get.getStartSite().longForm)
      }
    }
  }

  /** Records the active context; passing null clears it (done in stop()). */
  private def setActiveContext(ssc: StreamingContext): Unit = {
    ACTIVATION_LOCK.synchronized {
      activeContext.set(ssc)
    }
  }

  /**
   * :: Experimental ::
   *
   * Get the currently active context, if there is one. Active means started but not stopped.
   */
  @Experimental
  def getActive(): Option[StreamingContext] = {
    ACTIVATION_LOCK.synchronized {
      Option(activeContext.get())
    }
  }

  /**
   * @deprecated As of 1.3.0, replaced by implicit functions in the DStream companion object.
   *             This is kept here only for backward compatibility.
   */
  @deprecated("Replaced by implicit functions in the DStream companion object. This is " +
    "kept here only for backward compatibility.", "1.3.0")
  def toPairDStreamFunctions[K, V](stream: DStream[(K, V)])
      (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
    : PairDStreamFunctions[K, V] = {
    DStream.toPairDStreamFunctions(stream)(kt, vt, ord)
  }

  /**
   * :: Experimental ::
   *
   * Either return the "active" StreamingContext (that is, started but not stopped), or create a
   * new StreamingContext with the provided function if none is active.
   *
   * @param creatingFunc Function to create a new StreamingContext
   */
  @Experimental
  def getActiveOrCreate(creatingFunc: () => StreamingContext): StreamingContext = {
    ACTIVATION_LOCK.synchronized {
      getActive().getOrElse { creatingFunc() }
    }
  }

  /**
   * :: Experimental ::
   *
   * Either get the currently active StreamingContext (that is, started but not stopped),
   * OR recreate a StreamingContext from checkpoint data in the given path. If checkpoint data
   * does not exist in the provided path, then create a new StreamingContext by calling the
   * provided `creatingFunc`.
   *
   * @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
   * @param creatingFunc Function to create a new StreamingContext
   * @param hadoopConf Optional Hadoop configuration if necessary for reading from the
   *                   file system
   * @param createOnError Optional, whether to create a new StreamingContext if there is an
   *                      error in reading checkpoint data. By default, an exception will be
   *                      thrown on error.
   */
  @Experimental
  def getActiveOrCreate(
      checkpointPath: String,
      creatingFunc: () => StreamingContext,
      hadoopConf: Configuration = SparkHadoopUtil.get.conf,
      createOnError: Boolean = false
    ): StreamingContext = {
    ACTIVATION_LOCK.synchronized {
      getActive().getOrElse { getOrCreate(checkpointPath, creatingFunc, hadoopConf, createOnError) }
    }
  }

  /**
   * Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
   * If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
   * recreated from the checkpoint data. If the data does not exist, then the StreamingContext
   * will be created by calling the provided `creatingFunc`.
   *
   * @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
   * @param creatingFunc Function to create a new StreamingContext
   * @param hadoopConf Optional Hadoop configuration if necessary for reading from the
   *                   file system
   * @param createOnError Optional, whether to create a new StreamingContext if there is an
   *                      error in reading checkpoint data. By default, an exception will be
   *                      thrown on error.
   */
  def getOrCreate(
      checkpointPath: String,
      creatingFunc: () => StreamingContext,
      hadoopConf: Configuration = SparkHadoopUtil.get.conf,
      createOnError: Boolean = false
    ): StreamingContext = {
    val checkpointOption = CheckpointReader.read(
      checkpointPath, new SparkConf(), hadoopConf, createOnError)
    checkpointOption.map(new StreamingContext(null, _, null)).getOrElse(creatingFunc())
  }

  /**
   * Find the JAR from which a given class was loaded, to make it easy for users to pass
   * their JARs to StreamingContext.
   */
  def jarOfClass(cls: Class[_]): Option[String] = SparkContext.jarOfClass(cls)

  /** Builds a SparkContext directly from an existing SparkConf. */
  private[streaming] def createNewSparkContext(conf: SparkConf): SparkContext = {
    new SparkContext(conf)
  }

  /** Builds a SparkContext from individual settings via SparkContext.updatedConf. */
  private[streaming] def createNewSparkContext(
      master: String,
      appName: String,
      sparkHome: String,
      jars: Seq[String],
      environment: Map[String, String]
    ): SparkContext = {
    val conf = SparkContext.updatedConf(
      new SparkConf(), master, appName, sparkHome, jars, environment)
    new SparkContext(conf)
  }

  /**
   * Builds the output file name for a batch: `<prefix>-<time in ms>[.<suffix>]`.
   * A null prefix yields just the timestamp; a null or empty suffix omits the extension.
   */
  private[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = {
    if (prefix == null) {
      time.milliseconds.toString
    } else if (suffix == null || suffix.length == 0) {
      prefix + "-" + time.milliseconds
    } else {
      prefix + "-" + time.milliseconds + "." + suffix
    }
  }
}
| tophua/spark1.52 | streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala | Scala | apache-2.0 | 37,952 |
package com.blinkbox.books.search.ingester
import com.blinkbox.books.config._
import com.blinkbox.books.rabbitmq.RabbitMqConsumer
import com.blinkbox.books.rabbitmq.RabbitMqConfirmedPublisher
import com.blinkbox.books.rabbitmq.RabbitMqConfirmedPublisher.PublisherConfiguration
import com.typesafe.config.Config
import java.net.URL
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
import com.blinkbox.books.rabbitmq.RabbitMqConfig
/**
 * Top-level configuration for the search ingester service, populated from the
 * `service.searchIngester` section of the Typesafe config (see object AppConfig below).
 *
 * @param index                   value of the `index` config key
 * @param retryTime               duration read from the `retryTime` key
 * @param requestTimeout          duration read from the `requestTimeout` key
 * @param rabbitMq                RabbitMQ settings built from the root config
 * @param bookMetadataInput       queue settings from `bookMetadataInput`
 * @param bookMetadataErrorOutput publisher settings from `bookMetadataErrorOutput`
 * @param priceDataInput          queue settings from `priceDataInput`
 * @param priceDataErrorOutput    publisher settings from `priceDataErrorOutput`
 * @param solr                    Solr settings from the root `solr` section
 */
case class AppConfig(
  index: String,
  retryTime: FiniteDuration,
  requestTimeout: FiniteDuration,
  rabbitMq: RabbitMqConfig,
  bookMetadataInput: RabbitMqConsumer.QueueConfiguration,
  bookMetadataErrorOutput: PublisherConfiguration,
  priceDataInput: RabbitMqConsumer.QueueConfiguration,
  priceDataErrorOutput: PublisherConfiguration,
  solr: SolrConfig)
/** Solr server location, read from the `url` key of the `solr` config section. */
case class SolrConfig(url: URL)
object AppConfig {
  /** Builds an [[AppConfig]] from the `service.searchIngester` section of the given config. */
  def apply(config: Config): AppConfig = {
    val svc = config.getConfig("service.searchIngester")
    // Small local factories to keep the construction below readable.
    def queue(name: String) = RabbitMqConsumer.QueueConfiguration(svc.getConfig(name))
    def publisher(name: String) = RabbitMqConfirmedPublisher.PublisherConfiguration(svc.getConfig(name))
    AppConfig(
      index = svc.getString("index"),
      retryTime = svc.getFiniteDuration("retryTime"),
      requestTimeout = svc.getFiniteDuration("requestTimeout"),
      rabbitMq = RabbitMqConfig(config),
      bookMetadataInput = queue("bookMetadataInput"),
      bookMetadataErrorOutput = publisher("bookMetadataErrorOutput"),
      priceDataInput = queue("priceDataInput"),
      priceDataErrorOutput = publisher("priceDataErrorOutput"),
      solr = SolrConfig(config.getConfig("solr")))
  }
}
object SolrConfig {
  /** Builds a [[SolrConfig]] from a config section containing a `url` entry. */
  def apply(config: Config): SolrConfig = SolrConfig(config.getUrl("url"))
}
| blinkboxbooks/search-ingester-service.scala | src/main/scala/com/blinkbox/books/search/ingester/AppConfig.scala | Scala | mit | 1,712 |
// Lazily-evaluated infinite stream of Fibonacci numbers: starts 0, 1 and each later
// element is the sum of the two preceding ones.
val fibs: Stream[BigInt] = BigInt(0) #:: BigInt(1) #:: fibs.zip(fibs.tail).map { case (a, b) => a + b }
// Print the first five Fibonacci numbers.
fibs.take(5).foreach(println)
| samendez/atom-script | examples/fibo.scala | Scala | mit | 129 |
/*******************************************************************************
Copyright (c) 2012-2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.tests
import junit.framework.Test
import junit.framework.TestSuite
import junit.framework.TestCase
import junit.framework.Assert._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.{AddressManager, Operator}
import kr.ac.kaist.jsaf.Shell
import kr.ac.kaist.jsaf.ShellParameters
import kr.ac.kaist.jsaf.compiler.Predefined
// Empty marker class so the Eclipse JUnit runner can discover this suite; the actual
// test data and logic live in the object of the same name below.
class TypingOperatorJUTest
object TypingOperatorJUTest {
Shell.pred = new Predefined(new ShellParameters())
val joinCases:List[(String, List[Any], List[Any], List[Any], Boolean)] = List(
("{} + {} = {}", List(), List(), List(), true),
("{1} + {2} = {1, 2}", List(1), List(2), List(1, 2), true),
("{UndefTop} + {true} = {UndefTop, true}", List(UndefTop), List(true), List(UndefTop, true), true),
("{NullTop} + {} = {NullTop}", List(NullTop), List(), List(NullTop), true),
("{true} + {false} = {true, false}", List(true), List(false), List(true, false), true),
("{false} + {NullTop} = {false, NullTop}", List(false), List(NullTop), List(false, NullTop), true),
("{-1} + {true} = {-1, true}", List(-1), List(true), List(-1, true), true),
("{-3} + {0.2} = {-3, 0.2}", List(-3), List(0.2), List(-3, 0.2), true),
("{1, 2} + {false} = {1, 2, false}", List(1, 2), List(false), List(1, 2, false), true),
("{1, \\"s\\"} + {false} = {1, \\"s\\", false}", List(1, "s"), List(false), List(1, "s", false), true),
("{\\"1\\", \\"0\\"} + {1} = {\\"1\\", \\"0\\", 1}", List("1", "0"), List(1), List("1", "0", 1), true),
("{NaN} + {true} = {NaN, true}", List(NaN), List(true), List(NaN, true), true),
("{PosInf} + {NullTop} = {PosInf, NullTop}", List(PosInf), List(NullTop), List(PosInf, NullTop), true),
("{UndefTop} + {NegInf} = {UndefTop, NegInf}", List(UndefTop), List(NegInf), List(UndefTop, NegInf), true),
("{\\"foo\\"} + {\\"1\\"} = {\\"foo\\", \\"1\\"}", List("foo"), List("1"), List("foo", "1"), true),
("{UndefTop} + {\\"str\\"} = {UndefTop, \\"str\\"}", List(UndefTop), List("str"), List(UndefTop, "str"), true),
("{NaN} + {-1} = {NaN, -1}", List(NaN), List(-1), List(NaN, -1), true),
("{PosInf} + {NegInf} = {PosInf, NegInf}", List(PosInf), List(NegInf), List(PosInf, NegInf), true),
("{NaN} + {NegInf} = {NaN, NegInf}", List(NaN), List(NegInf), List(NaN, NegInf), true),
("{NaN} + {PosInf} = {NaN, PosInf}", List(NaN), List(PosInf), List(NaN, PosInf), true),
("{1, 2} + {-3, 4.3} = {1, 2, -3, 4.3}", List(1, 2), List(-3, 4.3), List(1, 2, -3, 4.3), true),
("{UndefTop, NullTop, true} + {false} = {UndefTop, NullTop, true, false}", List(UndefTop, NullTop, true), List(false), List(UndefTop, NullTop, true, false), true),
("{UndefTop, NullTop, true, false} + {NaN, 1, \\"1\\", \\"str\\"} = {UndefTop, NullTop, true, false, NaN, 1, \\"1\\", \\"str\\"}", List(UndefTop, NullTop, true, false), List(NaN, 1, "1", "str"), List(UndefTop, NullTop, true, false, NaN, 1, "1", "str"), true),
("{\\"-1\\"} + {\\"3.5\\"} = {\\"-1\\", \\"3.5\\"}", List("-1"), List("3.5"), List("-1", "3.5"), true),
("{\\"1\\"} + {} = {\\"1\\"}", List("1"), List(), List("1"), true),
("{\\"-1\\"} + {} = {\\"-1\\"}", List("-1"), List(), List("-1"), true),
("{UndefTop} + {} = {UndefTop}", List(UndefTop), List(), List(UndefTop), true),
("{NullTop} + {} = {NullTop}", List(NullTop), List(), List(NullTop), true),
("{true} + {} = {true}", List(true), List(), List(true), true)
// TODO
)
val binCases:List[TypeOperator] = List(
BinBitOr("{NaN} | {1, 2} = {1, 2}", List(NaN), List(1,2), List(1, 2), true),
BinBitOr("{-1, 3.2} | {NaN} = {-1, 3.2}", List(-1, 3.2), List(NaN), List(-1, 3), true),
BinBitOr("{1} | {2} = {3}", List(1), List(2), List(3), true),
BinBitOr("{1} | {-1} = {-1}", List(1), List(-1), List(-1), true),
BinBitOr("{1} | {1, 2} = {1, 3}", List(1), List(1, 2), List(1, 3), true),
BinBitOr("{1} | {-1, 2.1} = {-1, 3}", List(1), List(-1, 2.1), List(-1, 3), true),
BinBitOr("{-1} | {-2} = {-1}", List(-1), List(-2), List(-1), true),
BinBitOr("{-1} | {1} = {-1}", List(-1), List(1), List(-1), true),
BinBitOr("{-1} | {1, 2} >= {-1, -1}", List(-1), List(1, 2), List(-1, -1), false),
BinBitOr("{-2} | {1, 2} = {-1, -2}", List(-2), List(1, 2), List(-1, -2), true),
BinBitOr("{2.1} | {1, 2} = {3, 2}", List(2.1), List(1, 2), List(3, 2), true),
BinBitOr("{-1} | {-1, 3.1} >= {-1, -1}", List(-1), List(-1, 3.1), List(-1, -1), false),
BinBitOr("{7.1} | {-1, 3.1} = {-1, 7}", List(7.1), List(-1, 3.1), List(-1, 7), true),
BinBitOr("{1, 2} | {5} = {5, 7}", List(1, 2), List(5), List(5, 7), true),
BinBitOr("{1, 2} | {-3} = {-3, -1}", List(1, 2), List(-3), List(-3, -1), true),
BinBitOr("{1, 2} | {5.1} = {5, 7}", List(1, 2), List(5.1), List(5, 7), true),
BinBitOr("{1, 2} | {5, 6} = {5, 7, 7, 6}", List(1, 2), List(5, 6), List(5, 7, 7, 6), true),
BinBitOr("{1, 2} | {-4, 7.5} = {-3, 7, -2, 7}", List(1, 2), List(-4, 7.5), List(-3, 7, -2, 7), true),
BinBitOr("{-3, -2} | {2} >= {-1, -2}", List(-3, -2), List(2), List(-1, -2), false),
BinBitOr("{-3, 2.5} | {2} = {-1, 2}", List(-3, 2.5), List(2), List(-1, 2), true),
BinBitOr("{-3, -2} | {-1, 1} >= {-1, -3, -1, -1}", List(-3, -2), List(-1, 1), List(-1, -3, -1, -1), false),
BinBitOr("{-3, 2.5} | {-1, 1} = {-1, -3, -1, 3}", List(-3, 2.5), List(-1, 1), List(-1, -3, -1, 3), true),
BinBitOr("{PosInf} | {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitOr("{PosInf} | {1} = {1}", List(PosInf), List(1), List(1), true),
BinBitOr("{PosInf} | {3.5} = {3}", List(PosInf), List(3.5), List(3), true),
BinBitOr("{1} | {NegInf} = {1}", List(1), List(NegInf), List(1), true),
BinBitOr("{-1} | {NegInf} = {-1}", List(-1), List(NegInf), List(-1), true),
BinBitOr("{PosInf, NegInf} | {1} = {1}", List(PosInf, NegInf), List(1), List(1), true),
BinBitAnd("{1} & {2} = {0}", List(1), List(2), List(0), true),
BinBitAnd("{1} & {-1} = {1}", List(1), List(-1), List(1), true),
BinBitAnd("{1} & {2, 1} = {0, 1}", List(1), List(2, 1), List(0, 1), true),
BinBitAnd("{1} & {-2, 3} = {0, 1}", List(1), List(-2, 3), List(0, 1), true),
BinBitAnd("{1} & {-2, 3.4} = {0, 1}", List(1), List(-2, 3.4), List(0, 1), true),
BinBitAnd("{3} & {-4, 3.2} = {0, 3}", List(3), List(-4, 3.2), List(0, 3), true),
BinBitAnd("{-2} & {3} = {2}", List(-2), List(3), List(2), true),
BinBitAnd("{-2} & {-3} = {-4}", List(-2), List(-3), List(-4), true),
BinBitAnd("{-2} & {3, 5} = {2, 4}", List(-2), List(3, 5), List(2, 4), true),
BinBitAnd("{-2} & {-3, -6} >= {-4, -6}", List(-2), List(-3, -6), List(-4, -6), false),
BinBitAnd("{-2} & {-3, 6.5} = {-4, 6}", List(-2), List(-3, 6.5), List(-4, 6), true),
BinBitAnd("{-2} & {-3, 3} = {-4, 2}", List(-2), List(-3, 5), List(-4, 2), true),
BinBitAnd("{6, 7} & {1} = {0, 1}", List(6, 7), List(1), List(0, 1), true),
BinBitAnd("{6, 7} & {-5} = {2, 3}", List(6, 7), List(-5), List(2, 3), true),
BinBitAnd("{3.4, -6} & {1} >= {1, 0}", List(3.4, -6), List(1), List(1, 0), false),
BinBitAnd("{3.4, -6} & {-1} = {3, -6}", List(3.4, -6), List(-1), List(3, -6), true),
BinBitAnd("{3.4, -10} & {5.5} >= {1, 4}", List(3.4, -10), List(5.5), List(1, 4), false),
BinBitAnd("{3.4, -6} & {1, 2} >= {1, 2, 0, 2}", List(3.4, -6), List(1, 2), List(1, 2, 0, 2), false),
BinBitAnd("{3.4, -6} & {3.4, -6} = {3, 2, 2, -6}", List(3.4, -6), List(3.4, -6), List(3, 2, 2, -6), true),
BinBitAnd("{PosInf} & {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitAnd("{PosInf} & {1} = {0}", List(PosInf), List(1), List(0), true),
BinBitAnd("{PosInf} & {1, 2} = {0}", List(PosInf), List(1, 2), List(0), true),
BinBitAnd("{1} & {NegInf} = {0}", List(1), List(NegInf), List(0), true),
BinBitAnd("{2.5, -3} & {NegInf} = {0}", List(2.5, -3), List(NegInf), List(0), true),
BinBitAnd("{1} & {PosInf, NegInf} = {0}", List(1), List(PosInf, NegInf), List(0), true),
BinBitAnd("{NaN} & {-3} = {0}", List(NaN), List(-3), List(0), true),
BinBitXor("{3} ^ {2} = {1}", List(3), List(2), List(1), true),
BinBitXor("{3} ^ {1, 2} = {2, 1}", List(3), List(1, 2), List(2, 1), true),
BinBitXor("{3} ^ {-1} = {-4}", List(3), List(-1), List(-4), true),
BinBitXor("{-3} ^ {-3, 3.6} = {0, -2}", List(-3), List(-3, 3.6), List(0, -2), true),
BinBitXor("{-3} ^ {-1} = {2}", List(-3), List(-1), List(2), true),
BinBitXor("{-3} ^ {1, 2} = {-4, -1}", List(-3), List(1, 2), List(-4, -1), true),
BinBitXor("{-3} ^ {-1, 0.4} = {2, -3}", List(-3), List(-1, 0.4), List(2, -3), true),
BinBitXor("{3, 7} ^ {2} = {1, 5}", List(3, 7), List(2), List(1, 5), true),
BinBitXor("{3, 7} ^ {2, 6} = {1, 5, 5, 1}", List(3, 7), List(2, 6), List(1, 5, 5, 1), true),
BinBitXor("{3, 7} ^ {-4} = {-1, -5}", List(3, 7), List(-4), List(-1, -5), true),
BinBitXor("{3, 7} ^ {-6, 1.3} = {-7, 2, -3, 6}", List(3, 7), List(-6, 1.3), List(-7, 2, -3, 6), true),
BinBitXor("{0.2, -4} ^ {3} = {3, -1}", List(0.2, -4), List(3), List(3, -1), true),
BinBitXor("{0.2, -4} ^ {3, 6} = {3, -1, 6, -6}", List(0.2, -4), List(3, 6), List(3, -1, 6, -6), true),
BinBitXor("{0.2, -4} ^ {0.5} = {0, -4}", List(0.2, -4), List(0.5), List(0, -4), true),
BinBitXor("{0.2, -4} ^ {0.5, 3.6} = {0, -4, 3, -1}", List(0.2, -4), List(0.5, 3.6), List(0, -4, 3, -1), true),
BinBitXor("{PosInf} ^ {3} = {3}", List(PosInf), List(3), List(3), true),
BinBitXor("{PosInf} ^ {NegInf} = {0}", List(PosInf), List(NegInf), List(0), true),
BinBitXor("{PosInf} ^ {-3} = {-3}", List(PosInf), List(-3), List(-3), true),
BinBitXor("{NegInf} ^ {3.5} = {3}", List(NegInf), List(3.5), List(3), true),
BinBitXor("{2, 4} ^ {NegInf} = {2, 4}", List(2, 4), List(NegInf), List(2, 4), true),
BinBitXor("{PosInf, NegInf} ^ {3, 2} = {3, 2}", List(PosInf, NegInf), List(3, 2), List(3, 2), true),
BinLShift("{3} << {1} = {6}", List(3), List(1), List(6), true),
BinLShift("{3} << {1.2} = {6}", List(3), List(1.2), List(6), true),
BinLShift("{3} << {-1.2} = {6}", List(3), List(-1.2), List(-2147483648), true),
BinLShift("{-3} << {1} = {-6}", List(-3), List(1), List(-6), true),
BinLShift("{-1} << {-3} = {-536870912}", List(-1), List(-3), List(-536870912), true),
BinLShift("{-1} << {-3, 2} >= {-536870912, -4}", List(-1), List(-3, 2), List(-536870912, -4), false),
BinLShift("{4} << {29} = {-2147483648}", List(4), List(29), List(-2147483648), true),
BinLShift("{-4} << {30} = {0}", List(-4), List(30), List(0), true),
BinLShift("{4} << {29, 28} = {-2147483648, 1073741824}", List(4), List(29, 28), List(-2147483648, 1073741824), true),
BinLShift("{4, 2} << {-2} = {0, -2147483648}", List(4, 2), List(-2), List(0, -2147483648), true),
BinRShift("{256} >> {3} = {32}", List(256), List(3), List(32), true),
BinRShift("{256} >> {-32} = {256}", List(256), List(-32), List(256), true),
BinRShift("{256} >> {-31} = {128}", List(256), List(-31), List(128), true),
BinRShift("{256} >> {-33} = {0}", List(256), List(-33), List(0), true),
BinRShift("{-256} >> {-3} = {-1}", List(-256), List(-3), List(-1), true),
BinRShift("{-256} >> {3} = {-32}", List(-256), List(3), List(-32), true),
BinRShift("{-256} >> {-0.5} = {-256}", List(-256), List(-0.5), List(-256), true),
BinRShift("{-256} >> {17, 31} >= {-1, -1}", List(-256), List(17, 31), List(-1, -1), false),
BinRShift("{343.4} >> {2} = {85}", List(343.4), List(2), List(85), true),
BinRShift("{256, 34} >> {2} = {64, 8}", List(256, 34), List(2), List(64, 8), true),
BinRShift("{34, -34} >> {2} = {64, -9}", List(34, -34), List(2), List(64, -9), true),
BinURShift("{32} >>> {2} = {8}", List(32), List(2), List(8), true),
BinURShift("{-32} >>> {2} = {1073741816}", List(-32), List(2), List(1073741816), true),
BinURShift("{-32} >>> {-1} = {1}", List(-32), List(-1), List(1), true),
BinURShift("{-32} >>> {30} = {3}", List(-32), List(30), List(3), true),
BinURShift("{-32} >>> {-1, 30} >= {1, 3}", List(-32), List(-1, 30), List(1, 3), false),
BinURShift("{564} >>> {30} = {0}", List(564), List(30), List(0), true),
BinURShift("{564} >>> {-30} = {141}", List(564), List(-30), List(141), true),
BinURShift("{-12345} >>> {31} = {1}", List(-12345), List(31), List(1), true),
BinURShift("{564, -32} >>> {30} = {0, 3}", List(564, -32), List(30), List(0, 3), true),
BinURShift("{34, 78} >>> {2} = {8, 19}", List(34, 78), List(2), List(8, 19), true),
BinURShift("{-65, -90} >>> {-2} >= {3, 3}", List(-65, -90), List(-2), List(3, 3), false),
BinURShift("{-65, -90} >>> {4} = {268435451, 268435450}", List(-65, -90), List(4), List(268435451, 268435450), true),
BinPlus("{NaN} + {2} = {NaN}", List(NaN), List(2), List(NaN), true),
BinPlus("{PosInf, NegInf} + {NaN} = {NaN}", List(PosInf, NegInf), List(NaN), List(NaN), true),
BinPlus("{PosInf} + {NegInf} = {NaN}", List(PosInf), List(NegInf), List(NaN), true),
BinPlus("{NegInf} + {PosInf} = {NaN}", List(NegInf), List(PosInf), List(NaN), true),
BinPlus("{NegInf} + {NegInf} = {NegInf}", List(NegInf), List(NegInf), List(NegInf), true),
BinPlus("{PosInf} + {PosInf} = {PosInf}", List(PosInf), List(PosInf), List(PosInf), true),
BinPlus("{PosInf, NegInf} + {PosInf, NegInf} = {NaN, PosInf, NegInf}", List(PosInf, NegInf), List(PosInf, NegInf), List(NaN, PosInf, NegInf), true),
BinPlus("{2} + {PosInf, NegInf} = {PosInf, NegInf}", List(2), List(PosInf, NegInf), List(PosInf, NegInf), true),
BinPlus("{PosInf, NegInf} + {-3.1} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3.1), List(PosInf, NegInf), true),
BinPlus("{PosInf} + {2} = {PosInf}", List(PosInf), List(2), List(PosInf), true),
BinPlus("{-4.3} + {PosInf} = {PosInf}", List(-4.3), List(PosInf), List(PosInf), true),
BinPlus("{NegInf} + {100} = {NegInf}", List(NegInf), List(100), List(NegInf), true),
BinPlus("{3} + {NegInf} = {NegInf}", List(3), List(NegInf), List(NegInf), true),
BinPlus("{1} + {2} = {3}", List(1), List(2), List(3), true),
BinPlus("{1} + {-4} = {-3}", List(1), List(-4), List(-3), true),
BinPlus("{1} + {3, 4} = {4, 5}", List(1), List(3, 4), List(4, 5), true),
BinPlus("{1, 2} + {4} = {5, 6}", List(1, 2), List(4), List(5, 6), true),
BinPlus("{-1} + {-4} = {-5}", List(-1), List(-4), List(-5), true),
BinPlus("{-1} + {3.4, -2} >= {2.4, -3}", List(-1), List(3.4, -2), List(2.4, -3), false),
BinPlus("{-1} + {0, 2} = {-1, 1}", List(-1), List(0, 2), List(-1, 1), true),
BinPlus("{-1} + {3} = {2}", List(-1), List(3), List(2), true),
BinPlus("{3.5} + {0.5} = {4}", List(3.5), List(0.5), List(4), true),
BinPlus("{3.5} + {0.5, 1.5} >= {4, 5}", List(3.5), List(0.5, 1.5), List(4, 5), false),
BinPlus("{3.5, 6.5} + {0.5} >= {3, 6}", List(3.5, 6.5), List(0.5), List(4, 7), false),
BinPlus("{2, 4} + {-5} >= {-3, -1}", List(2, 4), List(-5), List(-3, -1), false),
BinPlus("{1.2, 4} + {-1} = {0.2, 3}", List(1.2, 4), List(-1), List(0.2, 3), true),
BinPlus("{} + {} = {}", List(), List(), List(), true),
BinPlus("{\\"s\\"} + {PosInf} = {\\"sInfinity\\"}", List("s"), List(PosInf), List("sInfinity"), true),
BinPlus("{\\"s\\"} + {NegInf} = {\\"s-Infinity\\"}", List("s"), List(NegInf), List("s-Infinity"), true),
BinPlus("{NaN} + {\\"s\\"} = {\\"NaNs\\"}", List(NaN), List("s"), List("NaNs"), true),
BinPlus("{\\"A\\"} + {\\"B\\"} = {\\"AB\\"}", List("A"), List("B"), List("AB"), true),
BinPlus("{\\"1\\"} + {3} = {\\"13\\"}", List("1"), List(3), List("13"), true),
BinPlus("{\\"0\\"} + {1} = {\\"01\\"}", List("0"), List(1), List("01"), true),
BinPlus("{\\"2\\"} + {\\"\\"} = {\\"2\\"}", List("2"), List(""), List("2"), true),
BinPlus("{\\"2\\"} + {-3} = {\\"2-3\\"}", List("2"), List(-3), List("2-3"), true),
BinPlus("{\\"2\\"} + {1.2} = {\\"21.2\\"}", List("2"), List(1.2), List("21.2"), true),
BinPlus("{\\"-1\\"} + {\\"\\"} = {\\"-1\\"}", List("-1"), List(""), List("-1"), true),
BinPlus("{\\"-1\\"} + {1} = {\\"-11\\"}", List("-1"), List(1), List("-11"), true),
BinPlus("{\\"-1\\"} + {\\"\\", 2} = {\\"-1\\", \\"-12\\"}", List("-1"), List("", 2), List("-1", "-12"), true),
BinPlus("{\\"3.2\\", \\"\\"} + {0} = {\\"3.20\\", \\"0\\"}", List("3.2", ""), List(0), List("3.20", "0"), false),
BinPlus("{\\"3.2\\", \\"\\"} + {-1} = {\\"3.2-1\\", \\"-1\\"}", List("3.2", ""), List(-1), List("3.2-1", "-1"), true),
BinPlus("{\\"a\\", 1} + {\\"b\\", 2} >= {\\"ab\\", \\"a2\\", \\"1b\\", 3}", List("a", 1), List("b", 2), List("ab", "1b", "a2", 3), false),
BinPlus("{true, 1} + {\\"a\\", 2} = {\\"1a\\", \\"truea\\", 3, 3}", List(true, 1), List("a", 2), List("1a", "truea", 3, 3), true),
BinPlus("{false, 1} + {\\"a\\", 2} = {\\"1a\\", \\"falsea\\", 2, 3}", List(false, 1), List("a", 2), List("1a", "falsea", 2, 3), true),
BinPlus("{null, \\"1\\"} + {\\"1\\", \\"\\"} = {\\"null1\\", \\"11\\", \\"null\\", \\"1\\"}", List(NullTop, "1"), List("1", ""), List("null1", "11", "null", "1"), true),
BinPlus("{null, \\"1\\"} + {\\"1\\", \\"2\\"} = {\\"null1\\", \\"11\\", \\"null2\\", \\"12\\"}", List(NullTop, "1"), List("1", "2"), List("null1", "11", "null2", "12"), true),
BinPlus("{Undef, 1} + {\\"str\\", true} = {\\"undefinedstr\\", NaN, \\"1str\\", 2}", List(UndefTop, 1), List("str", true), List("undefinedstr", NaN, "1str", 2), true),
BinPlus("{\\"1\\", -2} + {1, \\"4\\"} = {\\"11\\", \\"14\\", -1, \\"-24\\"}", List("1", -2), List(1, "4"), List("11", "14", -1, "-24"), true),
BinPlus("{\\"\\"} + {1, 2} = {\\"1\\", \\"2\\"}", List(""), List(1, 2), List("1", "2"), true),
BinPlus("{\\"\\"} + {-1, 3.2} = {\\"-1\\", \\"3.2\\"}", List(""), List(-1, 3.2), List("-1", "3.2"), true),
BinPlus("{\\"\\"} + {1, -1} = {\\"1\\", \\"-1\\"}", List(""), List(1, -1), List("1", "-1"), true),
BinMinus("{} - {} = {}", List(), List(), List(), true),
BinMinus("{} - {1} = {}", List(), List(1), List(), true),
BinMinus("{-1} - {} = {}", List(-1), List(), List(), true),
BinMinus("{NaN} - {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMinus("{-2} - {NaN} = {NaN}", List(-2), List(NaN), List(NaN), true),
BinMinus("{PosInf} - {NegInf} = {PosInf}", List(PosInf), List(NegInf), List(PosInf), true),
BinMinus("{NegInf} - {PosInf} = {NegInf}", List(NegInf), List(PosInf), List(NegInf), true),
BinMinus("{PosInf} - {PosInf} = {NaN}", List(PosInf), List(PosInf), List(NaN), true),
BinMinus("{NegInf} - {NegInf} = {NaN}", List(NegInf), List(NegInf), List(NaN), true),
BinMinus("{PosInf} - {3} = {PosInf}", List(PosInf), List(3), List(PosInf), true),
BinMinus("{NegInf} - {-2} = {NegInf}", List(NegInf), List(-2), List(NegInf), true),
BinMinus("{2} - {PosInf} = {NegInf}", List(2), List(PosInf), List(NegInf), true),
BinMinus("{1} - {NegInf} = {PosInf}", List(1), List(NegInf), List(PosInf), true),
BinMinus("{3} - {2} = {1}", List(3), List(2), List(1), true),
BinMinus("{3} - {5} = {-2}", List(3), List(5), List(-2), true),
BinMinus("{3} - {-5} = {8}", List(3), List(-5), List(8), true),
BinMinus("{5} - {3.5} = {1.5}", List(5), List(3.5), List(1.5), true),
BinMinus("{3} - {-5, 3.5} = {8, 0.5}", List(3), List(-5, 3.5), List(8, 0.5), true),
BinMinus("{-2} - {1} = {-3}", List(-2), List(1), List(-3), true),
BinMinus("{-2} - {1, 3} >= {-3, -5}", List(-2), List(1, 3), List(-3, -5), false),
BinMinus("{5.2} - {2} = {3.2}", List(5.2), List(2), List(3.2), true),
BinMinus("{-2} - {-5} = {3}", List(-2), List(-5), List(3), true),
BinMinus("{-2} - {-5, -1} = {3, -1}", List(-2), List(-5, -1), List(3, -1), true),
BinMinus("{-2} - {-2, 1} = {0, -3}", List(-2), List(-2, -1), List(0, -3), true),
BinMinus("{2, 3} - {3} = {-1, 0}", List(2, 3), List(3), List(-1, 0), true),
BinMinus("{2, 3} - {-2} = {4, 5}", List(2, 3), List(-2), List(4, 5), true),
BinMinus("{2, 3} - {-2.5} = {4.5, 5.5}", List(2, 3), List(-2.5), List(4.5, 5.5), true),
BinMinus("{2, 3} - {-1, -3} >= {3, 4, 4, 6}", List(2, 3), List(-1, -3), List(3, 4, 4, 6), false),
BinMinus("{2, 3} - {-2.5, -1} = {4.5, 5.5, 3, 4}", List(2, 3), List(-2.5, -1), List(4.5, 5.5, 3, 4), true),
BinMinus("{2, 3} - {2.5} = {0.5, -0.5}", List(2, 3), List(2.5), List(0.5, -0.5), true),
BinMinus("{2, 3} - {2, 5} = {0, 1, -3, -2}", List(2, 3), List(2, 5), List(0, 1, -3, -2), true),
BinMinus("{-1, 4.2} - {3} = {-4, 1.2}", List(-1, 4.2), List(3), List(-4, 1.2), true),
BinMinus("{3.5, 1.5} - {0.5} >= {3, 1}", List(3.5, 1.5), List(0.5), List(3, 1), false),
BinMinus("{2.1, -4} - {-1} >= {3.1, -3}", List(2.1, -4), List(-1), List(3.1, -3), false),
BinMinus("{-2, 2.5} - {3, 2} = {-5, -0.5, -4, 0.5}", List(-2, 2.5), List(3, 2), List(-5, -0.5, -4, 0.5), true),
BinMinus("{-1, -5} - {-2, -1} = {1, 0, -3, -4}", List(-1, -5), List(-2, -1), List(1, 0, -3, -4), true),
BinMul("{NaN} * {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMul("{2} * {NaN} = {NaN}", List(2), List(NaN), List(NaN), true),
BinMul("{0} * {PosInf} = {NaN}", List(0), List(PosInf), List(NaN), true),
BinMul("{0} * {NegIng} = {NaN}", List(0), List(NegInf), List(NaN), true),
BinMul("{0} * {PosInf, NegInf} = {NaN}", List(0), List(PosInf, NegInf), List(NaN), true),
BinMul("{PosInf} * {PosInf} = {PosInf}", List(PosInf), List(PosInf), List(PosInf), true),
BinMul("{PosIng} * {NegInf} = {NegInf}", List(PosInf), List(NegInf), List(NegInf), true),
BinMul("{NegInf} * {NegInf} = {PosInf}", List(NegInf), List(NegInf), List(PosInf), true),
BinMul("{NegInf} * {PosInf} = {NegInf}", List(NegInf), List(PosInf), List(NegInf), true),
BinMul("{PosInf, NegInf} * {PosInf} = {PosInf, NegInf}", List(PosInf, NegInf), List(PosInf), List(PosInf, NegInf), true),
BinMul("{0, 2} * {PosInf} = {NaN, PosInf}", List(0, 2), List(PosInf), List(NaN, PosInf), true),
BinMul("{PosInf} * {0, 2} = {NaN, PosInf}", List(PosInf), List(0, 2), List(NaN, PosInf), true),
BinMul("{NegInf} * {2, 3} >= {NegInf}", List(NegInf), List(2, 3), List(NegInf), false),
BinMul("{1E9} * {5} = {5E9}", List(1E9), List(5), List(5E9), true),
BinMul("{PosInf} / {-1, 3.5} = {PosInf, NegInf}", List(PosInf), List(-1, 3.5), List(PosInf, NegInf), true),
BinMul("{-1, 3.5} / {PosInf} = {PosInf, NegInf}", List(-1, 3.5), List(PosInf), List(PosInf, NegInf), true),
BinMul("{NegInf} / {-1, 3.5} = {PosInf, NegInf}", List(NegInf), List(-1, 3.5), List(PosInf, NegInf), true),
BinMul("{-1, 3.5} / {NegInf} = {PosInf, NegInf}", List(-1, 3.5), List(NegInf), List(PosInf, NegInf), true),
BinMul("{0} * {-2, 3.4, 1} = {0}", List(0), List(-2, 3.4, 1), List(0), true),
BinMul("{-1, 4} * {0} = {0}", List(-1, 4), List(0), List(0), true),
BinMul("{2} * {3} = {6}", List(2), List(3), List(6), true),
BinMul("{2} * {1, 3} = {2, 6}", List(2), List(1, 3), List(2, 6), true),
BinMul("{2} * {-1} = {-2}", List(2), List(-1), List(-2), true),
BinMul("{2} * {3.2, -4} = {6.4, -6}", List(2), List(3.2, -4), List(6.4, -6), true),
BinMul("{0, 2} * {2} = {0, 4}", List(0, 2), List(2), List(0, 4), true),
BinMul("{0, 2} * {2, 5} = {0, 4, 10}", List(0, 2), List(2, 5), List(0, 4, 10), true),
BinMul("{0, 2} * {-1} = {0, -2}", List(0, 2), List(-1), List(0, -2), true),
BinMul("{0, 2} * {-1, 2.5} = {0, -2, 5}", List(0, 2), List(-1, 2.5), List(0, -2, 5), true),
BinMul("{2.5} * {2} = {5}", List(2.5), List(2), List(5), true),
BinMul("{2.5} * {2, 3} = {5, 7.5}", List(2.5), List(2, 3), List(5, 7.5), true),
BinMul("{-2} * {-4} = {8}", List(-2), List(-4), List(8), true),
BinMul("{-2} * {-4, 2.5} = {8, -5}", List(-2), List(-4, 2.5), List(8, -5), true),
BinMul("{2.5, 1.5} * {2} >= {5, 3}", List(2.5, 1.5), List(2), List(5, 3), false),
BinMul("{2.5 -2} * {1, 2} = {2.5, -2, 5, -4}", List(2.5, -2), List(1, 2), List(2.5, -2, 5, -4), true),
BinMul("{-2, -4} * {-3} >= {6, 12}", List(-2, -4), List(-3), List(6, 12), false),
BinMul("{-2, 2.5} * {-2, -1} = {2, 2.5, -4, 5}", List(-2, 2.5), List(-2, -1), List(2, 2.5, -4, 5), true),
BinDiv("{} / {} = {}", List(), List(), List(), true),
BinDiv("{NaN} / {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinDiv("{3} / {NaN} = {NaN}", List(3), List(NaN), List(NaN), true),
BinDiv("{PosInf} / {NegInf} = {NaN}", List(PosInf), List(NegInf), List(NaN), true),
BinDiv("{NegInf} / {NegInf} = {NaN}", List(NegInf), List(NegInf), List(NaN), true),
BinDiv("{PosInf} / {NegInf, PosInf} = {NaN}", List(PosInf), List(NegInf, PosInf), List(NaN), true),
BinDiv("{PosInf} / {0} = {PosInf}", List(PosInf), List(0), List(PosInf), true),
BinDiv("{NegInf} / {0} = {NegInf}", List(NegInf), List(0), List(NegInf), true),
BinDiv("{PosInf, NegInf} / {0} = {PosInf, NegInf}", List(PosInf, NegInf), List(0), List(PosInf, NegInf), true),
BinDiv("{PosInf} / {2, 3} = {PosInf}", List(PosInf), List(2, 3), List(PosInf), true),
BinDiv("{PosInf} / {-1} = {NegInf}", List(PosInf), List(-1), List(NegInf), true),
BinDiv("{PosInf} / {0.5} = {PosInf}", List(PosInf), List(0.5), List(PosInf), true),
BinDiv("{PosInf} / {-1, 0.5} = {NegInf, PosInf}", List(PosInf), List(-1, 0.5), List(NegInf, PosInf), true),
BinDiv("{NegInf} / {2, 3} = {NegInf}", List(NegInf), List(2, 3), List(NegInf), true),
BinDiv("{NegInf} / {-1} = {PosInf}", List(NegInf), List(-1), List(PosInf), true),
BinDiv("{NegInf} / {0.5} = {NegInf}", List(NegInf), List(0.5), List(NegInf), true),
BinDiv("{NegInf} / {-1, 0.5} = {PosInf, NegInf}", List(NegInf), List(-1, 0.5), List(PosInf, NegInf), true),
BinDiv("{PosInf, NegInf} / {-3} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3), List(PosInf, NegInf), true),
BinDiv("{PosInf, NegInf} / {-3, 0.5} = {PosInf, NegInf}", List(PosInf, NegInf), List(-3, 0.5), List(PosInf, NegInf), true),
BinDiv("{3} / {PosInf} = {0}", List(3), List(PosInf), List(0), true),
BinDiv("{-2, 0.5} / {NegInf} = {0}", List(-2, 0.5), List(NegInf), List(0), true),
BinDiv("{1, 2} / {PosInf, NegInf} = {0}", List(1, 2), List(PosInf, NegInf), List(0), true),
BinDiv("{0} / {0} = {NaN}", List(0), List(0), List(NaN), true),
BinDiv("{0} / {-3} = {0}", List(0), List(-3), List(0), true),
BinDiv("{0} / {PosInf} = {0}", List(0), List(PosInf), List(0), true),
BinDiv("{0} / {-3, 2} = {0}", List(0), List(-3, 2), List(0), true),
BinDiv("{3} / {0} = {PosInf}", List(3), List(0), List(PosInf), true),
BinDiv("{1, 2} / {0} >= {PosInf}", List(1, 2), List(0), List(PosInf), false),
BinDiv("{0, 2} / {0} = {NaN, PosInf}", List(1, 2), List(0), List(NaN, PosInf), true),
BinDiv("{0.5} / {0} = {PosInf}", List(0.5), List(0), List(PosInf), true),
BinDiv("{-1} / {0} = {NegInf}", List(-1), List(0), List(NegInf), true),
BinDiv("{-1, 0.5} / {0} = {PosInf, NegInf}", List(-1, 0.5), List(0), List(PosInf, NegInf), true),
BinDiv("{3} / {2} = {1.5}", List(3), List(2), List(1.5), true),
BinDiv("{4} / {2} = {2}", List(4), List(2), List(2), true),
BinDiv("{3} / {2, 3} = {1.5, 1}", List(3), List(2, 3), List(1.5, 1), true),
BinDiv("{3} / {0, 3} = {PosInf, 1}", List(3), List(0, 3), List(PosInf, 1), true),
BinDiv("{3} / {1.5} = {2}", List(3), List(1.5), List(2), true),
BinDiv("{3} / {-1} = {-3}", List(3), List(-1), List(-3), true),
BinDiv("{3} / {1.5, -1} = {2, -3}", List(3), List(1.5, -1), List(2, -3), true),
BinDiv("{-2} / {2} = {-1}", List(-2), List(2), List(-1), true),
BinDiv("{-2} / {2, 0} = {-1, NegInf}", List(-2), List(2, 0), List(-1, NegInf), true),
BinDiv("{-2} / {-1} = {2}", List(-2), List(-1), List(2), true),
BinDiv("{-4} / {0.8} = {-5}", List(-4), List(0.8), List(-5), true),
BinDiv("{-4} / {-1, 0.8} = {4, -5}", List(-4), List(-1, 0.8), List(4, -5), true),
BinDiv("{-2, -4} / {2} = {-1, -2}", List(-2, -4), List(2), List(-1, -2), true),
BinDiv("{-2, 0.8} / {2, 1} >= {-1, 0.4, 0.8, -2}", List(-2, 0.8), List(2, 1), List(-1, 0.4, 0.8, -2), false),
BinDiv("{-1, 0.4} / {2, 0} = {-0.5, PosInf, NegInf, 0.2}", List(-1, 0.4), List(2, 0), List(-0.5, PosInf, NegInf, 0.2), true),
BinDiv("{-2, 0.4} / {-1} = {2, -0.4}", List(-2, 0.4), List(-1), List(2, -0.4), true),
BinDiv("{-2, 0.8} / {-2, 0.4} = {1, -0.4, 2, -5}", List(-2, 0.8), List(-2, 0.4), List(-2, 0.4, 2, -5), true),
BinMod("{} % {} = {}", List(), List(), List(), true),
BinMod("{NaN} % {3} = {NaN}", List(NaN), List(3), List(NaN), true),
BinMod("{3} % {NaN} = {NaN}", List(3), List(NaN), List(NaN), true),
BinMod("{PosInf} % {3} = {NaN}", List(PosInf), List(3), List(NaN), true),
BinMod("{NegInf} % {2} = {NaN}", List(NegInf), List(2), List(NaN), true),
BinMod("{PosInf, NegInf} % {2, -3} = {NaN}", List(PosInf, NegInf), List(2, -3), List(NaN), true),
BinMod("{2} % {0} = {NaN}", List(2), List(0), List(NaN), true),
BinMod("{0} % {PosInf} = {0}", List(0), List(PosInf), List(0), true),
BinMod("{0} % {-1} = {0}", List(0), List(-1), List(0), true),
BinMod("{0} % {2, 3} = {0}", List(0), List(2, 3), List(0), true),
BinMod("{3} % {PosInf} = {3}", List(3), List(PosInf), List(3), true),
BinMod("{-1, 0.5} % {NegInf} = {-1, 0.5}", List(-1, 0.5), List(NegInf), List(-1, 0.5), true),
BinMod("{-2} % {PosInf, NegInf} = {-2}", List(-2), List(PosInf, NegInf), List(-2), true),
BinMod("{3} % {2} = {1}", List(3), List(2), List(1), true),
BinMod("{3} % {1, 0} = {1, NaN}", List(3), List(1, 0), List(1, NaN), true),
BinMod("{3} % {-2} = {1}", List(3), List(-2), List(1), true),
BinMod("{3} % {-2, 0.4} = {1, 0.2}", List(3), List(-2, 0.4), List(1, 0.2), true),
BinMod("{1, 0} % {1} >= {0, 0}", List(1, 0), List(1), List(0, 0), false),
BinMod("{-1} % {-2} = {-1}", List(-1), List(-2), List(-1), true),
BinMod("{-2} % {-1} = {0}", List(-2), List(-1), List(0), true),
BinMod("{-1} % {-1, -2} = {0, -1}", List(-1), List(-1, -2), List(0, -1), true),
BinMod("{3.5} % {-0.5} = {0}", List(3.5), List(-0.5), List(0), true),
BinMod("{-2, 2.5} % {3} = {-2, 2.5}", List(-2, 2.5), List(3), List(-2, 2.5), true),
BinMod("{-2, 3.2} % {0.4} >= {0}", List(-2, 3.2), List(0.4), List(0), false),
BinMod("{-2, -4} % {0.8} = {-0.4, 0}", List(-2, -4), List(0.8), List(-0.4, 0), true),
BinEq("{} == {} = {}", List(), List(), List(), true),
BinEq("{UndefTop} == {UndefTop} = {true}", List(UndefTop), List(UndefTop), List(true), true),
BinEq("{NullTop} == {NullTop} = {true}", List(NullTop), List(NullTop), List(true), true),
BinEq("{NaN} == {3, -1} = {false}", List(NaN), List(3, -1), List(false), true),
BinEq("{PosInf} == {NaN} = {false}", List(PosInf), List(NaN), List(false), true),
BinEq("{PosInf} == {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinEq("{NegInf} == {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinEq("{PosInf} == {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinEq("{NegInf} == {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinEq("{PosInf, NegInf} == {PosInf, NegInf} = {true, false}", List(PosInf, NegInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{PosInf, NegInf} == {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinEq("{PosInf, NegInf} == {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinEq("{PosInf} == {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{NegInf} == {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinEq("{1} == {1} = {true}", List(1), List(1), List(true), true),
BinEq("{-2} == {-2} = {true}", List(-2), List(-2), List(true), true),
BinEq("{1, 2} == {2} = {false, true}", List(1, 2), List(2), List(false, true), true),
BinEq("{-1, -2} == {-1} = {true, false}", List(-1, -2), List(-1), List(true, false), true),
BinEq("{2} == {1, 2} = {false, true}", List(2), List(1, 2), List(false, true), true),
BinEq("{-1} == {-1, -2} = {true, false}", List(-1), List(-1, -2), List(true, false), true),
BinEq("{-2} == {1, 2} = {false, false}", List(-2), List(1, 2), List(false), true),
BinEq("{1, 2} == {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinEq("{-1, -2} == {-4, -2} = {true, false}", List(-1, -2), List(-4, -2), List(true, false), true),
BinEq("{-1, 3} == {0.5} = {false, false}", List(-1, 3), List(0.5), List(false), false),
BinEq("{\\"1\\"} == {\\"1\\"} = {true}", List("1"), List("1"), List(true), true),
BinEq("{\\"-1\\"} == {\\"-1\\"} = {true}", List("-1"), List("-1"), List(true), true),
BinEq("{\\"1\\"} == {\\"2\\"} = {false}", List("1"), List("2"), List(false), true),
BinEq("{\\"-1\\"} == {\\"-2\\"} = {false}", List("-2"), List("-1"), List(false), true),
BinEq("{\\"1\\"} == {\\"1\\", \\"2\\"} = {true, false}", List("1"), List("1", "2"), List(true, false), true),
BinEq("{\\"-1\\"} == {\\"-2\\", \\"-1\\"} = {true, false}", List("-1"), List("-2", "-1"), List(false, true), true),
BinEq("{\\"1\\", \\"2\\"} == {\\"2\\", \\"3\\"} = {true, false}", List("1", "2"), List("2", "3"), List(true, false), true),
BinEq("{\\"-1\\", \\"-3\\"} == {\\"-3\\", \\"0.4\\"} = {true, false}", List("-1", "-3"), List("-3", "0.4"), List(true, false), true),
BinEq("{true} == {\\"1\\"} = {true}", List(true), List("1"), List(true), true),
BinEq("{false} == {\\"0\\"} = {true}", List(false), List("0"), List(true), true),
BinEq("{\\"true\\", true} == {\\"true\\"} = {true, false}", List("true", true), List("true"), List(true, false), true),
BinEq("{1, true} == {\\"true\\"} = {false}", List(1, true), List("true"), List(false), true),
BinEq("{true} == {ture} = {true}", List(true), List(true), List(true), true),
BinEq("{true} == {false} = {false}", List(true), List(false), List(false), true),
BinEq("{false} == {true} = {false}", List(false), List(true), List(false), true),
BinEq("{false} == {false} = {true}", List(false), List(false), List(true), true),
BinEq("{true, false} == {true} = {true, false}", List(true, false), List(true), List(true, false), true),
BinEq("{true, false} == {false} = {true, false}", List(true, false), List(false), List(true, false), true),
BinEq("{true} == {true, false} = {true, false}", List(true), List(true, false), List(true, false), true),
BinEq("{false} == {true, false} = {true, false}", List(false), List(true, false), List(true, false), true),
BinEq("{UndefTop} == {NullTop} = {true}", List(UndefTop), List(NullTop), List(true), true),
BinEq("{NullTop} == {UndefTop} = {true}", List(NullTop), List(UndefTop), List(true), true),
BinEq("{NaN} == {\\"NaN\\"} = {false}", List(NaN), List("NaN"), List(false), true),
BinEq("{1} == {\\"1\\"} = {true}", List(1), List("1"), List(true), true),
BinEq("{\\"1\\"} == {1} = {true}", List("1"), List(1), List(true), true),
BinEq("{-1} == {\\"-1\\"} = {true}", List(-1), List("-1"), List(true), true),
BinEq("{\\"-1\\"} == {-1} = {true}", List("-1"), List(-1), List(true), true),
BinEq("{-2} == {-1} = {false}", List(-2), List(-1), List(false), true),
BinEq("{1} == {\\"1\\", \\"2\\"} = {true, false}", List(1), List("1", "2"), List(true, false), true),
BinEq("{\\"1\\", \\"2\\"} == {1} = {true, false}", List("1", "2"), List(1), List(true, false), true),
BinEq("{-2} == {\\"1\\"} = {false}", List(-2), List("1"), List(false), true),
BinEq("{\\"1\\"} == {-2} = {false}", List("1"), List(-2), List(false), true),
BinEq("{1, 2} == {\\"1\\", \\"3\\"} = {true, false}", List(1, 2), List("1", "3"), List(true, false), true),
BinEq("{\\"1\\", \\"3\\"} == {1, 2} = {true, false}", List("1", "3"), List(1, 2), List(true, false), true),
BinEq("{-1, -2} == {\\"-1\\", \\"-3\\"} = {true, false}", List(-1, -2), List("-1", "-3"), List(true, false), true),
BinEq("{\\"-1\\", \\"-3\\"} == {-1, -2} = {true, false}", List("-1", "-3"), List(-1, -2), List(true, false), true),
BinEq("{true} == {1} = {true}", List(true), List(1), List(true), true),
BinEq("{false} == {0} = {true}", List(false), List(0), List(true), true),
BinEq("{1} == {true} = {true}", List(1), List(true), List(true), true),
BinEq("{0} == {false} = {true}", List(0), List(false), List(true), true),
BinEq("{false} == {1} = {false}", List(false), List(1), List(false), true),
BinEq("{1} == {false} = {false}", List(1), List(false), List(false), true),
BinEq("{3} == {true} = {false}", List(3), List(true), List(false), true),
BinEq("{true} == {3} = {false}", List(true), List(3), List(false), true),
BinNeq("{} != {} = {}", List(), List(), List(), true),
BinNeq("{UndefTop} != {UndefTop} = {false}", List(UndefTop), List(UndefTop), List(false), true),
BinNeq("{NullTop} != {NullTop} = {false}", List(NullTop), List(NullTop), List(false), true),
BinNeq("{NaN} != {3, -1} = {true}", List(NaN), List(3, -1), List(true), true),
BinNeq("{PosInf} != {NaN} = {true}", List(PosInf), List(NaN), List(true), true),
BinNeq("{PosInf} != {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinNeq("{NegInf} != {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinNeq("{PosInf} != {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinNeq("{NegInf} != {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinNeq("{PosInf, NegInf} != {PosInf, NegInf} = {true, false}", List(PosInf, NegInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{PosInf, NegInf} != {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinNeq("{PosInf, NegInf} != {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinNeq("{PosInf} != {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{NegInf} != {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinNeq("{1} != {1} = {false}", List(1), List(1), List(false), true),
BinNeq("{-2} != {-2} = {false}", List(-2), List(-2), List(false), true),
BinNeq("{1, 2} != {2} = {true, false}", List(1, 2), List(2), List(true, false), true),
BinNeq("{-1, -2} != {-1} = {false, true}", List(-1, -2), List(-1), List(false, true), true),
BinNeq("{2} != {1, 2} = {true, false}", List(2), List(1, 2), List(true, false), true),
BinNeq("{-1} != {-1, -2} = {false, true}", List(-1), List(-1, -2), List(false, true), true),
BinNeq("{-2} != {1, 2} = {true, true}", List(-2), List(1, 2), List(true), true),
BinNeq("{1, 2} != {2, 3} = {false, true}", List(1, 2), List(2, 3), List(false, true), true),
BinNeq("{-1, -2} != {-4, -2} = {false, true}", List(-1, -2), List(-4, -2), List(false, true), true),
BinNeq("{-1, 3} != {0.5} >= {true, true}", List(-1, 3), List(0.5), List(true), false),
BinNeq("{\\"1\\"} != {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinNeq("{\\"-1\\"} != {\\"-1\\"} = {false}", List("-1"), List("-1"), List(false), true),
BinNeq("{\\"1\\"} != {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinNeq("{\\"-1\\"} != {\\"-2\\"} = {true}", List("-2"), List("-1"), List(true), true),
BinNeq("{\\"1\\"} != {\\"1\\", \\"2\\"} = {false, true}", List("1"), List("1", "2"), List(false, true), true),
BinNeq("{\\"-1\\"} != {\\"-2\\", \\"-1\\"} = {false, true}", List("-1"), List("-2", "-1"), List(true, false), true),
BinNeq("{\\"1\\", \\"2\\"} != {\\"2\\", \\"3\\"} = {false, true}", List("1", "2"), List("2", "3"), List(false, true), true),
BinNeq("{\\"-1\\", \\"-3\\"} != {\\"-3\\", \\"0.4\\"} = {false, true}", List("-1", "-3"), List("-3", "0.4"), List(false, true), true),
BinNeq("{false} != {false} = {false}", List(false), List(false), List(false), true),
BinNeq("{false} != {true} = {true}", List(false), List(true), List(true), true),
BinNeq("{true} != {false} = {true}", List(true), List(false), List(true), true),
BinNeq("{true} != {true} = {false}", List(true), List(true), List(false), true),
BinNeq("{false, true} != {false} = {false, true}", List(false, true), List(false), List(false, true), true),
BinNeq("{false, true} != {true} = {false, true}", List(false, true), List(true), List(false, true), true),
BinNeq("{false} != {false, true} = {false, true}", List(false), List(false, true), List(false, true), true),
BinNeq("{true} != {false, true} = {false, true}", List(true), List(false, true), List(false, true), true),
BinNeq("{UndefTop} != {NullTop} = {false}", List(UndefTop), List(NullTop), List(false), true),
BinNeq("{NullTop} != {UndefTop} = {false}", List(NullTop), List(UndefTop), List(false), true),
BinNeq("{NaN} != {\\"NaN\\"} = {true}", List(NaN), List("NaN"), List(true), true),
BinNeq("{1} != {\\"1\\"} = {false}", List(1), List("1"), List(false), true),
BinNeq("{\\"1\\"} != {1} = {false}", List("1"), List(1), List(false), true),
BinNeq("{-1} != {\\"-1\\"} = {false}", List(-1), List("-1"), List(false), true),
BinNeq("{\\"-1\\"} != {-1} = {false}", List("-1"), List(-1), List(false), true),
BinNeq("{-2} != {-1} = {true}", List(-2), List(-1), List(true), true),
BinNeq("{1} != {\\"1\\", \\"2\\"} = {false, true}", List(1), List("1", "2"), List(false, true), true),
BinNeq("{\\"1\\", \\"2\\"} != {1} = {false, true}", List("1", "2"), List(1), List(false, true), true),
BinNeq("{-2} != {\\"1\\"} = {true}", List(-2), List("1"), List(true), true),
BinNeq("{\\"1\\"} != {-2} = {true}", List("1"), List(-2), List(true), true),
BinNeq("{1, 2} != {\\"1\\", \\"3\\"} = {false, true}", List(1, 2), List("1", "3"), List(false, true), true),
BinNeq("{\\"1\\", \\"3\\"} != {1, 2} = {false, true}", List("1", "3"), List(1, 2), List(true, false), true),
BinNeq("{-1, -2} != {\\"-1\\", \\"-3\\"} = {false, true}", List(-1, -2), List("-1", "-3"), List(false, true), true),
BinNeq("{\\"-1\\", \\"-3\\"} != {-1, -2} = {false, true}", List("-1", "-3"), List(-1, -2), List(false, true), true),
BinNeq("{true} != {1} = {false}", List(true), List(1), List(false), true),
BinNeq("{false} != {0} = {false}", List(false), List(0), List(false), true),
BinNeq("{1} != {true} = {false}", List(1), List(true), List(false), true),
BinNeq("{0} != {false} = {false}", List(0), List(false), List(false), true),
BinNeq("{false} != {1} = {true}", List(false), List(1), List(true), true),
BinNeq("{1} != {false} = {true}", List(1), List(false), List(true), true),
BinNeq("{3} != {true} = {true}", List(3), List(true), List(true), true),
BinNeq("{true} != {3} = {true}", List(true), List(3), List(true), true),
BinSEq("{} === {} = {}", List(), List(), List(), true),
BinSEq("{UndefTop} === {UndefTop} = {true}", List(UndefTop), List(UndefTop), List(true), true),
BinSEq("{NullTop} === {NullTop} = {true}", List(NullTop), List(NullTop), List(true), true),
BinSEq("{} === {1, -1} = {}", List(), List(1, -1), List(), true),
BinSEq("{1, -1} === {} = {}", List(1, -1), List(), List(), true),
BinSEq("{NaN} === {1} = {false}", List(NaN), List(1), List(false), true),
BinSEq("{2} === {NaN} = {false}", List(2), List(NaN), List(false), true),
BinSEq("{PosInf} === {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinSEq("{PosInf, NegInf} === {PosInf} = {true, false}", List(PosInf, NegInf), List(PosInf), List(true, false), true),
BinSEq("{1} === {1} = {true}", List(1), List(1), List(true), true),
BinSEq("{-2} === {-2} = {true}", List(-2), List(-2), List(true), true),
BinSEq("{1, 2} === {2} = {false, true}", List(1, 2), List(2), List(false, true), true),
BinSEq("{-1, -2} === {-1} = {true, false}", List(-1, -2), List(-1), List(true, false), true),
BinSEq("{2} === {1, 2} = {false, true}", List(2), List(1, 2), List(false, true), true),
BinSEq("{-1} == {-1, -2} = {true, false}", List(-1), List(-1, -2), List(true, false), true),
BinSEq("{-2} === {1, 2} = {false, false}", List(-2), List(1, 2), List(false), true),
BinSEq("{1, 2} === {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinSEq("{-1, -2} === {-4, -2} = {true, false}", List(-1, -2), List(-4, -2), List(true, false), true),
BinSEq("{-1, 3} === {0.5} >= {false, false}", List(-1, 3), List(0.5), List(false), false),
BinSEq("{1} === {\\"1\\"} = {false}", List(1), List("1"), List(false), true),
BinSEq("{\\"1\\"} === {1} = {false}", List("1"), List(1), List(false), true),
BinSEq("{\\"1\\"} === {\\"1\\"} = {true}", List("1"), List("1"), List(true), true),
BinSEq("{\\"-1\\"} === {\\"-1\\"} = {true}", List("-1"), List("-1"), List(true), true),
BinSEq("{\\"1\\"} === {\\"2\\"} = {false}", List("1"), List("2"), List(false), true),
BinSEq("{\\"-1\\"} === {\\"-2\\"} = {false}", List("-2"), List("-1"), List(false), true),
BinSEq("{\\"1\\"} === {\\"1\\", \\"2\\"} = {true, false}", List("1"), List("1", "2"), List(true, false), true),
BinSEq("{\\"-1\\"} === {\\"-2\\", \\"-1\\"} = {true, false}", List("-1"), List("-2", "-1"), List(false, true), true),
BinSEq("{\\"1\\", \\"2\\"} === {\\"2\\", \\"3\\"} = {true, false}", List("1", "2"), List("2", "3"), List(true, false), true),
BinSEq("{\\"-1\\", \\"-3\\"} === {\\"-3\\", \\"0.4\\"} = {true, false}", List("-1", "-3"), List("-3", "0.4"), List(true, false), true),
BinSEq("{\\"s\\"} === {\\"s\\"} = {true}", List("s"), List("s"), List(true), true),
BinSEq("{\\"Ta\\"} === {\\"ta\\"} = {false}", List("Ta"), List("ta"), List(false), true),
BinSEq("{true} === {\\"1\\"} = {false}", List(true), List("1"), List(false), true),
BinSEq("{false} === {\\"0\\"} = {false}", List(false), List("0"), List(false), true),
BinSEq("{true} === {1} = {false}", List(true), List(1), List(false), true),
BinSEq("{false} === {0} = {false}", List(false), List(0), List(false), true),
BinSEq("{true} === {true} = {true}", List(true), List(true), List(true), true),
BinSNeq("{} !== {} = {}", List(), List(), List(), true),
BinSNeq("{UndefTop} !== {UndefTop} = {false}", List(UndefTop), List(UndefTop), List(false), true),
BinSNeq("{NullTop} !== {NullTop} = {false}", List(NullTop), List(NullTop), List(false), true),
BinSNeq("{} !== {1, -1} = {}", List(), List(1, -1), List(), true),
BinSNeq("{1, -1} !== {} = {}", List(1, -1), List(), List(), true),
BinSNeq("{NaN} !== {1} = {true}", List(NaN), List(1), List(true), true),
BinSNeq("{2} !== {NaN} = {true}", List(2), List(NaN), List(true), true),
BinSNeq("{PosInf} !== {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinSNeq("{PosInf, NegInf} !== {PosInf} = {false, true}", List(PosInf, NegInf), List(PosInf), List(false, true), true),
BinSNeq("{1} !== {1} = {false}", List(1), List(1), List(false), true),
BinSNeq("{-2} !== {-2} = {false}", List(-2), List(-2), List(false), true),
BinSNeq("{1, 2} !== {2} = {true, false}", List(1, 2), List(2), List(true, false), true),
BinSNeq("{-1, -2} !== {-1} = {false, true}", List(-1, -2), List(-1), List(false, true), true),
BinSNeq("{2} !== {1, 2} = {true, false}", List(2), List(1, 2), List(true, false), true),
BinSNeq("{-1} !== {-1, -2} = {false, true}", List(-1), List(-1, -2), List(false, true), true),
BinSNeq("{-2} !== {1, 2} = {true, true}", List(-2), List(1, 2), List(true), true),
BinSNeq("{1, 2} !== {2, 3} = {false, true}", List(1, 2), List(2, 3), List(false, true), true),
BinSNeq("{-1, -2} !== {-4, -2} = {false, true}", List(-1, -2), List(-4, -2), List(false, true), true),
BinSNeq("{-1, 3} !== {0.5} >= {true, true}", List(-1, 3), List(0.5), List(true), false),
BinSNeq("{1} !== {\\"1\\"} = {true}", List(1), List("1"), List(true), true),
BinSNeq("{\\"1\\"} !== {1} = {true}", List("1"), List(1), List(true), true),
BinSNeq("{\\"1\\"} !== {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinSNeq("{\\"-1\\"} !== {\\"-1\\"} = {false}", List("-1"), List("-1"), List(false), true),
BinSNeq("{\\"1\\"} !== {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinSNeq("{\\"-1\\"} !== {\\"-2\\"} = {true}", List("-2"), List("-1"), List(true), true),
BinSNeq("{\\"1\\"} !== {\\"1\\", \\"2\\"} = {false, true}", List("1"), List("1", "2"), List(false, true), true),
BinSNeq("{\\"-1\\"} !== {\\"-2\\", \\"-1\\"} = {false, true}", List("-1"), List("-2", "-1"), List(true, false), true),
BinSNeq("{\\"1\\", \\"2\\"} !== {\\"2\\", \\"3\\"} = {false, true}", List("1", "2"), List("2", "3"), List(false, true), true),
BinSNeq("{\\"-1\\", \\"-3\\"} !== {\\"-3\\", \\"0.4\\"} = {false, true}", List("-1", "-3"), List("-3", "0.4"), List(false, true), true),
BinSNeq("{\\"s\\"} !== {\\"s\\"} = {false}", List("s"), List("s"), List(false), true),
BinSNeq("{\\"Ta\\"} !== {\\"ta\\"} = {true}", List("Ta"), List("ta"), List(true), true),
BinSNeq("{true} !== {\\"1\\"} = {true}", List(true), List("1"), List(true), true),
BinSNeq("{false} !== {\\"0\\"} = {true}", List(false), List("0"), List(true), true),
BinSNeq("{true} !== {1} = {true}", List(true), List(1), List(true), true),
BinSNeq("{false} !== {0} = {true}", List(false), List(0), List(true), true),
BinSNeq("{true} !== {true} = {false}", List(true), List(true), List(false), true),
BinLess("{2, \\"-3\\"} < {NaN, \\"5\\"} = {true, false}", List(2, "-3"), List(NaN, "5"), List(true, false), true),
BinLess("{} < {} = {}", List(), List(), List(), true),
BinLess("{false} < {true} = {true}", List(false), List(true), List(true), true),
BinLess("{true} < {false} = {false}", List(true), List(false), List(false), true),
BinLess("{false} < {true, false} = {true, false}", List(false), List(true, false), List(true, false), true),
BinLess("{true} < {true, false} >= {false}", List(true), List(true, false), List(false), false),
BinLess("{true, false} < {true} = {false, true}", List(true, false), List(true), List(true, false), true),
BinLess("{true, false} < {false} = {false}", List(true, false), List(false), List(false), true),
BinLess("{null} < {true} = {true}", List(NullTop), List(true), List(true), true),
BinLess("{false} < {null} = {false}", List(false), List(NullTop), List(false), true),
BinLess("{NaN} < {3} = {false}", List(NaN), List(3), List(false), true),
BinLess("{2} < {NaN} = {false}", List(2), List(NaN), List(false), true),
BinLess("{PosInf} < {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinLess("{NegInf} < {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinLess("{PosInf, NegInf} < {1} = {true, false}", List(PosInf, NegInf), List(1), List(true, false), true),
BinLess("{3} < {PosInf, NegInf} = {true, false}", List(3), List(PosInf, NegInf), List(true, false), true),
BinLess("{PosInf} < {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinLess("{NegInf} < {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinLess("{1} < {2} = {true}", List(1), List(2), List(true), true),
BinLess("{2} < {1} = {false}", List(2), List(1), List(false), true),
BinLess("{1} < {1.5} = {true}", List(1), List(1.5), List(true), true),
BinLess("{1} < {-1} = {false}", List(1), List(-1), List(false), true),
BinLess("{-3} < {1} = {true}", List(-3), List(1), List(true), true),
BinLess("{3.4} < {1} = {false}", List(3.4), List(1), List(false), true),
BinLess("{-2} < {-1} = {true}", List(-2), List(-1), List(true), true),
BinLess("{-1} < {-2} = {false}", List(-1), List(-2), List(false), true),
BinLess("{1, 2} < {2, 3} = {true, false}", List(1, 2), List(2, 3), List(true, false), true),
BinLess("{-2, -3} < {-5, -2} = {true, false}", List(-2, -3), List(-5, -2), List(true, false), true),
BinLess("{-2, \\"-5\\"} < {\\"-3\\", -2} = {true, false}", List(-2, "-5"), List("-3", -2), List(true, false), true),
BinLess("{\\"1\\"} < {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinLess("{\\"1\\"} < {\\"1\\"} = {false}", List("1"), List("1"), List(false), true),
BinLess("{\\"1\\"} < {\\"1d\\"} = {true}", List("1"), List("1d"), List(true), true),
BinLess("{\\"1\\"} < {\\"-1\\"} = {false}", List("1"), List("-1"), List(false), true),
BinLess("{\\"\\"} < {\\"1\\"} = {true}", List(""), List("1"), List(true), true),
BinLess("{\\"s\\"} < {\\"1\\"} = {false}", List("s"), List("1"), List(false), true),
BinLess("{\\"s\\"} < {\\"t\\"} = {true}", List("s"), List("t"), List(true), true),
BinLess("{\\"s\\"} < {\\"s1\\"} = {true}", List("s"), List("s1"), List(true), true),
BinLess("{\\"s\\"} < {\\"d\\"} = {false}", List("s"), List("d"), List(false), true),
BinLess("{\\"1\\", \\"2\\"} < {\\"12\\", \\"23\\"} = {true, false}", List("1", "2"), List("12", "23"), List(true, false), true),
BinLess("{\\"s\\", \\"d\\"} < {\\"sd\\"} = {ture, false}", List("s", "d"), List("sd"), List(true, false), true),
BinLess("{\\"-5\\"} < {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLess("{3} < {\\"5\\", NaN} = {true, false}", List(3), List("5", NaN), List(true, false), true),
BinLess("{-2, \\"-5\\"} < {\\"-3\\", NaN} = {false}", List(-2, "-5"), List("-3", NaN), List(false), false),
BinGreater("{} > {} = {}", List(), List(), List(), true),
BinGreater("{2} > {1} = {true}", List(2), List(1), List(true), true),
BinGreater("{2} > {4} = {false}", List(2), List(4), List(false), true),
BinGreater("{2} > {1, 4} = {true, false}", List(2), List(1, 4), List(true, false), true),
BinGreater("{2} > {-1} = {true}", List(2), List(-1), List(true), true),
BinGreater("{2} > {3.5} = {false}", List(2), List(3.5), List(false), true),
BinGreater("{2} > {-1, 3.5} = {true, false}", List(2), List(-1, 3.5), List(true, false), true),
BinGreater("{-3} > {1} = {false}", List(-3), List(1), List(false), true),
BinGreater("{4.3} > {1} = {true}", List(4.3), List(1), List(true), true),
BinGreater("{-3} > {0, 1} = {false}", List(-3), List(0, 1), List(false), true),
BinGreater("{4.5} > {0, 5} = {ture, false}", List(4.5), List(0, 5), List(true, false), true),
BinGreater("{-2} > {4.5} = {false}", List(-2), List(4.5), List(false), true),
BinGreater("{3.2} > {-1} = {true}", List(3.2), List(-1), List(true), true),
BinGreater("{-3} > {-2, -5} = {true, false}", List(-3), List(-2, -5), List(true, false), true),
BinGreater("{1, 3} > {2} = {true, false}", List(1, 3), List(2), List(true, false), true),
BinGreater("{1, 2} > {3.5} >= {false}", List(1, 2), List(3.5), List(false), false),
BinGreater("{1, 2} > {1.5} = {true, false}", List(1, 2), List(1.5), List(true, false), true),
BinGreater("{1, 2} > {-1, 3.5} = {true, false}", List(1, 2), List(-1, 3.5), List(true, false), true),
BinGreater("{-2, -5} > {2} >= {false}", List(-2, -5), List(2), List(false), false),
BinGreater("{-1, 3.5} > {2} = {true, false}", List(-1, 3.5), List(2), List(true, false), true),
BinGreater("{-2, -3} > {-4} >= {true}", List(-2, -3), List(-4), List(true), false),
BinGreater("{-2, -3} > {0, 6} >= {false}", List(-2, -3), List(0, 6), List(false), false),
BinGreater("{-2, 5.5} > {0, 6} = {true, false}", List(-2, 5.5), List(0, 6), List(true, false), true),
BinGreater("{-3, 2.5} > {1.5, -2} = {true, false}", List(-3, 2.5), List(1.5, -2), List(true, false), true),
BinGreater("{-2, \\"-3\\"} > {\\"-5\\", -2} = {true, false}", List(-2, "-3"), List("-5", -2), List(true, false), true),
BinGreater("{false} > {true} = {false}", List(false), List(true), List(false), true),
BinGreater("{true} > {false} = {true}", List(true), List(false), List(true), true),
BinGreater("{false} > {true, false} = {false}", List(false), List(true, false), List(false), true),
BinGreater("{true, false} > {true} >= {false}", List(true, false), List(true), List(false), false),
BinGreater("{true, false} > {false} = {true, false}", List(true, false), List(false), List(true, false), true),
BinGreater("{null} > {true} = {false}", List(NullTop), List(true), List(false), true),
BinGreater("{false} > {null} = {false}", List(false), List(NullTop), List(false), true),
BinGreater("{NaN} > {3} = {false}", List(NaN), List(3), List(false), true),
BinGreater("{2} > {NaN} = {false}", List(2), List(NaN), List(false), true),
BinGreater("{PosInf} > {PosInf} = {false}", List(PosInf), List(PosInf), List(false), true),
BinGreater("{NegInf} > {NegInf} = {false}", List(NegInf), List(NegInf), List(false), true),
BinGreater("{PosInf, NegInf} > {1} = {true, false}", List(PosInf, NegInf), List(1), List(true, false), true),
BinGreater("{3} > {PosInf, NegInf} = {true, false}", List(3), List(PosInf, NegInf), List(true, false), true),
BinGreater("{PosInf} > {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinGreater("{NegInf} > {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinGreater("{-2, \\"-3\\"} > {-4, \\"-1\\"} >= {true, false}", List(-2, "-3"), List(-4, "-1"), List(true, false), true),
BinGreater("{} > {} = {}", List(), List(), List(), true),
BinGreater("{\\"-5\\"} > {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinGreater("{-2} > {\\"-3\\", NaN} = {true, false}", List(-2), List("-3", NaN), List(true, false), true),
BinGreater("{3} > {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false),
BinLessEq("{} <= {} = {}", List(), List(), List(), true),
BinLessEq("{} <= {1} = {}", List(), List(1), List(), true),
BinLessEq("{1} <= {} = {}", List(1), List(), List(), true),
BinLessEq("{true} <= {true} = {true}", List(true), List(true), List(true), true),
BinLessEq("{false} <= {true, false} = {true}", List(false), List(true, false), List(true), true),
BinLessEq("{null} <= {0, 1} = {true}", List(NullTop), List(0, 1), List(true), true),
BinLessEq("{null} <= {-1, -4} = {true, false}", List(NullTop), List(-1, -4), List(true, false), true),
BinLessEq("{UndefTop} <= {0, 1} = {false}", List(UndefTop), List(0, 1), List(false), true),
BinLessEq("{PosInf} <= {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinLessEq("{NegInf} <= {PosInf} = {true}", List(NegInf), List(PosInf), List(true), true),
BinLessEq("{PosInf} <= {NegInf} = {false}", List(PosInf), List(NegInf), List(false), true),
BinLessEq("{NegInf} <= {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinLessEq("{NegInf} <= {PosInf, NegInf} = {true}", List(NegInf), List(PosInf, NegInf), List(true), true),
BinLessEq("{PosInf} <= {PosInf, NegInf} = {true, false}", List(PosInf), List(PosInf, NegInf), List(true, false), true),
BinLessEq("{PosInf, NegInf} <= {PosInf} = {true}", List(PosInf, NegInf), List(PosInf), List(true), true),
BinLessEq("{PosInf, NegInf} <= {NegInf} = {true, false}", List(PosInf, NegInf), List(NegInf), List(true, false), true),
BinLessEq("{1} <= {1} = {true}", List(1), List(1), List(true), true),
BinLessEq("{1} <= {0, 1} = {true, false}", List(1), List(0, 1), List(true, false), true),
BinLessEq("{1} <= {-1} = {false}", List(1), List(-1), List(false), true),
BinLessEq("{1} <= {2.3} = {true}", List(1), List(2.3), List(true), true),
BinLessEq("{1} <= {-1, 2.3} = {true, false}", List(1), List(-1, 2.3), List(true, false), true),
BinLessEq("{-1} <= {-1} = {true}", List(-1), List(-1), List(true), true),
BinLessEq("{-1} <= {1} = {true}", List(-1), List(1), List(true), true),
BinLessEq("{-1} <= {0, 1} = {true}", List(-1), List(0, 1), List(true), true),
BinLessEq("{-1} <= {-3} = {false}", List(-1), List(-3), List(false), true),
BinLessEq("{-1} <= {2.5, -3} = {true, false}", List(-1), List(2.5, -3), List(true, false), true),
BinLessEq("{2.5} <= {3} = {true}", List(2.5), List(3), List(true), true),
BinLessEq("{2.5} <= {3.5} = {true}", List(2.5), List(3.5), List(true), true),
BinLessEq("{2.5} <= {2, 3} = {true, false}", List(2.5), List(2, 3), List(true, false), true),
BinLessEq("{2.5} <= {-1, 3.5} = {true, false}", List(2.5), List(-1, 3.5), List(true, false), true),
BinLessEq("{0, 1} <= {0} = {true, false}", List(0, 1), List(0), List(true, false), true),
BinLessEq("{0, 1} <= {-1} = {false}", List(0, 1), List(-1), List(false), true),
BinLessEq("{0, 1} <= {0, 3} = {true, false}", List(0, 1), List(0, 3), List(true, false), true),
BinLessEq("{0, 1} <= {-2, 3.5} = {true, false}", List(0, 1), List(-2, 3.5), List(true, false), true),
BinLessEq("{-1, 2.5} <= {1} = {true, false}", List(-1, 2.5), List(1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {-1} = {true, false}", List(-1, 2.5), List(-1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {0, 1} = {true, false}", List(-1, 2.5), List(0, 1), List(true, false), true),
BinLessEq("{-1, 2.5} <= {-3, 2.5} = {true, false}", List(-1, 2.5), List(-3, 2.5), List(true, false), true),
BinLessEq("{-1, \\"2\\"} <= {\\"3\\", 2.5} = {true, false}", List(-1, "2"), List("3", 2.5), List(true, false), true),
BinLessEq("{\\"s\\"} <= {\\"s\\"} = {true}", List("s"), List("s"), List(true), true),
BinLessEq("{\\"s\\"} <= {\\"str\\"} = {true}", List("s"), List("str"), List(true), true),
BinLessEq("{\\"a\\"} <= {\\"b\\"} = {true}", List("a"), List("b"), List(true), true),
BinLessEq("{\\"1\\"} <= {\\"2\\"} = {true}", List("1"), List("2"), List(true), true),
BinLessEq("{\\"0\\"} <= {\\"-3\\"} = {false}", List("0"), List("-3"), List(false), true),
BinLessEq("{\\"0\\", \\"1\\"} <= {\\"0\\", \\"3\\"} = {true, false}", List("0", "1"), List("0", "3"), List(true, false), true),
BinLessEq("{\\"0\\", \\"1\\"} <= {\\"-1\\"} = {false}", List("0", "1"), List("-1"), List(false), false),
BinLessEq("{\\"-1\\", \\"2.5\\"} <= {\\"-1\\"} = {true, false}", List("-1", "2.5"), List("-1"), List(true, false), true),
BinLessEq("{\\"-5\\"} <= {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLessEq("{-2} <= {\\"-3\\", NaN} = {false}", List(-2), List("-3", NaN), List(false), false),
BinLessEq("{3} <= {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false),
BinGreaterEq("{} >= {} = {}", List(), List(), List(), true),
BinGreaterEq("{NaN} >= {2} = {false}", List(NaN), List(2), List(false), true),
BinGreaterEq("{PosInf} >= {NegInf} = {true}", List(PosInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf} >= {PosInf} = {true}", List(PosInf), List(PosInf), List(true), true),
BinGreaterEq("{NegInf} >= {PosInf} = {false}", List(NegInf), List(PosInf), List(false), true),
BinGreaterEq("{NegInf} >= {NegInf} = {true}", List(NegInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf, NegInf} >= {NegInf} = {true}", List(PosInf, NegInf), List(NegInf), List(true), true),
BinGreaterEq("{PosInf} >= {PosInf, NegInf} = {true}", List(PosInf), List(PosInf, NegInf), List(true), true),
BinGreaterEq("{NegInf} >= {PosInf, NegInf} = {true, false}", List(NegInf), List(PosInf, NegInf), List(true, false), true),
BinGreaterEq("{1} >= {1} = {true}", List(1), List(1), List(true), true),
BinGreaterEq("{3} >= {5} = {false}", List(3), List(5), List(false), true),
BinGreaterEq("{1} >= {-1} = {true}", List(1), List(-1), List(true), true),
BinGreaterEq("{1} >= {1, 3} = {true, false}", List(1), List(1, 3), List(true, false), true),
BinGreaterEq("{1} >= {-1, 3.5} = {true, false}", List(1), List(-1, 3.5), List(true, false), true),
BinGreaterEq("{-1} >= {0} = {false}", List(-1), List(0), List(false), true),
BinGreaterEq("{3.5} >= {0} = {true}", List(3.5), List(0), List(true), true),
BinGreaterEq("{-1} >= {-2} = {true}", List(-1), List(-2), List(true), true),
BinGreaterEq("{-5} >= {-2} = {false}", List(-5), List(-2), List(false), true),
BinGreaterEq("{-1} >= {1, 2} = {false}", List(-1), List(1, 2), List(false), true),
BinGreaterEq("{3.5} >= {3, 4} = {true, false}", List(3.5), List(3, 4), List(true, false), true),
BinGreaterEq("{-1} >= {-2, 2.5} = {ture, false}", List(-1), List(-2, 2.5), List(true, false), true),
BinGreaterEq("{0, 1} >= {0} = {true}", List(0, 1), List(0), List(true), true),
BinGreaterEq("{0, 1} >= {-2} = {true}", List(0, 1), List(-2), List(true), true),
BinGreaterEq("{0, 1} >= {0.5} = {true, false}", List(0, 1), List(0.5), List(true, false), true),
BinGreaterEq("{0, 1} >= {0, 3} = {true, false}", List(0, 1), List(0, 3), List(true, false), true),
BinGreaterEq("{0, 1} >= {-2, 0.5} = {true, false}", List(0, 1), List(-2, 0.5), List(true, false), true),
BinGreaterEq("{\\"0\\", 1} >= {\\"-2\\", 0.5} = {true, false}", List("0", 1), List("-2", 0.5), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {1} = {true, false}", List(-2, 3.5), List(1), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {0, 1} = {true, false}", List(-2, 3.5), List(0, 1), List(true, false), true),
BinGreaterEq("{-2, 3.5} >= {-2} >= {true}", List(-2, 3.5), List(-2), List(true), false),
BinGreaterEq("{-2, 3.5} >= {-3, 4.2} = {true, false}", List(-2, 3.5), List(-3, 4.2), List(true, false), true),
BinLessEq("{\\"-5\\"} >= {\\"-3\\", -2} = {true, false}", List("-5"), List("-3", -2), List(true, false), true),
BinLessEq("{-2} >= {\\"-3\\", NaN} = {true, false}", List(-2), List("-3", NaN), List(true, false), true),
BinLessEq("{3} >= {\\"5\\", NaN} = {false}", List(3), List("5", NaN), List(false), false)
)
// Unary-operator typing cases. Each case carries:
//   description string, concrete operand values, expected abstract result values,
//   and a flag: true  => the computed abstract value must EQUAL the expectation,
//              false => the computed abstract value may only over-approximate it.
// NOTE(review): NaN/PosInf/NegInf/UndefTop/NullTop appear to be abstract-domain
// constants (they are joined via Value(...) in OperatorTestCase.toValue) — confirm.
val unaCases:List[TypeOperator] = List (
UnaVoid("void {1} = {\\"undefined\\"}", List(1), List(UndefTop), true),
UnaVoid("void {null} = {\\"undefined\\"}", List(NullTop), List(UndefTop), true),
UnaVoid("void {null, PosInf} = {\\"undefined\\"}", List(NullTop, PosInf), List(UndefTop), true),
UnaPlus("+{null} = {0}", List(NullTop), List(0), true),
UnaPlus("+{true, 1} = {1}", List(true, 1), List(1), true),
UnaMinus("-{NaN} = {NaN}", List(NaN), List(NaN), true),
UnaMinus("-{0} = {0}", List(0), List(0), true),
UnaMinus("-{1} = {-1}", List(1), List(-1), true),
UnaMinus("-{-3.2} = {3.2}", List(-3.2), List(3.2), true),
UnaMinus("-{-3} = {3}", List(-3), List(3), true),
UnaMinus("-{1, 3} = {-3, -1}", List(1,3), List(-1,-3), true),
UnaMinus("-{-1, 2.1} = {1, -2.1}", List(-1, 2.1), List(1, -2.1), true),
UnaMinus("-{PosInf} = {NegInf}", List(PosInf), List(NegInf), true),
UnaMinus("-{NegInf} = {PosInf}", List(NegInf), List(PosInf), true),
UnaMinus("-{\\"str\\", null} = {NaN, 0}", List("str", NullTop), List(NaN, 0), true),
UnaBitNeg("~{32} = {-33}", List(32), List(-33), true),
UnaBitNeg("~{3.1} = {-4}", List(3.1), List(-4), true),
UnaBitNeg("~{3, 10} = {-4, -11}", List(3, 10), List(-4, -11), true),
UnaBitNeg("~{-3, 0.5} = {2, -1}", List(-3, 0.5), List(2, -1), true),
UnaBitNeg("~{1, -1} = {-2, 0}", List(1, -1), List(-2, 0), true),
UnaNeg("!{true} = {false}", List(true), List(false), true),
UnaNeg("!{false} = {true}", List(false), List(true), true),
UnaNeg("!{true, false = {false, true}}", List(true, false), List(false, true), true)
)
/**
 * Assembles the complete typing-operator test suite: one sub-suite each for
 * the join operation, the binary operators, and the unary operators.
 *
 * Every `joinCase` is a plain 5-tuple (name, lhs, rhs, expected, mustEqual);
 * the binary/unary cases are case classes whose constructor determines which
 * test method of BinTest/UnaTest should be invoked.
 */
def suite(): Test = {
  // Abstract addresses must be reset before any Value fixtures are built.
  AddressManager.reset()
  val root = new TestSuite("Typing Operator Test")
  val joinSuite = new TestSuite("Join")
  val binSuite = new TestSuite("Binary Operators")
  val unaSuite = new TestSuite("Unary Operators")
  joinCases.foreach { jc =>
    joinSuite.addTest(new JoinTest(jc._1, jc._2, jc._3, jc._4, jc._5, "testJoin"))
  }
  // Extract the common fields plus the name of the BinTest method to run,
  // then add a single test per case instead of duplicating the addTest call.
  binCases.foreach { bc =>
    val (name, lhs, rhs, expec, equal, method) = bc match {
      case BinBitOr(n, l, r, e, q)     => (n, l, r, e, q, "testBitOr")
      case BinBitAnd(n, l, r, e, q)    => (n, l, r, e, q, "testBitAnd")
      case BinBitXor(n, l, r, e, q)    => (n, l, r, e, q, "testBitXor")
      case BinLShift(n, l, r, e, q)    => (n, l, r, e, q, "testLShift")
      case BinRShift(n, l, r, e, q)    => (n, l, r, e, q, "testRShift")
      case BinURShift(n, l, r, e, q)   => (n, l, r, e, q, "testURShift")
      case BinPlus(n, l, r, e, q)      => (n, l, r, e, q, "testPlus")
      case BinMinus(n, l, r, e, q)     => (n, l, r, e, q, "testMinus")
      case BinMul(n, l, r, e, q)       => (n, l, r, e, q, "testMul")
      case BinDiv(n, l, r, e, q)       => (n, l, r, e, q, "testDiv")
      case BinMod(n, l, r, e, q)       => (n, l, r, e, q, "testMod")
      case BinEq(n, l, r, e, q)        => (n, l, r, e, q, "testEq")
      case BinNeq(n, l, r, e, q)       => (n, l, r, e, q, "testNeq")
      case BinSEq(n, l, r, e, q)       => (n, l, r, e, q, "testSEq")
      case BinSNeq(n, l, r, e, q)      => (n, l, r, e, q, "testSNeq")
      case BinLess(n, l, r, e, q)      => (n, l, r, e, q, "testLess")
      case BinGreater(n, l, r, e, q)   => (n, l, r, e, q, "testGreater")
      case BinLessEq(n, l, r, e, q)    => (n, l, r, e, q, "testLessEq")
      case BinGreaterEq(n, l, r, e, q) => (n, l, r, e, q, "testGreaterEq")
    }
    binSuite.addTest(new BinTest(name, lhs, rhs, expec, equal, method))
  }
  // Same dispatch scheme for the unary-operator cases.
  unaCases.foreach { uc =>
    val (name, oprnd, expec, equal, method) = uc match {
      case UnaVoid(n, o, e, q)   => (n, o, e, q, "testVoid")
      case UnaPlus(n, o, e, q)   => (n, o, e, q, "testPlus")
      case UnaMinus(n, o, e, q)  => (n, o, e, q, "testMinus")
      case UnaBitNeg(n, o, e, q) => (n, o, e, q, "testBitNeg")
      case UnaNeg(n, o, e, q)    => (n, o, e, q, "testNeg")
    }
    unaSuite.addTest(new UnaTest(name, oprnd, expec, equal, method))
  }
  root.addTest(joinSuite)
  root.addTest(binSuite)
  root.addTest(unaSuite)
  root
}
}
class OperatorTestCase(func:String) extends TestCase(func) {
  /** Abstraction ("alpha") function: concrete values -> abstract value.
    * Lifts each element of `in` into the abstract domain and joins the
    * results, starting from the bottom element. Note that the `Int` case
    * must precede the `Number` case: a boxed Int also matches Number. */
  def toValue(in: List[Any]): Value =
    in.foldLeft(ValueBot: Value) { (acc, elem) =>
      elem match {
        case UndefTop   => acc + Value(AbsUndef.alpha)
        case NullTop    => acc + Value(AbsNull.alpha)
        case NaN        => acc + Value(NaN)
        case PosInf     => acc + Value(PosInf)
        case NegInf     => acc + Value(NegInf)
        case n: Int     => acc + Value(AbsNumber.alpha(n))
        case d: Number  => acc + Value(AbsNumber.alpha(d.doubleValue))
        case s: String  => acc + Value(AbsString.alpha(s))
        case b: Boolean => acc + Value(AbsBool.alpha(b))
      }
    }
}
class JoinTest(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
  // Abstract operands and expected result; filled in by setUp from the
  // concrete value lists via the inherited alpha function toValue.
  var _left:Value = ValueBot
  var _right:Value = ValueBot
  var _expec:Value = ValueBot
  // No-op placeholder alongside the JUnit3-style test method.
  def joinTest() {}
  override def getName = name
  override def setUp() = {
    _left  = toValue(lhs)
    _right = toValue(rhs)
    _expec = toValue(expec)
  }
  /** Soundness: the expected value is covered by the join of the operands.
    * Precision (only when `equal` is set): the join is covered by the
    * expected value, i.e. the two are lattice-equal. */
  def testJoin = {
    val joined = _left + _right
    assertTrue(_expec <= joined)
    if (equal) assertTrue(joined <= _expec)
  }
}
class BinTest(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
  // Abstract operands and expected result; populated in setUp from the
  // concrete value lists via the inherited alpha function toValue.
  var leftVal:Value = ValueBot
  var rightVal:Value = ValueBot
  var expecVal:Value = ValueBot
  def binTest() {}
  override def getName = name
  override def setUp = {
    leftVal = super.toValue(lhs)
    rightVal = super.toValue(rhs)
    expecVal = super.toValue(expec)
  }
  /** Shared check used by every binary-operator test: applies `op` once,
    * asserts soundness (the expected value is covered by the result) and,
    * when `equal` is set, precision (the result is covered by the expected
    * value, i.e. they are lattice-equal). */
  private def check(op: (Value, Value) => Value) = {
    val result = op(leftVal, rightVal)
    assertTrue(expecVal <= result)
    if (equal) assertTrue(result <= expecVal)
  }
  // One JUnit3-style test method per binary operator; the suite builder
  // selects which one runs via the `func` constructor argument.
  def testBitOr     = check(Operator.bopBitOr)
  def testBitAnd    = check(Operator.bopBitAnd)
  def testBitXor    = check(Operator.bopBitXor)
  def testLShift    = check(Operator.bopLShift)
  def testRShift    = check(Operator.bopRShift)
  def testURShift   = check(Operator.bopURShift)
  def testPlus      = check(Operator.bopPlus)
  def testMinus     = check(Operator.bopMinus)
  def testMul       = check(Operator.bopMul)
  def testDiv       = check(Operator.bopDiv)
  def testMod       = check(Operator.bopMod)
  def testEq        = check(Operator.bopEq)
  def testNeq       = check(Operator.bopNeq)
  def testSEq       = check(Operator.bopSEq)
  def testSNeq      = check(Operator.bopSNeq)
  def testLess      = check(Operator.bopLess)
  def testGreater   = check(Operator.bopGreater)
  def testLessEq    = check(Operator.bopLessEq)
  def testGreaterEq = check(Operator.bopGreaterEq)
}
class UnaTest(name:String, oprnd:List[Any], expec:List[Any], equal:Boolean, func:String) extends OperatorTestCase(func) {
  // Abstract operand and expected result; populated in setUp from the
  // concrete value lists via the inherited alpha function toValue.
  var oprndVal:Value = ValueBot
  var expecVal:Value = ValueBot
  def unaTest() {}
  override def getName = name
  override def setUp = {
    oprndVal = super.toValue(oprnd)
    expecVal = super.toValue(expec)
  }
  /** Shared check used by every unary-operator test: applies `op` once,
    * asserts soundness (the expected value is covered by the result) and,
    * when `equal` is set, precision (the result is covered by the expected
    * value, i.e. they are lattice-equal). */
  private def check(op: Value => Value) = {
    val result = op(oprndVal)
    assertTrue(expecVal <= result)
    if (equal) assertTrue(result <= expecVal)
  }
  // One JUnit3-style test method per unary operator; the suite builder
  // selects which one runs via the `func` constructor argument.
  def testVoid   = check(Operator.uVoid)
  def testPlus   = check(Operator.uopPlus)
  def testMinus  = check(Operator.uopMinus)
  def testBitNeg = check(Operator.uopBitNeg)
  def testNeg    = check(Operator.uopNeg)
}
/** Declarative specification of a single operator test case.
  * Each subclass names one operator: `name` labels the generated test,
  * the operand lists hold concrete values (lifted into the abstract
  * domain by OperatorTestCase.toValue), `expec` is the expected abstract
  * result, and `equal` demands the result be exactly `expec` (precision)
  * rather than merely an over-approximation (soundness only). */
abstract class TypeOperator
/* Binary */
case class BinBitOr(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinBitAnd(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinBitXor(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinLShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinRShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinURShift(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinPlus(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMinus(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMul(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinDiv(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class BinMod(name:String, lhs:List[Any], rhs:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
// Comparison operators carry Boolean expectation lists.
case class BinEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinNeq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinSEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinSNeq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinLess(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinGreater(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinLessEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
case class BinGreaterEq(name:String, lhs:List[Any], rhs:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
/* Unary */
case class UnaVoid(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
//case class UnaTypeof(name:String, oprn:List[Any], expec:List[String], equal:Boolean) extends TypeOperator
case class UnaPlus(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaMinus(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaBitNeg(name:String, oprn:List[Any], expec:List[Any], equal:Boolean) extends TypeOperator
case class UnaNeg(name:String, oprn:List[Any], expec:List[Boolean], equal:Boolean) extends TypeOperator
| daejunpark/jsaf | src/kr/ac/kaist/jsaf/tests/TypingOperatorJUTest.scala | Scala | bsd-3-clause | 82,485 |
import java.util.UUID
import cats.effect.IO
import scala.util.Try
// Sample payload returned by the /json endpoint (serialised via circe's auto-derived codec).
case class Person(name: String, age: Int)
/** Route definitions for the demo http4s server; combined in Http4sMain. */
object Endpoints {
  import org.http4s._
  import org.http4s.dsl.io._
  // GET /hello/<int> — echoes the captured integer back in the response body.
  val helloWorldService = HttpRoutes.of[IO] {
    case GET -> Root / "hello" / IntVar(number) =>
      Ok(s"Hello, your number is $number")
  }
  // GET /async — proxies https://httpbin.org/get by bridging an
  // AsyncHttpClient CompletableFuture into IO via IO.async.
  // NOTE(review): a new AsyncHttpClient is created on every request and
  // never closed — potential resource leak; consider a shared, managed client.
  val asyncRequest = HttpRoutes.of[IO] {
    case GET -> Root / "async" =>
      Ok {
        IO.async[String] { eitherCb =>
          import org.asynchttpclient.Dsl._
          val whenResponse = asyncHttpClient.
            prepareGet("https://httpbin.org/get").execute()
          // Complete the IO with Left on failure, Right on success.
          whenResponse.toCompletableFuture.whenComplete((res, th) => {
            if (th != null) {
              eitherCb(Left(th))
            } else eitherCb(Right(res.getResponseBody))
          })
        }
      }
  }
  // GET /json — returns a fixed Person encoded as a JSON body.
  val jsonRequest = HttpRoutes.of[IO] {
    case GET -> Root / "json" =>
      import org.http4s.circe._ // EntityEncoder[IO, Json]
      import io.circe.generic.auto._ // automatic codecs for Person
      import io.circe.syntax._ // asJson method
      Ok {
        Person("Joe", 42).asJson
      }
  }
  // GET /id/<uuid> — echoes the UUID captured by the UuidVar extractor.
  val idService = HttpRoutes.of[IO] {
    case GET -> Root / "id" / UuidVar(id) =>
      Ok(s"Your ID is $id")
  }
  // GET /time — returns the server's current epoch time in milliseconds.
  val timeService = HttpRoutes.of[IO] {
    case GET -> Root / "time" =>
      Ok(System.currentTimeMillis().toString)
  }
  /** Path-segment extractor: matches only segments parseable as a UUID. */
  object UuidVar {
    def unapply(s: String): Option[UUID] = {
      // UUID.fromString throws on malformed input; Try converts that to None.
      Try { UUID.fromString(s) }.toOption
    }
  }
}
import cats.effect.{ExitCode, IO, IOApp}
/** Entry point: combines all endpoint routes and serves them with Blaze. */
object Http4sMain extends IOApp {
  import Endpoints._
  import cats.implicits._
  import org.http4s.implicits._
  import org.http4s.server.blaze._
  import org.http4s.server.Router
  // Combine the individual route groups; <+> tries each in order until one matches.
  val api = helloWorldService <+> timeService <+> idService <+> asyncRequest <+> jsonRequest
  // Mount the combined routes at the root path; unmatched requests get 404.
  val httpApp = Router("/" -> api).orNotFound
  // Run the Blaze server on port 8080 until the process is terminated,
  // then report a successful exit.
  def run(args: List[String]): IO[ExitCode] =
    BlazeServerBuilder[IO]
      .bindHttp(8080)
      .withHttpApp(httpApp)
      .serve
      .compile
      .drain
      .as(ExitCode.Success)
}
| denisftw/advanced-scala-code | http4s/src/main/scala/Http4sMain.scala | Scala | apache-2.0 | 2,072 |
package net.badgerhunt.shares.render
class UserLogin extends Page {
  // Path under which this page is served.
  val url = "login"

  /** Renders the login form plus a registration link.
    *
    * Fix: the markup was previously wrapped in a <p> element, but a <form>
    * is not phrasing content and may not appear inside a paragraph —
    * browsers auto-close the <p>, producing broken nesting. A <div>
    * wrapper is valid; the <input> elements are also self-closed, which
    * is the conventional form for void elements.
    */
  def render = Right(
    <div>
      <form method="post" action="/logging_in" name="login" id="login_form">
        <p>Username: <input type="text" name="username"/></p>
        <p>Password: <input type="password" name="password"/></p>
        <p><input type="submit" name="Login"/></p>
      </form>
      <a href="/register">Register as new user</a>
    </div>)
} | Synesso/tofucube | src/main/scala/net/badgerhunt/shares/render/UserLogin.scala | Scala | bsd-2-clause | 456 |
package core.models.services
import scala.concurrent.Future
/**
 * Service that reports the health state of the app.
 *
 * Implementations decide what "healthy" means (e.g. database reachability);
 * this trait only fixes the asynchronous boolean contract.
 */
trait HealthService {

  /**
   * Gets the current health state of the app.
   *
   * @return A future completing with true if the app is healthy, false otherwise.
   */
  def get(): Future[Boolean]
}
| akkie/silhouette-play-react-seed | app-core/src/main/scala/core/models/services/HealthService.scala | Scala | mit | 285 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset for code snippets that meet specific criteria and returns a sample of them, giving a quick overview of the dataset's contents without deeper analysis.