code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package org.chipmunk.util
import java.sql.DriverManager
import java.sql.Driver
import org.squeryl.PrimitiveTypeMode.inTransaction
import org.squeryl.Session
import org.squeryl.SessionFactory
import org.squeryl.internals.DatabaseAdapter
object Configurator {

  /**
   * Initializes the Squeryl [[SessionFactory]] from a JDBC driver class name.
   *
   * @param driverName fully qualified class name of the JDBC driver
   * @param adapter    Squeryl adapter matching the target database
   * @param dbUrl      JDBC connection URL
   */
  def initialize(
    driverName: String,
    adapter: DatabaseAdapter,
    dbUrl: String)
      : Unit = {
    // Loading the class registers the driver with DriverManager (old JDBC fashion).
    val clazz = Class.forName(driverName)
    val driverClass = clazz.asInstanceOf[Class[_ <: Driver]]
    initialize(driverClass, adapter, dbUrl)
  }

  /**
   * Initializes the Squeryl [[SessionFactory]].
   *
   * @param driver  JDBC driver class; loading it is assumed to have registered it
   *                with DriverManager already (it is not used directly here)
   * @param adapter Squeryl adapter matching the target database
   * @param dbUrl   JDBC connection URL
   */
  def initialize(
    driver: Class[_ <: Driver],
    adapter: DatabaseAdapter,
    dbUrl: String)
      : Unit = {
    SessionFactory.concreteFactory = Some(() => {
      val connection = DriverManager.getConnection(dbUrl)
      Session.create(connection, adapter)
    })
  }

  /** Enables SQL statement logging on the current session (defaults to stdout). */
  def logSql(logger: String => Unit = { println(_) }): Unit = {
    inTransaction { Session.currentSession.setLogger(logger) }
  }

  /** Stops SQL logging by installing a no-op logger. */
  // Fixed: was `_ => Unit`, which passes the `Unit` companion object as the
  // (discarded) result instead of the unit value `()`.
  def stopLoggingSql(): Unit = { logSql(_ => ()) }
}
| kpjjpk/chipmunk | src/main/scala/org/chipmunk/util/Configurator.scala | Scala | mit | 1,028 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand
import sangria.validation._
import scala.collection.mutable.{Set ⇒ MutableSet}
/**
* Unique fragment names
*
* A GraphQL document is only valid if all defined fragments have unique names.
*/
class UniqueFragmentNames extends ValidationRule {
  override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
    // Fragment names already seen while walking this document.
    val knownFragmentNames = MutableSet[String]()

    override val onEnter: ValidationVisit = {
      case fragDef: ast.FragmentDefinition ⇒
        // `add` returns false when the name was already present, i.e. a duplicate.
        if (knownFragmentNames.add(fragDef.name))
          AstVisitorCommand.RightContinue
        else
          Left(Vector(DuplicateFragmentNameViolation(fragDef.name, ctx.sourceMapper, fragDef.position.toList)))
    }
  }
} | PhilAndrew/JumpMicro | JMSangriaGraphql/src/main/scala/sangria/validation/rules/UniqueFragmentNames.scala | Scala | mit | 849 |
package persistence.dal
import io.getquill._
import utils._
import org.scalatest.{FunSuite, Suite}
// Base harness wiring a test database context into the persistence layer for suites.
trait AbstractPersistenceTest extends FunSuite { this: Suite =>
// Aggregates configuration and the test persistence module into one mixin.
trait Modules extends ConfigurationModuleImpl with PersistenceModuleTest {
}
// Persistence module variant backed by the "quilltest" Postgres configuration.
trait PersistenceModuleTest extends PersistenceModule with DbContext{
this: Configuration =>
// Quill context pointing at the test database (config key "quilltest").
override lazy val context = new PostgresAsyncContext[SnakeCase]("quilltest")
override val suppliersDal: SuppliersDal = new SuppliersDalImpl(context)
// Handle to this module instance for use inside nested anonymous scopes.
val self = this
}
} | cdiniz/quill-async-akka-http | src/test/scala/persistence/dal/AbstractPersistenceTest.scala | Scala | apache-2.0 | 536 |
package com.twitter.zipkin.storage.redis
import scala.collection.immutable.List
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.zipkin.thriftscala
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.common.{Annotation, BinaryAnnotation, Span}
/**
 * Verifies that RedisSnappyThriftCodec round-trips a span without loss.
 */
class RedisSnappyThriftCodecSpec extends RedisSpecification {
  // Codec under test: snappy-compressed binary Thrift serialization of spans.
  val thriftCodec = new RedisSnappyThriftCodec(new BinaryThriftStructSerializer[thriftscala.Span] {
    override def codec = thriftscala.Span
  })

  // Minimal span fixture converted to its Thrift representation.
  val span = Span(1L, "name", 2L, Option(3L), List[Annotation](),
    Seq[BinaryAnnotation](), true).toThrift

  test("compress and decompress should yield an equal object") {
    // Round-trip: encoding followed by decoding must reproduce the original value.
    val roundTripped = thriftCodec.decode(thriftCodec.encode(span))
    assertResult (span) (roundTripped)
  }
}
| srijs/zipkin | zipkin-redis/src/test/scala/com/twitter/zipkin/storage/redis/RedisSnappyThriftCodec.scala | Scala | apache-2.0 | 876 |
object Main {

  /** Classifies `n`: zero and above is reported as positive, below zero as negative. */
  def classify(n: Int): String =
    if (n >= 0) "Positive number" else "Negative number"

  /**
   * Entry point: parses the first argument as an integer and prints its sign.
   * Prints a usage message for a missing or non-numeric argument.
   */
  def main(args: Array[String]): Unit = {
    // Fixed: the original `assert(args.length >= 1)` threw an AssertionError
    // (not a NumberFormatException), so a missing argument crashed instead of
    // showing the usage message.
    if (args.length < 1) {
      println("Usage: scala Main <n1>")
    } else {
      try {
        println(classify(Integer.parseInt(args(0))))
      } catch {
        case _: NumberFormatException =>
          println("Usage: scala Main <n1>")
      }
    }
  }
}
| dzufferey/picasso | frontend/compilerPlugin/src/test/resources/plugin/if.scala | Scala | bsd-2-clause | 358 |
export SPARK_CLASSPATH=/home/epiclulz/local/cs/lib/cs.jar
export ADD_JARS=/home/epiclulz/local/cs/lib/cs.jar
/home/epiclulz/local/spark/bin/spark-shell --master spark://solo-kumbu:7077 --jars $ADD_JARS
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark._
//--
import org.apache.spark.graphx._
import org.apache.spark.graphx.lib._
import org.apache.spark.rdd.RDD
//-from-here
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.DefaultFormats
import scala.util.control.NonFatal
import scala.util.Try
//import org.json4s.Format
import org.apache.spark.SparkContext._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonAST._
implicit lazy val formats = org.json4s.DefaultFormats
//--
import org.clearspace.spark._
// Shape of one node record in nodes.json (field names match the JSON keys).
case class NodeFromJson(
hash: Long,
name: String,
discovered_on: Long,
properties: Map[String, String],
node_type: String,
id: Long)
// Shape of one relationship record in rels.json.
case class RelFromJson(
rel_type: String,
properties: Map[String, String],
src_id: Long,
name: String,
discovered_on: Long,
hash: Long,
seen_count: Int,
seen_on: Long,
id: Long,
dst_id: Long)
// Parses one JSON line into (id, attributes); None when parsing fails.
def nodeParser(s: String): Option[(Long, (String, String, Long, Long, Map[String, String]))] = {
Try {
val p = parse(s).extract[NodeFromJson]
(p.id, (p.name, p.node_type,
p.discovered_on, p.id,
p.properties))
}.toOption
}
// NOTE(review): REPL scratch variant — this does not compile as written:
// `True`/`False` are not Scala literals (use `true`/`false`), `Success`/`Failure`
// are not imported here, and the declared Boolean result is never returned.
// In a compiled file the three same-parameter-list overloads would also clash.
def nodeParser(s: String): Boolean = {
val p = Try(parse(s).extract[NodeFromJson]) match {
case Success(lines) => True
case Failure(_) => False
}
}
// Strict variant: throws on malformed input instead of returning None.
// In the REPL, this redefinition shadows the Option-returning one above.
def nodeParser(s: String): (Long, (String, String, Long, Long, Map[String, String])) = {
val p = parse(s).extract[NodeFromJson]
(p.id, (p.name, p.node_type,
p.discovered_on, p.id,
p.properties))
}
val nodeFile = sc.textFile("/home/epiclulz/workspace/tmp/nodes.json")
// NOTE(review): invalid — `sc.parallelize` expects a local collection, not the
// RDD produced by `flatMap`; superseded by the flatMap-only definition below.
val vertexes: RDD[(VertexId, (String, String, Long, Long, Map[String, String]))] = sc.parallelize(nodeFile.flatMap(a => nodeParser(a)))
val vertexes: RDD[(VertexId, (String, String, Long, Long, Map[String, String]))] = nodeFile.map(a => nodeParser(a))
vertexes.count
// Parses one JSON line into a graph edge carrying the relationship attributes.
def relParser(s: String): Option[Edge[(String, String, Int, Long, Long, Long, Long, Long, Map[String, String])]] = {
Try {
val p = parse(s).extract[RelFromJson];
Edge(p.src_id, p.dst_id,
(p.name, p.rel_type, p.seen_count,
p.discovered_on, p.seen_on, p.id,
p.src_id, p.dst_id, p.properties))
}.toOption
}
val nodeFile = sc.textFile("/home/epiclulz/workspace/tmp/nodes.json")
val relFile = sc.textFile("/home/epiclulz/workspace/tmp/rels.json")
val vertexes: RDD[(VertexId, (String, String, Long, Long, Map[String, String]))] = nodeFile.flatMap(a => nodeParser(a))
val relationships: RDD[Edge[(String, String, Int, Long, Long, Long, Long, Long, Map[String, String])]] = relFile.flatMap(a => relParser(a))
// NOTE(review): `defaultUser` is built but never passed to `Graph(...)` below,
// so edges referencing missing vertex ids get no default attribute.
val defaultUser = ("John Doe", "Missing", 0, 0, 0, Map("Nothing" -> "Here"))
val graph = Graph(vertexes, relationships)
| hihellobolke/clearspark | scala/spark-shell.scala | Scala | apache-2.0 | 3,102 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.schema
import org.apache.flink.table.catalog.{ObjectIdentifier, ResolvedCatalogTable}
import org.apache.flink.table.connector.source.DynamicTableSource
import org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptSchema
import org.apache.calcite.rel.`type`.RelDataType
import java.util
/**
* A [[FlinkPreparingTableBase]] implementation which defines the context variables
* required to translate the Calcite [[org.apache.calcite.plan.RelOptTable]] to the Flink specific
* relational expression with [[DynamicTableSource]].
*
* @param relOptSchema The RelOptSchema that this table comes from
* @param tableIdentifier The full path of the table to retrieve.
* @param rowType The table row type
* @param statistic The table statistics
* @param tableSource The [[DynamicTableSource]] for which is converted to a Calcite Table
* @param isStreamingMode A flag that tells if the current table is in stream mode
* @param catalogTable Resolved catalog table where this table source table comes from
* @param extraDigests The extra digests which will be added into `getQualifiedName`
* as a part of table digest
*/
class TableSourceTable(
relOptSchema: RelOptSchema,
val tableIdentifier: ObjectIdentifier,
rowType: RelDataType,
statistic: FlinkStatistic,
val tableSource: DynamicTableSource,
val isStreamingMode: Boolean,
val catalogTable: ResolvedCatalogTable,
val extraDigests: Array[String] = Array.empty,
val abilitySpecs: Array[SourceAbilitySpec] = Array.empty)
extends FlinkPreparingTableBase(
relOptSchema,
rowType,
util.Arrays.asList(
tableIdentifier.getCatalogName,
tableIdentifier.getDatabaseName,
tableIdentifier.getObjectName),
statistic) {
// Qualified name is the catalog path plus any extra digests, so plans with
// different pushed-down specs do not share the same digest.
override def getQualifiedName: util.List[String] = {
val builder = ImmutableList.builder[String]()
.addAll(super.getQualifiedName)
extraDigests.foreach(builder.add)
builder.build()
}
/**
* Creates a copy of this table with specified digest.
*
* @param newTableSource tableSource to replace
* @param newRowType new row type
* @param newExtraDigests extra digests appended to the existing ones
* @param newAbilitySpecs ability specs appended to the existing ones
* @return added TableSourceTable instance with specified digest
*/
def copy(
newTableSource: DynamicTableSource,
newRowType: RelDataType,
newExtraDigests: Array[String],
newAbilitySpecs: Array[SourceAbilitySpec]): TableSourceTable = {
new TableSourceTable(
relOptSchema,
tableIdentifier,
newRowType,
statistic,
newTableSource,
isStreamingMode,
catalogTable,
extraDigests ++ newExtraDigests,
abilitySpecs ++ newAbilitySpecs
)
}
/**
* Creates a copy of this table with specified digest and statistic.
*
* @param newTableSource tableSource to replace
* @param newStatistic statistic to replace
* @param newExtraDigests extra digests appended to the existing ones
* @param newAbilitySpecs ability specs appended to the existing ones
* @return added TableSourceTable instance with specified digest and statistic
*/
def copy(
newTableSource: DynamicTableSource,
newStatistic: FlinkStatistic,
newExtraDigests: Array[String],
newAbilitySpecs: Array[SourceAbilitySpec]): TableSourceTable = {
new TableSourceTable(
relOptSchema,
tableIdentifier,
rowType,
newStatistic,
newTableSource,
isStreamingMode,
catalogTable,
extraDigests ++ newExtraDigests,
abilitySpecs ++ newAbilitySpecs)
}
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/schema/TableSourceTable.scala | Scala | apache-2.0 | 4,374 |
/**
* Copyright 2013 Gianluca Amato
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.narrowings
import it.unich.jandom.domains.AbstractProperty
/**
* This is the trait for narrowings, operators used to accelerate fixpoint computations.
* @author Gianluca Amato <gamato@unich.it>
*/
trait Narrowing {
/**
* Applies the narrowing to two consecutive iterates of a fixpoint computation.
*
* @param current the property at the current iteration.
* @param next the property at the next iteration. This IS assumed to be smaller than current.
* @return the result of the narrowing.
*/
def apply[Property <: AbstractProperty[Property]](current: Property, next: Property): Property
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/narrowings/Narrowing.scala | Scala | lgpl-3.0 | 1,300 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.batch.task.input
import com.bwsw.common.SerializerInterface
import com.bwsw.sj.common.engine.core.entities.{Envelope, KafkaEnvelope, TStreamEnvelope}
import com.bwsw.sj.common.engine.core.managment.CommonTaskManager
import com.bwsw.tstreams.agents.group.CheckpointGroup
import com.typesafe.scalalogging.Logger
import scaldi.Injector
/**
* Class is responsible for handling kafka and t-stream input
* (i.e. retrieving and checkpointing kafka and t-stream messages)
* for batch streaming engine
*
* @param manager allows to manage an environment of batch streaming task
* @author Kseniya Mikhaleva
*/
class RetrievableCompleteCheckpointTaskInput[T <: AnyRef](manager: CommonTaskManager,
                                                          checkpointGroup: CheckpointGroup,
                                                          envelopeDataSerializer: SerializerInterface,
                                                          lowWatermark: Int)
                                                         (implicit injector: Injector)
  extends RetrievableCheckpointTaskInput[Envelope](manager.inputs) {
  private val logger = Logger(this.getClass)

  // Delegates for the two supported input kinds (Kafka and T-Streams).
  private val retrievableKafkaTaskInput = new RetrievableKafkaCheckpointTaskInput[T](
    manager,
    checkpointGroup,
    envelopeDataSerializer,
    lowWatermark)
  private val retrievableTStreamTaskInput = new RetrievableTStreamCheckpointTaskInput[T](
    manager,
    checkpointGroup,
    envelopeDataSerializer,
    lowWatermark)

  // Maps every input stream name to the task input responsible for it.
  private val taskInputByStream =
    Seq(retrievableKafkaTaskInput, retrievableTStreamTaskInput)
      .map(_.asInstanceOf[RetrievableCheckpointTaskInput[Envelope]])
      .flatMap(taskInput => taskInput.inputs.keys.map(s => s.name -> taskInput)).toMap

  /**
   * Logs and raises for envelopes of an unsupported type. Extracted to remove
   * the error-handling branch previously duplicated in `registerEnvelope` and
   * `setConsumerOffset` (which also built the message string twice each).
   */
  private def failOnUnknownEnvelope(wrongEnvelope: Envelope): Nothing = {
    val message = s"Incoming envelope with type: ${wrongEnvelope.getClass} is not defined for batch streaming engine"
    logger.error(message)
    throw new Exception(message)
  }

  // NOTE: the type argument in these patterns is erased at runtime; dispatch is
  // effectively on the envelope class only.
  override def registerEnvelope(envelope: Envelope): Unit = {
    envelope match {
      case tstreamEnvelope: TStreamEnvelope[T] =>
        retrievableTStreamTaskInput.registerEnvelope(tstreamEnvelope)
      case kafkaEnvelope: KafkaEnvelope[T] =>
        retrievableKafkaTaskInput.registerEnvelope(kafkaEnvelope)
      case wrongEnvelope =>
        failOnUnknownEnvelope(wrongEnvelope)
    }
  }

  /** Delegates retrieval to whichever task input owns the named stream. */
  override def get(stream: String): Iterable[Envelope] =
    taskInputByStream(stream).get(stream)

  override def setConsumerOffset(envelope: Envelope): Unit = {
    envelope match {
      case tstreamEnvelope: TStreamEnvelope[T] =>
        retrievableTStreamTaskInput.setConsumerOffset(tstreamEnvelope)
      case kafkaEnvelope: KafkaEnvelope[T] =>
        retrievableKafkaTaskInput.setConsumerOffset(kafkaEnvelope)
      case wrongEnvelope =>
        failOnUnknownEnvelope(wrongEnvelope)
    }
  }

  override def setConsumerOffsetToLastEnvelope(): Unit = {
    retrievableKafkaTaskInput.setConsumerOffsetToLastEnvelope()
    retrievableTStreamTaskInput.setConsumerOffsetToLastEnvelope()
  }

  override def close(): Unit = {
    retrievableKafkaTaskInput.close()
    retrievableTStreamTaskInput.close()
  }
}
| bwsw/sj-platform | core/sj-batch-streaming-engine/src/main/scala/com/bwsw/sj/engine/batch/task/input/RetrievableCompleteCheckpointTaskInput.scala | Scala | apache-2.0 | 4,226 |
// Databricks notebook source exported at Mon, 14 Mar 2016 04:43:58 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
// COMMAND ----------
// MAGIC %md
// MAGIC This is an elaboration of the [Apache Spark 1.6 sql-progamming-guide](http://spark.apache.org/docs/latest/sql-programming-guide.html).
// MAGIC
// MAGIC # [Performance Tuning](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/004_performanceTuning_sqlProgGuide)
// MAGIC
// MAGIC ## [Spark Sql Programming Guide](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/000_sqlProgGuide)
// MAGIC
// MAGIC - [Overview](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/001_overview_sqlProgGuide)
// MAGIC - SQL
// MAGIC - DataFrames
// MAGIC - Datasets
// MAGIC - [Getting Started](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/002_gettingStarted_sqlProgGuide)
// MAGIC - Starting Point: SQLContext
// MAGIC - Creating DataFrames
// MAGIC - DataFrame Operations
// MAGIC - Running SQL Queries Programmatically
// MAGIC - Creating Datasets
// MAGIC - Interoperating with RDDs
// MAGIC - Inferring the Schema Using Reflection
// MAGIC - Programmatically Specifying the Schema
// MAGIC - [Data Sources](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/003_dataSources_sqlProgGuide)
// MAGIC - Generic Load/Save Functions
// MAGIC - Manually Specifying Options
// MAGIC - Run SQL on files directly
// MAGIC - Save Modes
// MAGIC - Saving to Persistent Tables
// MAGIC - Parquet Files
// MAGIC - Loading Data Programmatically
// MAGIC - Partition Discovery
// MAGIC - Schema Merging
// MAGIC - Hive metastore Parquet table conversion
// MAGIC - Hive/Parquet Schema Reconciliation
// MAGIC - Metadata Refreshing
// MAGIC - Configuration
// MAGIC - JSON Datasets
// MAGIC - Hive Tables
// MAGIC - Interacting with Different Versions of Hive Metastore
// MAGIC - JDBC To Other Databases
// MAGIC - Troubleshooting
// MAGIC - [Performance Tuning](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/004_performanceTuning_sqlProgGuide)
// MAGIC - Caching Data In Memory
// MAGIC - Other Configuration Options
// MAGIC - [Distributed SQL Engine](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/005_distributedSqlEngine_sqlProgGuide)
// MAGIC - Running the Thrift JDBC/ODBC server
// MAGIC - Running the Spark SQL CLI
// COMMAND ----------
// MAGIC %md
// MAGIC # [Performance Tuning](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/004_performanceTuning_sqlProgGuide)
// MAGIC
// MAGIC For some workloads it is possible to improve performance by either
// MAGIC caching data in memory, or by turning on some experimental options.
// MAGIC
// MAGIC Caching Data In Memory
// MAGIC ----------------------
// MAGIC
// MAGIC Spark SQL can cache tables using an in-memory columnar format by calling
// MAGIC `sqlContext.cacheTable("tableName")` or `dataFrame.cache()`. Then Spark
// MAGIC SQL will scan only required columns and will automatically tune
// MAGIC compression to minimize memory usage and GC pressure. You can call
// MAGIC `sqlContext.uncacheTable("tableName")` to remove the table from memory.
// MAGIC
// MAGIC Configuration of in-memory caching can be done using the `setConf`
// MAGIC method on `SQLContext` or by running `SET key=value` commands using SQL.
// MAGIC
// MAGIC Property Name Default Meaning
// MAGIC ------------------------------------------------ --------- --------------------------------------------------------------------------------------------------------------------------------------------------------
// MAGIC `spark.sql.inMemoryColumnarStorage.compressed` true When set to true Spark SQL will automatically select a compression codec for each column based on statistics of the data.
// MAGIC `spark.sql.inMemoryColumnarStorage.batchSize` 10000 Controls the size of batches for columnar caching. Larger batch sizes can improve memory utilization and compression, but risk OOMs when caching data.
// MAGIC
// MAGIC Other Configuration Options
// MAGIC ---------------------------
// MAGIC
// MAGIC The following options can also be used to tune the performance of query
// MAGIC execution. It is possible that these options will be deprecated in
// MAGIC future release as more optimizations are performed automatically.
// MAGIC
// MAGIC Property Name Default Meaning
// MAGIC ---------------------------------------- ------------------ -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
// MAGIC `spark.sql.autoBroadcastJoinThreshold` 10485760 (10 MB) Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run.
// MAGIC `spark.sql.tungsten.enabled` true When true, use the optimized Tungsten physical execution backend which explicitly manages memory and dynamically generates bytecode for expression evaluation.
// MAGIC `spark.sql.shuffle.partitions` 200 Configures the number of partitions to use when shuffling data for joins or aggregations.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | lamastex/scalable-data-science | db/xtraResources/ProgGuides1_6/sqlProgrammingGuide/004_performanceTuning_sqlProgGuide.scala | Scala | unlicense | 7,477 |
package score.discord.canti.command.api
import net.dv8tion.jda.api.entities.User
import net.dv8tion.jda.api.Permission
import score.discord.canti.wrappers.jda.ID
import score.discord.canti.wrappers.jda.Conversions.richSnowflake
/** A rule deciding whether a command invoker may execute a command. */
trait CommandPermissions:
/** True when `origin` is allowed to run the command. */
def canExecute(origin: CommandInvoker): Boolean
/** Human-readable statement of who may use the command. */
def description: String
object CommandPermissions:
  /** Restricts a command to members holding the Manage Server permission. */
  object ServerAdminOnly extends CommandPermissions:
    override def canExecute(origin: CommandInvoker): Boolean =
      origin.member.exists(_.hasPermission(Permission.MANAGE_SERVER))

    // Explicit result types added throughout for consistency; the original
    // annotated some overrides and left others inferred.
    override def description: String = "Only server admins may use this command."

  /** Allows any invoker to run the command. */
  object Anyone extends CommandPermissions:
    override def canExecute(origin: CommandInvoker): Boolean = true

    override def description: String = "Anyone may use this command."

  /** Restricts a command to the single user identified by `userId`. */
  class OneUserOnly(userId: ID[User]) extends CommandPermissions:
    override def canExecute(origin: CommandInvoker): Boolean =
      origin.user.id == userId

    override def description: String = s"This command may only be run by <@$userId>"
| ScoreUnder/canti-bot | src/main/scala/score/discord/canti/command/api/CommandPermissions.scala | Scala | agpl-3.0 | 1,028 |
/*
* This file is part of the sohva project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.sohva
package test
import org.scalatest._
import org.scalatest.OptionValues._
import gnieh.diffson.sprayJson._
import spray.json._
// Integration spec exercising basic CRUD against a live CouchDB via sohva.
// `synced` (from SohvaTestSpec) presumably awaits the returned future — confirm.
class TestBasic extends SohvaTestSpec with Matchers {
"an unknown document" should "not be retrieved" in {
synced(db.getDocById[TestDoc]("unknown-doc")) should be(None)
}
it should "be added correctly and can then be retrieved" in {
val doc = TestDoc2("new-doc", 4)
val saved = synced(db.saveDoc(doc))
saved should have(
'_id("new-doc"),
'toto(4))
synced(db.getDocById[TestDoc2]("new-doc")) should be(Some(saved))
}
// Relies on "new-doc" created by the previous test: tests in this spec are order-dependent.
"an existing document" should "have a revision" in {
synced(db.getDocById[TestDoc2]("new-doc")) match {
case Some(doc) => doc._rev should not be (None)
case None => fail("The document with id `new-doc` should exist")
}
}
it should "not be saved if we have an outdated version" in {
synced(db.getDocById[TestDoc2]("new-doc")) match {
case Some(doc) =>
// Saving with a stale revision ("0-0") must be rejected by CouchDB.
val thrown = the[SohvaException] thrownBy {
synced(db.saveDoc(doc.copy(toto = 1).withRev(Some("0-0"))))
}
// NOTE(review): `cause` is never asserted on; presumably
// findExpectedExceptionRecursively fails the test itself when no
// ConflictException is found in the cause chain — confirm.
val cause = CauseMatchers.findExpectedExceptionRecursively[ConflictException](thrown)
case None =>
fail("The document with id `new-doc` should exist")
}
}
it should "be saved if we have the last version and then get a new revision" in {
synced(db.getDocById[TestDoc2]("new-doc")) match {
case Some(doc) =>
val newest = synced(db.saveDoc(doc.copy(toto = 1).withRev(doc._rev)))
newest.toto should be(1)
// A successful save must produce a new revision string.
newest._rev.value should not be (doc._rev.get)
case None =>
fail("The document with id `new-doc` should exist")
}
}
it should "be patchable" in {
synced(db.getDocRevision("new-doc")) match {
case Some(rev) =>
// JSON Patch replacing the `toto` field server-side.
val patch = JsonPatch.parse("""[{ "op": "replace", "path": "/toto", "value": 453 }]""")
val newest = synced(db.patchDoc[TestDoc2]("new-doc", rev, patch))
newest.toto should be(453)
newest._rev.value should not be (rev)
case None =>
fail("The document with id `new-doc` should exist")
}
}
// Local document type with a non-ASCII payload for the UTF-8 round-trip test.
case class StringDoc(_id: String, value: String) extends IdRev
implicit val stringDocFormat = couchFormat[StringDoc]
"a document" should "be sent encoded in UTF-8" in {
val doc = StringDoc("utf8-doc", "éßèüäöàç€ẞÐẞŁª€ªÐŁ")
val saved = synced(db.saveDoc(doc))
saved._rev should be('defined)
saved.value should be(doc.value)
}
it should "be created into the database with a new identifier if none is given" in {
// NoCouchDoc (defined below the class) has no _id field; the server assigns one.
val doc = NoCouchDoc(value = 3)
implicit val noCouchDoc = jsonFormat1(NoCouchDoc)
synced(db.createDoc(doc)) match {
case OkResult(true, id, rev) =>
val newId = id.value
val saved = synced(db.getDocById[JsValue](newId))
saved.value match {
case JsObject(fields) =>
fields.get("value").value should be(JsNumber(3))
fields.get("_rev").value should be(JsString(rev.value))
case _ =>
fail("An object was expected")
}
case _ =>
fail("The document should have been saved")
}
}
}
case class NoCouchDoc(value: Int)
| gnieh/sohva | src/test/scala/gnieh/sohva/test/TestBasic.scala | Scala | apache-2.0 | 3,881 |
package io.circe
import cats.data.Validated
/**
* [[Decoder]] and [[Encoder]] instances for disjunction types with reasonable names for the sides.
*/
object disjunctionCodecs {
// JSON field names used to tag each side of the disjunction.
private[this] final val leftKey: String = "Left"
private[this] final val rightKey: String = "Right"
private[this] final val failureKey: String = "Invalid"
private[this] final val successKey: String = "Valid"
/** Decodes `Either` from an object keyed by "Left"/"Right". */
implicit final def decoderEither[A, B](implicit
da: Decoder[A],
db: Decoder[B]
): Decoder[Either[A, B]] = Decoder.decodeEither(leftKey, rightKey)
/** Decodes `Validated` from an object keyed by "Invalid"/"Valid". */
implicit final def decodeValidated[E, A](implicit
de: Decoder[E],
da: Decoder[A]
): Decoder[Validated[E, A]] = Decoder.decodeValidated(failureKey, successKey)
/** Encodes `Either` as an object keyed by "Left"/"Right". */
implicit final def encodeEither[A, B](implicit
ea: Encoder[A],
eb: Encoder[B]
): Encoder.AsObject[Either[A, B]] =
Encoder.encodeEither(leftKey, rightKey)
/** Encodes `Validated` as an object keyed by "Invalid"/"Valid". */
implicit final def encodeValidated[E, A](implicit
ee: Encoder[E],
ea: Encoder[A]
): Encoder.AsObject[Validated[E, A]] =
Encoder.encodeValidated(failureKey, successKey)
}
| travisbrown/circe | modules/core/shared/src/main/scala/io/circe/disjunctionCodecs.scala | Scala | apache-2.0 | 1,086 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.api.{ApiVersion, KAFKA_0_8_2}
import kafka.message._
import kafka.utils.{CoreUtils, TestUtils}
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Assert._
import org.junit.Test
import org.scalatest.Assertions.intercept
class KafkaConfigTest {
@Test
def testLogRetentionTimeHoursProvided() {
  // An hours-based retention setting is converted to milliseconds.
  val brokerProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
  brokerProps.put(KafkaConfig.LogRetentionTimeHoursProp, "1")
  val config = KafkaConfig.fromProps(brokerProps)
  assertEquals(60L * 60L * 1000L, config.logRetentionTimeMillis)
}
@Test
def testLogRetentionTimeMinutesProvided() {
// A minutes-based retention setting is converted to milliseconds.
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
@Test
def testLogRetentionTimeMsProvided() {
// A milliseconds-based retention setting is used as-is.
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
@Test
def testLogRetentionTimeNoConfigProvided() {
// Without any retention setting, the default is 7 days.
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val cfg = KafkaConfig.fromProps(props)
assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis)
}
@Test
def testLogRetentionTimeBothMinutesAndHoursProvided() {
// When both minutes and hours are set, the minutes value takes precedence
// (expected value below is 30 minutes, not 1 hour).
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")
props.put(KafkaConfig.LogRetentionTimeHoursProp, "1")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
@Test
def testLogRetentionTimeBothMinutesAndMsProvided() {
// When both ms and minutes are set, the ms value takes precedence
// (expected value below matches the 1800000 ms setting, not 10 minutes).
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")
props.put(KafkaConfig.LogRetentionTimeMinutesProp, "10")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
@Test
def testLogRetentionUnlimited() {
val props1 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props2 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props3 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props4 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props5 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
props1.put("log.retention.ms", "-1")
props2.put("log.retention.minutes", "-1")
props3.put("log.retention.hours", "-1")
val cfg1 = KafkaConfig.fromProps(props1)
val cfg2 = KafkaConfig.fromProps(props2)
val cfg3 = KafkaConfig.fromProps(props3)
assertEquals("Should be -1", -1, cfg1.logRetentionTimeMillis)
assertEquals("Should be -1", -1, cfg2.logRetentionTimeMillis)
assertEquals("Should be -1", -1, cfg3.logRetentionTimeMillis)
props4.put("log.retention.ms", "-1")
props4.put("log.retention.minutes", "30")
val cfg4 = KafkaConfig.fromProps(props4)
assertEquals("Should be -1", -1, cfg4.logRetentionTimeMillis)
props5.put("log.retention.ms", "0")
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props5)
}
}
@Test
def testLogRetentionValid {
val props1 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val props2 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val props3 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props1.put("log.retention.ms", "0")
props2.put("log.retention.minutes", "0")
props3.put("log.retention.hours", "0")
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props1)
}
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props2)
}
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props3)
}
}
@Test
def testAdvertiseDefaults() {
val port = "9999"
val hostName = "fake-host"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.remove(KafkaConfig.ListenersProp)
props.put(KafkaConfig.HostNameProp, hostName)
props.put(KafkaConfig.PortProp, port)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, hostName)
assertEquals(endpoint.port, port.toInt)
}
@Test
def testAdvertiseConfigured() {
val advertisedHostName = "routable-host"
val advertisedPort = "1234"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, advertisedHostName)
assertEquals(endpoint.port, advertisedPort.toInt)
}
@Test
def testAdvertisePortDefault() {
val advertisedHostName = "routable-host"
val port = "9999"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
props.put(KafkaConfig.PortProp, port)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, advertisedHostName)
assertEquals(endpoint.port, port.toInt)
}
@Test
def testAdvertiseHostNameDefault() {
val hostName = "routable-host"
val advertisedPort = "9999"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.HostNameProp, hostName)
props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, hostName)
assertEquals(endpoint.port, advertisedPort.toInt)
}
@Test
def testDuplicateListeners() {
val props = new Properties()
props.put(KafkaConfig.BrokerIdProp, "1")
props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
// listeners with duplicate port
props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
assert(!isValidKafkaConfig(props))
// listeners with duplicate protocol
props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,PLAINTEXT://localhost:9092")
assert(!isValidKafkaConfig(props))
// advertised listeners with duplicate port
props.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
assert(!isValidKafkaConfig(props))
}
@Test
def testBadListenerProtocol() {
val props = new Properties()
props.put(KafkaConfig.BrokerIdProp, "1")
props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
props.put(KafkaConfig.ListenersProp, "BAD://localhost:9091")
assert(!isValidKafkaConfig(props))
}
@Test
def testCaseInsensitiveListenerProtocol() {
val props = new Properties()
props.put(KafkaConfig.BrokerIdProp, "1")
props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
props.put(KafkaConfig.ListenersProp, "plaintext://localhost:9091,SsL://localhost:9092")
assert(isValidKafkaConfig(props))
}
@Test
def testListenerDefaults() {
val props = new Properties()
props.put(KafkaConfig.BrokerIdProp, "1")
props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
// configuration with host and port, but no listeners
props.put(KafkaConfig.HostNameProp, "myhost")
props.put(KafkaConfig.PortProp, "1111")
val conf = KafkaConfig.fromProps(props)
assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://myhost:1111"), conf.listeners)
// configuration with null host
props.remove(KafkaConfig.HostNameProp)
val conf2 = KafkaConfig.fromProps(props)
assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.listeners)
assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.advertisedListeners)
assertEquals(null, conf2.listeners(SecurityProtocol.PLAINTEXT).host)
// configuration with advertised host and port, and no advertised listeners
props.put(KafkaConfig.AdvertisedHostNameProp, "otherhost")
props.put(KafkaConfig.AdvertisedPortProp, "2222")
val conf3 = KafkaConfig.fromProps(props)
assertEquals(conf3.advertisedListeners, CoreUtils.listenerListToEndPoints("PLAINTEXT://otherhost:2222"))
}
@Test
def testVersionConfiguration() {
val props = new Properties()
props.put(KafkaConfig.BrokerIdProp, "1")
props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
val conf = KafkaConfig.fromProps(props)
assertEquals(ApiVersion.latestVersion, conf.interBrokerProtocolVersion)
props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.0")
// We need to set the message format version to make the configuration valid.
props.put(KafkaConfig.LogMessageFormatVersionProp, "0.8.2.0")
val conf2 = KafkaConfig.fromProps(props)
assertEquals(KAFKA_0_8_2, conf2.interBrokerProtocolVersion)
// check that 0.8.2.0 is the same as 0.8.2.1
props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.1")
// We need to set the message format version to make the configuration valid
props.put(KafkaConfig.LogMessageFormatVersionProp, "0.8.2.1")
val conf3 = KafkaConfig.fromProps(props)
assertEquals(KAFKA_0_8_2, conf3.interBrokerProtocolVersion)
//check that latest is newer than 0.8.2
assert(ApiVersion.latestVersion >= conf3.interBrokerProtocolVersion)
}
private def isValidKafkaConfig(props: Properties): Boolean = {
try {
KafkaConfig.fromProps(props)
true
} catch {
case _: IllegalArgumentException => false
}
}
@Test
def testUncleanLeaderElectionDefault() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
@Test
def testUncleanElectionDisabled() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(false))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, false)
}
@Test
def testUncleanElectionEnabled() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(true))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
@Test
def testUncleanElectionInvalid() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.UncleanLeaderElectionEnableProp, "invalid")
intercept[ConfigException] {
KafkaConfig.fromProps(props)
}
}
@Test
def testLogRollTimeMsProvided() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRollTimeMillis)
}
@Test
def testLogRollTimeBothMsAndHoursProvided() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")
props.put(KafkaConfig.LogRollTimeHoursProp, "1")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRollTimeMillis)
}
@Test
def testLogRollTimeNoConfigProvided() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val cfg = KafkaConfig.fromProps(props)
assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRollTimeMillis )
}
@Test
def testDefaultCompressionType() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "producer")
}
@Test
def testValidCompressionType() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put("compression.type", "gzip")
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "gzip")
}
@Test
def testInvalidCompressionType() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.CompressionTypeProp, "abc")
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props)
}
}
@Test
def testInvalidInterBrokerSecurityProtocol() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.ListenersProp, "SSL://localhost:0")
props.put(KafkaConfig.InterBrokerSecurityProtocolProp, SecurityProtocol.PLAINTEXT.toString)
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props)
}
}
@Test
def testEqualAdvertisedListenersProtocol() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9092,SSL://localhost:9093")
props.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9092,SSL://localhost:9093")
KafkaConfig.fromProps(props)
}
@Test
def testInvalidAdvertisedListenersProtocol() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.ListenersProp, "TRACE://localhost:9091,SSL://localhost:9093")
props.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9092")
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props)
}
}
@Test
def testFromPropsInvalid() {
def getBaseProperties(): Properties = {
val validRequiredProperties = new Properties()
validRequiredProperties.put(KafkaConfig.ZkConnectProp, "127.0.0.1:2181")
validRequiredProperties
}
// to ensure a basis is valid - bootstraps all needed validation
KafkaConfig.fromProps(getBaseProperties())
KafkaConfig.configNames().foreach(name => {
name match {
case KafkaConfig.ZkConnectProp => // ignore string
case KafkaConfig.ZkSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ZkConnectionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ZkSyncTimeMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ZkEnableSecureAclsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean")
case KafkaConfig.BrokerIdProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.NumNetworkThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.NumIoThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.BackgroundThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.QueuedMaxRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.RequestTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.AuthorizerClassNameProp => //ignore string
case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.HostNameProp => // ignore string
case KafkaConfig.AdvertisedHostNameProp => //ignore string
case KafkaConfig.AdvertisedPortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.SocketSendBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.SocketReceiveBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.MaxConnectionsPerIpOverridesProp =>
assertPropertyInvalid(getBaseProperties(), name, "127.0.0.1:not_a_number")
case KafkaConfig.ConnectionsMaxIdleMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.NumPartitionsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogDirsProp => // ignore string
case KafkaConfig.LogDirProp => // ignore string
case KafkaConfig.LogSegmentBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", Message.MinMessageOverhead - 1)
case KafkaConfig.LogRollTimeMillisProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogRollTimeHoursProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogRetentionTimeMillisProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogRetentionTimeMinutesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogRetentionTimeHoursProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogRetentionBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogCleanupIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogCleanupPolicyProp => assertPropertyInvalid(getBaseProperties(), name, "unknown_policy", "0")
case KafkaConfig.LogCleanerIoMaxBytesPerSecondProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogCleanerDedupeBufferSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "1024")
case KafkaConfig.LogCleanerDedupeBufferLoadFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogCleanerEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean")
case KafkaConfig.LogCleanerDeleteRetentionMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogCleanerMinCompactionLagMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogCleanerMinCleanRatioProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogIndexSizeMaxBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "3")
case KafkaConfig.LogFlushIntervalMessagesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.LogFlushSchedulerIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LogFlushIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.NumRecoveryThreadsPerDataDirProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.AutoCreateTopicsEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
case KafkaConfig.MinInSyncReplicasProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.ControllerSocketTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.DefaultReplicationFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaLagTimeMaxMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaSocketTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-2")
case KafkaConfig.ReplicaSocketReceiveBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaFetchMaxBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaFetchWaitMaxMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaFetchMinBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaFetchResponseMaxBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.NumReplicaFetchersProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.AutoLeaderRebalanceEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
case KafkaConfig.LeaderImbalancePerBrokerPercentageProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.UncleanLeaderElectionEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
case KafkaConfig.ControlledShutdownMaxRetriesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ControlledShutdownRetryBackoffMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.ControlledShutdownEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
case KafkaConfig.GroupMinSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.GroupMaxSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.OffsetMetadataMaxSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
case KafkaConfig.OffsetsLoadBufferSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetsTopicReplicationFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetsTopicPartitionsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetsTopicSegmentBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetsTopicCompressionCodecProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1")
case KafkaConfig.OffsetsRetentionMinutesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetsRetentionCheckIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetCommitTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.OffsetCommitRequiredAcksProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-2")
case KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.NumQuotaSamplesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.QuotaWindowSizeSecondsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
case KafkaConfig.DeleteTopicEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
case KafkaConfig.MetricNumSamplesProp => assertPropertyInvalid(getBaseProperties, name, "not_a_number", "-1", "0")
case KafkaConfig.MetricSampleWindowMsProp => assertPropertyInvalid(getBaseProperties, name, "not_a_number", "-1", "0")
case KafkaConfig.MetricReporterClassesProp => // ignore string
case KafkaConfig.RackProp => // ignore string
//SSL Configs
case KafkaConfig.PrincipalBuilderClassProp =>
case KafkaConfig.SslProtocolProp => // ignore string
case KafkaConfig.SslProviderProp => // ignore string
case KafkaConfig.SslEnabledProtocolsProp =>
case KafkaConfig.SslKeystoreTypeProp => // ignore string
case KafkaConfig.SslKeystoreLocationProp => // ignore string
case KafkaConfig.SslKeystorePasswordProp => // ignore string
case KafkaConfig.SslKeyPasswordProp => // ignore string
case KafkaConfig.SslTruststoreTypeProp => // ignore string
case KafkaConfig.SslTruststorePasswordProp => // ignore string
case KafkaConfig.SslTruststoreLocationProp => // ignore string
case KafkaConfig.SslKeyManagerAlgorithmProp =>
case KafkaConfig.SslTrustManagerAlgorithmProp =>
case KafkaConfig.SslClientAuthProp => // ignore string
case KafkaConfig.SslEndpointIdentificationAlgorithmProp => // ignore string
case KafkaConfig.SslSecureRandomImplementationProp => // ignore string
case KafkaConfig.SslCipherSuitesProp => // ignore string
//Sasl Configs
case KafkaConfig.SaslMechanismInterBrokerProtocolProp => // ignore
case KafkaConfig.SaslEnabledMechanismsProp =>
case KafkaConfig.SaslKerberosServiceNameProp => // ignore string
case KafkaConfig.SaslKerberosKinitCmdProp =>
case KafkaConfig.SaslKerberosTicketRenewWindowFactorProp =>
case KafkaConfig.SaslKerberosTicketRenewJitterProp =>
case KafkaConfig.SaslKerberosMinTimeBeforeReloginProp =>
case KafkaConfig.SaslKerberosPrincipalToLocalRulesProp => // ignore string
case _ => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1")
}
})
}
@Test
def testSpecificProperties(): Unit = {
val defaults = new Properties()
defaults.put(KafkaConfig.ZkConnectProp, "127.0.0.1:2181")
// For ZkConnectionTimeoutMs
defaults.put(KafkaConfig.ZkSessionTimeoutMsProp, "1234")
defaults.put(KafkaConfig.BrokerIdGenerationEnableProp, "false")
defaults.put(KafkaConfig.MaxReservedBrokerIdProp, "1")
defaults.put(KafkaConfig.BrokerIdProp, "1")
defaults.put(KafkaConfig.HostNameProp, "127.0.0.1")
defaults.put(KafkaConfig.PortProp, "1122")
defaults.put(KafkaConfig.MaxConnectionsPerIpOverridesProp, "127.0.0.1:2, 127.0.0.2:3")
defaults.put(KafkaConfig.LogDirProp, "/tmp1,/tmp2")
defaults.put(KafkaConfig.LogRollTimeHoursProp, "12")
defaults.put(KafkaConfig.LogRollTimeJitterHoursProp, "11")
defaults.put(KafkaConfig.LogRetentionTimeHoursProp, "10")
//For LogFlushIntervalMsProp
defaults.put(KafkaConfig.LogFlushSchedulerIntervalMsProp, "123")
defaults.put(KafkaConfig.OffsetsTopicCompressionCodecProp, SnappyCompressionCodec.codec.toString)
val config = KafkaConfig.fromProps(defaults)
assertEquals("127.0.0.1:2181", config.zkConnect)
assertEquals(1234, config.zkConnectionTimeoutMs)
assertEquals(false, config.brokerIdGenerationEnable)
assertEquals(1, config.maxReservedBrokerId)
assertEquals(1, config.brokerId)
assertEquals("127.0.0.1", config.hostName)
assertEquals(1122, config.advertisedPort)
assertEquals("127.0.0.1", config.advertisedHostName)
assertEquals(Map("127.0.0.1" -> 2, "127.0.0.2" -> 3), config.maxConnectionsPerIpOverrides)
assertEquals(List("/tmp1", "/tmp2"), config.logDirs)
assertEquals(12 * 60L * 1000L * 60, config.logRollTimeMillis)
assertEquals(11 * 60L * 1000L * 60, config.logRollTimeJitterMillis)
assertEquals(10 * 60L * 1000L * 60, config.logRetentionTimeMillis)
assertEquals(123L, config.logFlushIntervalMs)
assertEquals(SnappyCompressionCodec, config.offsetsTopicCompressionCodec)
}
private def assertPropertyInvalid(validRequiredProps: => Properties, name: String, values: Any*) {
values.foreach((value) => {
val props = validRequiredProps
props.setProperty(name, value.toString)
intercept[Exception] {
KafkaConfig.fromProps(props)
}
})
}
}
// ---- source: geeag/kafka — core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala (Scala, apache-2.0, 30,238 bytes) ----
package skuber.examples.guestbook
import skuber._
import skuber.json.format._
import akka.actor.{Actor, ActorRef, ActorLogging}
import akka.actor.Props
import akka.event.{LoggingReceive}
import akka.pattern.pipe
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Success, Failure}
import scala.collection._
import play.api.libs.iteratee.Iteratee
/**
* A KubernetesProxyActor proxies all requests from the Guestbook actors to Kubernetes. It is a slim wrapper
* around the skuber API, and enables sharing of skuber resources (e.g. underlying WS client
* connections, watches etc.) by all calling actors.
* It supports request messages to create/delete/get Service and Replication Controller
* resources on Kubernetes.
* For each of the above the actor simply creates and invokes a skuber request, and pipes the
* (future) response back to a result handler actor specified in the request message.
*
* It also supports a WatchReplicationController message that puts a reactive watch on a specified
* replication controller that forwards any updates received via the watch to one or more
* a specified actor. Multiple actors may watch the same controller - they reuse the same underlying watch.
* These watches are used by ScalerActor for monitoring the progress of scaling up/down of
* Guestbook services on the cluster.
*
* @author David O'Riordan
*/
object KubernetesProxyActor {
  // messages we accept
  sealed abstract trait KubernetesRequestMessage
  // Delete the named Service; the (future) outcome is piped to resultHandler.
  case class DeleteService(name: String, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Delete the named ReplicationController; the outcome is piped to resultHandler.
  case class DeleteReplicationController(name: String, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Create a Service from the given spec; the result is piped to resultHandler.
  case class CreateService(serviceSpec: Service, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Create a ReplicationController from the given spec; the result is piped to resultHandler.
  case class CreateReplicationController(rcSpec: ReplicationController, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Fetch the named ReplicationController; the result (or ResourceNotFound) is piped to resultHandler.
  case class GetReplicationController(name: String, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Update an existing ReplicationController to the new spec; the result is piped to resultHandler.
  case class UpdateReplicationController(newSpec: ReplicationController, resultHandler: ActorRef) extends KubernetesRequestMessage
  // Register `watcher` to be forwarded every update of `rc` observed via a shared Kubernetes watch.
  case class WatchReplicationController(rc: ReplicationController, watcher: ActorRef) extends KubernetesRequestMessage
  // Deregister `watcher` from updates of `rc` (the underlying watch stays open).
  case class UnwatchReplicationController(rc: ReplicationController, watcher: ActorRef) extends KubernetesRequestMessage
  // Terminate all watches and close the skuber client; actor replies with Closed.
  case object Close extends KubernetesRequestMessage
  case object Closed // response to Close request
  case object ResourceNotFound // return this if the target resource does not exist
}
class KubernetesProxyActor extends Actor with ActorLogging {

  val k8s = k8sInit // initialize skuber client (request context)
  // Active watches keyed by controller name: each entry pairs the underlying
  // Kubernetes watch with the set of actors to forward its events to.
  var rcWatching = mutable.HashMap[String, Watching]()

  // Runs a skuber request, mapping a Kubernetes 404 into the ResourceNotFound
  // message so callers can treat absence as a normal outcome, not a failure.
  // NOTE(review): `onSuccess`/`onFailure` are deprecated since Scala 2.12;
  // `foreach` / `failed.foreach` (or `onComplete`) are the modern equivalents.
  private def invoke(skuberRequest: => Future[Any]) : Future[Any] = {
    val reply = skuberRequest recover {
      case k8ex: K8SException if (k8ex.status.code.get==404) => {
        log.debug("resource not found on Kubernetes")
        KubernetesProxyActor.ResourceNotFound
      }
    }
    reply onSuccess {
      case msg => log.debug("Kubernetes proxy returning: " + msg)
    }
    reply onFailure {
      case k8ex: K8SException => log.error("Kubernetes API returned failure...status = " + k8ex.status.code)
    }
    reply
  }

  import KubernetesProxyActor._

  def receive = LoggingReceive {
    // CRUD requests: invoke skuber and pipe the eventual result straight to
    // the handler actor nominated in the request message.
    case DeleteService(name,resultHandler) => invoke(k8s delete[Service] name) pipeTo resultHandler
    case DeleteReplicationController(name, resultHandler) => invoke(k8s delete[ReplicationController] name) pipeTo resultHandler
    case CreateService(serviceSpec, resultHandler) => invoke(k8s create[Service] serviceSpec) pipeTo resultHandler
    case CreateReplicationController(rcSpec, resultHandler) => invoke(k8s create[ReplicationController] rcSpec) pipeTo resultHandler
    case GetReplicationController(name: String, resultHandler) => invoke(k8s get[ReplicationController] name) pipeTo resultHandler
    case UpdateReplicationController(newSpec, resultHandler) => invoke(k8s update[ReplicationController] newSpec) pipeTo resultHandler

    case WatchReplicationController(rc: ReplicationController, watcher: ActorRef) => {
      val currentlyWatching = rcWatching.get(rc.name)
      currentlyWatching match {
        case Some(watching) => {
          // already watching this RC - just add the watcher to the set of watching actors
          log.debug("Controller '" + rc.name +"' is already beng wateched - adding new watcher " + watcher.path)
          val newWatching = watching.copy(watchers = watching.watchers + watcher)
          rcWatching.put(rc.name, newWatching)
        }
        case None => {
          // not yet watching this controller
          // create a new watch on Kubernetes, and initialize the set of watchers on it
          log.debug("creating a watch on Kubernetes for controller + '" + rc.name + "', watcher is " + watcher.path )
          val watch = k8s watch rc
          val watching = Set(watcher)
          rcWatching += rc.name -> Watching(watch, watching)
          // this iteratee simply sends any updated RC objects received via the watch
          // on to all watchers
          // (the watcher set is re-read from rcWatching on every event, so actors
          // added/removed later are picked up by an already-running watch)
          watch.events run Iteratee.foreach { rcUpdateEvent =>
            rcWatching.get(rc.name).foreach { _.watchers.foreach { _ ! rcUpdateEvent._object } }
          }
        }
      }
    }

    case UnwatchReplicationController(rc: ReplicationController, watcher: ActorRef) => {
      // NOTE(review): the underlying Kubernetes watch is kept open even if the
      // watcher set becomes empty; it is only terminated on Close.
      rcWatching.get(rc.name).foreach { watching =>
        val newWatchers = watching.watchers - watcher
        log.debug("removing watcher on '" + rc.name + "'")
        rcWatching.put(rc.name, watching.copy(watchers=newWatchers))
      }
    }

    case Close => {
      // terminate every open watch, close the client, then acknowledge
      rcWatching foreach { case (_, watching) =>
        watching.watch.terminate
      }
      k8s.close
      System.out.println("Closed skuber client")
      sender ! Closed
    }
  }
}
// Associates an active Kubernetes watch stream with the set of actors that
// should be forwarded every ReplicationController update it emits.
case class Watching(watch: K8SWatch[K8SWatchEvent[ReplicationController]], watchers: Set[ActorRef]) | minatjanster/skuber | examples/src/main/scala/skuber/examples/guestbook/KubernetesProxyActor.scala | Scala | apache-2.0 | 6,191 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.arkadius.ljugakka
import scala.collection.immutable.IndexedSeq
object Utils {

  /**
   * Enrichment that lets an `Int` drive repeated evaluation of a computation,
   * e.g. `3.times(createRandomUser())`.
   */
  implicit class IntErichment(int: Int) {

    /**
     * Evaluates the by-name argument `f` once per repetition, `int` times in
     * total (a zero or negative count yields an empty sequence), and collects
     * the results in evaluation order.
     */
    def times[T](f: => T): IndexedSeq[T] = IndexedSeq.fill(int)(f)
  }
}
package controllers.Post
import controllers.{Secured}
import models.Post.ModelComment
import org.joda.time.DateTime
import play.api.mvc.{Action, Controller}
import play.modules.reactivemongo.MongoController
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.{BSONDocument, BSONObjectID}
import scala.concurrent.ExecutionContext.Implicits.global
import play.api.Logger
import scala.concurrent.Future
/**
* Created by marvin on 15-5-29.
*
*/
// Play controller for comments attached to posts, backed by the "comments"
// MongoDB collection via ReactiveMongo.
class Comment extends Controller with MongoController with Secured {
// `def` (not val): the collection handle is re-resolved on every access
// against the current database connection.
def collection = db.collection[BSONCollection]("comments")
// Binds the comment form and inserts a new comment for post `id`, stamping
// both creation and update dates with "now", then redirects to the post list.
// NOTE(review): on form-binding failure this responds 200 Ok("Error") rather
// than a 4xx — presumably intentional, but worth confirming.
def makeComment(id:String) = Action.async { implicit request =>
ModelComment.form.bindFromRequest.fold(
errors => Future.successful(Ok("Error")),
comment =>
collection.insert(comment.copy(postId = Some(BSONObjectID(id)),creationDate = Some(new DateTime()), updateDate = Some(new DateTime()))).map(_ =>
Redirect(controllers.Post.routes.Post.getAllPost()))
)
}
// Removes the comment with the given BSON object id and redirects to the post
// list. NOTE(review): `lastError` is ignored, so failures are silently
// swallowed — the redirect happens regardless of the remove outcome.
def deleteCommentById(id:String) = Action.async{implicit request =>
Logger.info("deleteCommentById")
collection.remove(BSONDocument("_id" -> BSONObjectID(id))).map{
case lastError => Redirect(controllers.Post.routes.Post.getAllPost())
}
}
/*def getCommentByPostId(postId:String) = Action.async{
implicit request =>
val comments = collection.find(BSONDocument("postId" -> BSONObjectID(postId))).cursor[models.Post.Comment]
comments.collect[List]().map { commentList =>
val contentList: List[String] = commentList.map {
comment => comment.content
}
Logger.info(contentList.toString())
Ok(views.html.Post.comments.render(contentList))
}
}*/
} | mawentao007/Mongo-play-angular | app/controllers/Post/Comment.scala | Scala | apache-2.0 | 1,762 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical.subquery
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.junit.{Before, Test}
/**
* Test for [[org.apache.flink.table.planner.plan.rules.logical.FlinkRewriteSubQueryRule]].
*/
class FlinkRewriteSubQueryRuleTest extends SubQueryTestBase {
// Shared test tables: x(a, b, c) and y(d, e, f), both typed (Int, Long, String).
@Before
def setup(): Unit = {
util.addTableSource[(Int, Long, String)]("x", 'a, 'b, 'c)
util.addTableSource[(Int, Long, String)]("y", 'd, 'e, 'f)
}
// Negative cases: scalar subqueries the rule must NOT rewrite — the aggregate
// is not COUNT(*) over the whole subquery (COUNT(col), or grouped COUNT).
@Test
def testNotCountStarInScalarQuery(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(e) FROM y WHERE d > 10) > 0")
}
@Test
def testNotEmptyGroupByInScalarQuery(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10 GROUP BY f) > 0")
}
// COUNT(*) compared against anything other than the "strictly positive"
// thresholds must not produce a semi join (checked via "joinType=[semi]").
@Test
def testUnsupportedConversionWithUnexpectedComparisonNumber(): Unit = {
// without correlation
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) > 1", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) >= 0", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) > -1", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE 0 <= (SELECT COUNT(*) FROM y WHERE d > 10)", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE -1 < (SELECT COUNT(*) FROM y WHERE d > 10)", "joinType=[semi]")
// with correlation
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) > 1", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) >= 0", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE 1 < (SELECT COUNT(*) FROM y WHERE a = d)", "joinType=[semi]")
util.verifyPlanNotExpected(
"SELECT * FROM x WHERE 0 <= (SELECT COUNT(*) FROM y WHERE a = d)", "joinType=[semi]")
}
// Positive cases: COUNT(*) scalar subqueries compared with a threshold that is
// equivalent to "count > 0" (e.g. > 0, > 0.9, >= 1, >= 0.1 and their mirrored
// forms), first without correlation, then with a correlated predicate a = d.
@Test
def testSupportedConversionWithoutCorrelation1(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) > 0")
}
@Test
def testSupportedConversionWithoutCorrelation2(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) > 0.9")
}
@Test
def testSupportedConversionWithoutCorrelation3(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) >= 1")
}
@Test
def testSupportedConversionWithoutCorrelation4(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE d > 10) >= 0.1")
}
@Test
def testSupportedConversionWithoutCorrelation5(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0 < (SELECT COUNT(*) FROM y WHERE d > 10)")
}
@Test
def testSupportedConversionWithoutCorrelation6(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0.99 < (SELECT COUNT(*) FROM y WHERE d > 10)")
}
@Test
def testSupportedConversionWithoutCorrelation7(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 1 <= (SELECT COUNT(*) FROM y WHERE d > 10)")
}
@Test
def testSupportedConversionWithoutCorrelation8(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0.01 <= (SELECT COUNT(*) FROM y WHERE d > 10)")
}
@Test
def testSupportedConversionWithCorrelation1(): Unit = {
// with correlation
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) > 0")
}
@Test
def testSupportedConversionWithCorrelation2(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) > 0.9")
}
@Test
def testSupportedConversionWithCorrelation3(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) >= 1")
}
@Test
def testSupportedConversionWithCorrelation4(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE (SELECT COUNT(*) FROM y WHERE a = d) >= 0.1")
}
@Test
def testSupportedConversionWithCorrelation5(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0 < (SELECT COUNT(*) FROM y WHERE a = d)")
}
@Test
def testSupportedConversionWithCorrelation6(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0.99 < (SELECT COUNT(*) FROM y WHERE a = d)")
}
@Test
def testSupportedConversionWithCorrelation7(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 1 <= (SELECT COUNT(*) FROM y WHERE a = d)")
}
@Test
def testSupportedConversionWithCorrelation8(): Unit = {
util.verifyPlan("SELECT * FROM x WHERE 0.01 <= (SELECT COUNT(*) FROM y WHERE a = d)")
}
// End-to-end plan check on TPC-DS query 41, whose correlated
// "(SELECT count(*) ...) > 0" predicate exercises this rewrite rule.
@Test
def testSqlFromTpcDsQ41(): Unit = {
util.addTableSource[(Int, String, String, String, String, String, String)]("item",
'i_manufact_id, 'i_manufact, 'i_product_name, 'i_category, 'i_color, 'i_units, 'i_size)
val sqlQuery =
"""
|SELECT DISTINCT (i_product_name)
|FROM item i1
|WHERE i_manufact_id BETWEEN 738 AND 738 + 40
|  AND (SELECT count(*) AS item_cnt
|FROM item
|WHERE (i_manufact = i1.i_manufact AND
|  ((i_category = 'Women' AND
|    (i_color = 'powder' OR i_color = 'khaki') AND
|    (i_units = 'Ounce' OR i_units = 'Oz') AND
|    (i_size = 'medium' OR i_size = 'extra large')
|  ) OR
|  (i_category = 'Women' AND
|    (i_color = 'brown' OR i_color = 'honeydew') AND
|    (i_units = 'Bunch' OR i_units = 'Ton') AND
|    (i_size = 'N/A' OR i_size = 'small')
|  ) OR
|  (i_category = 'Men' AND
|    (i_color = 'floral' OR i_color = 'deep') AND
|    (i_units = 'N/A' OR i_units = 'Dozen') AND
|    (i_size = 'petite' OR i_size = 'large')
|  ) OR
|  (i_category = 'Men' AND
|    (i_color = 'light' OR i_color = 'cornflower') AND
|    (i_units = 'Box' OR i_units = 'Pound') AND
|    (i_size = 'medium' OR i_size = 'extra large')
|  ))) OR
|  (i_manufact = i1.i_manufact AND
|  ((i_category = 'Women' AND
|    (i_color = 'midnight' OR i_color = 'snow') AND
|    (i_units = 'Pallet' OR i_units = 'Gross') AND
|    (i_size = 'medium' OR i_size = 'extra large')
|  ) OR
|  (i_category = 'Women' AND
|    (i_color = 'cyan' OR i_color = 'papaya') AND
|    (i_units = 'Cup' OR i_units = 'Dram') AND
|    (i_size = 'N/A' OR i_size = 'small')
|  ) OR
|  (i_category = 'Men' AND
|    (i_color = 'orange' OR i_color = 'frosted') AND
|    (i_units = 'Each' OR i_units = 'Tbl') AND
|    (i_size = 'petite' OR i_size = 'large')
|  ) OR
|  (i_category = 'Men' AND
|    (i_color = 'forest' OR i_color = 'ghost') AND
|    (i_units = 'Lb' OR i_units = 'Bundle') AND
|    (i_size = 'medium' OR i_size = 'extra large')
|  )))) > 0
|ORDER BY i_product_name
|LIMIT 100
""".stripMargin
util.verifyPlan(sqlQuery)
}
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/subquery/FlinkRewriteSubQueryRuleTest.scala | Scala | apache-2.0 | 8,003 |
import java.io.{BufferedReader, InputStreamReader}
import java.net.URL
import sun.net.www.protocol.http.HttpURLConnection
/**
 * Outcome of posting a document to the server (see PostRequest.send).
 * Sealed so that pattern matches over results are checked for exhaustiveness;
 * both variants live in this file.
 */
sealed abstract class RequestResult
/** The server answered "exists": a document is already present at `url`. */
case class DocExists(url: String) extends RequestResult
/** The server answered "ok": the document posted to `url` was accepted. */
case class OK(url: String) extends RequestResult
object PostRequest {

  /**
   * POSTs `json` to `url` and interprets the plain-text response body.
   *
   * @return [[DocExists]] when the server replies "exists", [[OK]] for "ok"
   * @throws Exception for any other response body (the body is the message)
   */
  def send(url: String, json: String): RequestResult = {
    val data = json.getBytes("UTF8")
    val urlObj = new URL(url)
    // Cast to the public JDK type java.net.HttpURLConnection instead of the
    // sun.net.www internal class imported at the top of this file:
    // openConnection() only guarantees the public type, and sun.* internals
    // are non-portable across JDKs.
    val conn = urlObj.openConnection.asInstanceOf[java.net.HttpURLConnection]
    try {
      init(conn)
      writeRequest(conn, data)
      getResponse(conn) match {
        case "exists" => DocExists(url)
        case "ok" => OK(url)
        case other => throw new Exception(other)
      }
    } finally {
      conn.disconnect()
    }
  }

  /** Configures the connection for a JSON POST with caching and redirects off. */
  private def init(conn: java.net.HttpURLConnection): Unit = {
    conn.setDoInput(true)
    conn.setDoOutput(true)
    conn.setUseCaches(false)
    conn.setInstanceFollowRedirects(false)
    conn.addRequestProperty("Content-Type", "application/json")
    conn.setRequestMethod("POST")
  }

  /** Writes the request body, setting Content-Length to match exactly. */
  private def writeRequest(conn: java.net.HttpURLConnection, data: Array[Byte]): Unit = {
    conn.setRequestProperty("Content-Length", data.length.toString)
    common.using(conn.getOutputStream) {
      stream =>
        stream.write(data)
        stream.flush()
    }
  }

  /**
   * Reads the whole response body, joining lines with '\r' (preserving the
   * original wire format expected by callers).
   * NOTE(review): this method uses `common.usage` while writeRequest uses
   * `common.using` — presumably both are resource-managing helpers; confirm
   * the differing names are intentional.
   */
  private def getResponse(conn: java.net.HttpURLConnection): String = {
    common.usage(conn.getInputStream) {
      is => common.usage(new InputStreamReader(is)) {
        isr => common.usage(new BufferedReader(isr)) {
          br =>
            val resp = new StringBuffer
            var line = br.readLine
            while (line != null) {
              if (resp.length > 0)
                resp.append('\r')
              resp.append(line)
              line = br.readLine
            }
            resp.toString
        }
      }
    }
  }
| abtv/personal-aggregator | rss-loader/src/main/scala/PostRequest.scala | Scala | mit | 1,849 |
package com.twitter.finagle.httpx
/** An HTTP response status, identified by its numeric code. */
case class Status(code: Int)

object Status {
  val Continue = Status(100)
  val SwitchingProtocols = Status(101)
  val Processing = Status(102)
  val Ok = Status(200)
  val Created = Status(201)
  val Accepted = Status(202)
  val NonAuthoritativeInformation = Status(203)
  val NoContent = Status(204)
  val ResetContent = Status(205)
  val PartialContent = Status(206)
  val MultiStatus = Status(207)
  val MultipleChoices = Status(300)
  val MovedPermanently = Status(301)
  val Found = Status(302)
  val SeeOther = Status(303)
  val NotModified = Status(304)
  val UseProxy = Status(305)
  val TemporaryRedirect = Status(307)
  val BadRequest = Status(400)
  val Unauthorized = Status(401)
  val PaymentRequired = Status(402)
  val Forbidden = Status(403)
  val NotFound = Status(404)
  val MethodNotAllowed = Status(405)
  val NotAcceptable = Status(406)
  val ProxyAuthenticationRequired = Status(407)
  val RequestTimeout = Status(408)
  val Conflict = Status(409)
  val Gone = Status(410)
  val LengthRequired = Status(411)
  val PreconditionFailed = Status(412)
  val RequestEntityTooLarge = Status(413)
  val RequestURITooLong = Status(414)
  val UnsupportedMediaType = Status(415)
  val RequestedRangeNotSatisfiable = Status(416)
  val ExpectationFailed = Status(417)
  val UnprocessableEntity = Status(422)
  val Locked = Status(423)
  val FailedDependency = Status(424)
  val UnorderedCollection = Status(425)
  val UpgradeRequired = Status(426)
  val RequestHeaderFieldsTooLarge = Status(431)
  val ClientClosedRequest = Status(499)
  val InternalServerError = Status(500)
  val NotImplemented = Status(501)
  val BadGateway = Status(502)
  val ServiceUnavailable = Status(503)
  val GatewayTimeout = Status(504)
  val HttpVersionNotSupported = Status(505)
  val VariantAlsoNegotiates = Status(506)
  val InsufficientStorage = Status(507)
  val NotExtended = Status(510)

  // Lookup table derived from the canonical vals above. This replaces a
  // 50-arm match that duplicated every code, so a new status only needs to be
  // added in two places (the val and this list) instead of three.
  private[this] val byCode: Map[Int, Status] = Seq(
    Continue, SwitchingProtocols, Processing, Ok, Created, Accepted,
    NonAuthoritativeInformation, NoContent, ResetContent, PartialContent,
    MultiStatus, MultipleChoices, MovedPermanently, Found, SeeOther,
    NotModified, UseProxy, TemporaryRedirect, BadRequest, Unauthorized,
    PaymentRequired, Forbidden, NotFound, MethodNotAllowed, NotAcceptable,
    ProxyAuthenticationRequired, RequestTimeout, Conflict, Gone,
    LengthRequired, PreconditionFailed, RequestEntityTooLarge,
    RequestURITooLong, UnsupportedMediaType, RequestedRangeNotSatisfiable,
    ExpectationFailed, UnprocessableEntity, Locked, FailedDependency,
    UnorderedCollection, UpgradeRequired, RequestHeaderFieldsTooLarge,
    ClientClosedRequest, InternalServerError, NotImplemented, BadGateway,
    ServiceUnavailable, GatewayTimeout, HttpVersionNotSupported,
    VariantAlsoNegotiates, InsufficientStorage, NotExtended
  ).map(s => s.code -> s).toMap

  /**
   * Returns the canonical Status instance for code `n`, or a fresh
   * `Status(n)` when the code has no named constant (same behaviour as the
   * former match's default arm).
   */
  def fromCode(n: Int): Status = byCode.getOrElse(n, Status(n))
}
| kristofa/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/Status.scala | Scala | apache-2.0 | 3,617 |
package api
import play.api.data.validation.ValidationError
import play.api.libs.json._
/**
* Error from an Api call.
*/
// An API error message with optional structured details.
case class ApiError(error: String, details: Option[JsObject] = None)
object ApiError {
// Wraps an already-built JSON details object.
def apply(error: String, details: JsObject): ApiError =
new ApiError(error, Some(details))
// Converts Play form/JSON validation errors into the details payload.
def apply(error: String, e: Seq[(JsPath, Seq[ValidationError])]): ApiError =
ApiError(error, JsError.toJson(e))
// Converts a JsError directly into the details payload.
def apply(error: String, e: JsError): ApiError =
ApiError(error, JsError.toJson(e))
// Serializes as {"type": "Error", "error": ..., "details": ...}; "details"
// is omitted-as-null handling is delegated to the Option writer.
implicit val apiErrorWrites = new Writes[ApiError] {
def writes(x: ApiError): JsValue =
Json.obj(
"type" -> "Error",
"error" -> x.error,
"details" -> x.details)
}
}
/**
* Result of an Api call.
*/
// Root of the API result hierarchy: every result knows how to render itself
// to JSON given a Writes for its payload type.
sealed abstract class ApiResult[+T] {
def toJson()(implicit writes: Writes[T]): JsValue
}
// Success branch: carries the payload and serializes it directly.
class ApiSuccess[+T](val value: T) extends ApiResult[T] {
def toJson()(implicit writes: Writes[T]) = Json.toJson(value)
}
object ApiSuccess {
def unapply[T](t: ApiSuccess[T]): Option[T] = Some(t.value)
}
case class ApiCreated[T](x: T) extends ApiSuccess(x)
case class ApiOk[T](x: T) extends ApiSuccess(x)
// Failure branch: carries an ApiError and serializes that instead of T.
class ApiFailure[+T](val value: ApiError) extends ApiResult[T] {
def toJson()(implicit writes: Writes[T]) = Json.toJson(value)
}
object ApiFailure {
def unapply[T](t: ApiFailure[T]): Option[ApiError] = Some(t.value)
}
case class ApiNotFound(x: ApiError) extends ApiFailure(x)
// NOTE(review): "ApiUnauthroized" and "ApiCouldNotProccessRequest" are
// misspelled, but they are public names — renaming would break callers.
case class ApiUnauthroized(x: ApiError) extends ApiFailure(x)
case class ApiCouldNotProccessRequest(x: ApiError) extends ApiFailure(x)
case class ApiInternalError() extends ApiFailure(ApiError("An internal server error occurred."))
| Blotre/blotre | app/api/Api.scala | Scala | mit | 1,761 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.libs
import java.util.Locale
/**
* MIME type utilities.
*/
object MimeTypes {
/**
* Retrieves the usual MIME type for a given extension.
*
* @param ext the file extension, e.g. `txt`
* @return the MIME type, if defined
*/
def forExtension(ext: String): Option[String] = {
  // The extension table is keyed lower-case, so normalise before the lookup;
  // Locale.ENGLISH avoids locale-dependent casing surprises (e.g. Turkish i).
  val key = ext.toLowerCase(Locale.ENGLISH)
  types.get(key)
}
/**
* Retrieves the usual MIME type for a given file name
*
* @param name the file name, e.g. `hello.txt`
* @return the MIME type, if defined
*/
def forFileName(name: String): Option[String] = {
  // Everything after the final dot is treated as the extension; a dotless
  // name yields itself, which will simply miss the lookup table.
  name.split('.').lastOption.flatMap(forExtension)
}
// Application-configured types override the built-in defaults: with `++` the
// right-hand map wins on duplicate keys.
def types: Map[String, String] = defaultTypes ++ applicationTypes
/**
* Mimetypes defined in the current application, as declared in application.conf
*/
// Reads the "mimetype" config block of the running Play application (if any):
// each sub-key is an extension, its string value the mime type. Keys whose
// value is missing are dropped by the collect; without a running application
// or config block this is an empty map.
def applicationTypes: Map[String, String] = play.api.Play.maybeApplication.flatMap { application =>
application.configuration.getConfig("mimetype").map { config =>
config.subKeys.map { key =>
(key, config.getString(key))
}.collect {
case ((key, Some(value))) =>
(key, value)
}.toMap
}
}.getOrElse(Map.empty)
/**
* tells you if mimeType is text or not.
* Useful to determine whether the charset suffix should be attached to Content-Type or not
* @param mimeType mimeType to check
* @return true if mimeType is text
*/
def isText(mimeType: String): Boolean = {
  // A type is textual if it lives under "text/" or is one of the explicit
  // extras in `additionalText` (e.g. application/json).
  val trimmed = mimeType.trim
  trimmed.startsWith("text/") || additionalText.contains(trimmed)
}
lazy val defaultTypes =
"""
3dm=x-world/x-3dmf
3dmf=x-world/x-3dmf
7z=application/x-7z-compressed
a=application/octet-stream
aab=application/x-authorware-bin
aam=application/x-authorware-map
aas=application/x-authorware-seg
abc=text/vndabc
ace=application/x-ace-compressed
acgi=text/html
afl=video/animaflex
ai=application/postscript
aif=audio/aiff
aifc=audio/aiff
aiff=audio/aiff
aim=application/x-aim
aip=text/x-audiosoft-intra
alz=application/x-alz-compressed
ani=application/x-navi-animation
aos=application/x-nokia-9000-communicator-add-on-software
aps=application/mime
arc=application/x-arc-compressed
arj=application/arj
art=image/x-jg
asf=video/x-ms-asf
asm=text/x-asm
asp=text/asp
asx=application/x-mplayer2
au=audio/basic
avi=video/x-msvideo
avs=video/avs-video
bcpio=application/x-bcpio
bin=application/mac-binary
bmp=image/bmp
boo=application/book
book=application/book
boz=application/x-bzip2
bsh=application/x-bsh
bz2=application/x-bzip2
bz=application/x-bzip
c++=text/plain
c=text/x-c
cab=application/vnd.ms-cab-compressed
cat=application/vndms-pkiseccat
cc=text/x-c
ccad=application/clariscad
cco=application/x-cocoa
cdf=application/cdf
cer=application/pkix-cert
cha=application/x-chat
chat=application/x-chat
chrt=application/vnd.kde.kchart
class=application/java
# ? class=application/java-vm
com=text/plain
conf=text/plain
cpio=application/x-cpio
cpp=text/x-c
cpt=application/mac-compactpro
crl=application/pkcs-crl
crt=application/pkix-cert
crx=application/x-chrome-extension
csh=text/x-scriptcsh
css=text/css
csv=text/csv
cxx=text/plain
dar=application/x-dar
dcr=application/x-director
deb=application/x-debian-package
deepv=application/x-deepv
def=text/plain
der=application/x-x509-ca-cert
dfont=application/x-font-ttf
dif=video/x-dv
dir=application/x-director
divx=video/divx
dl=video/dl
dmg=application/x-apple-diskimage
doc=application/msword
dot=application/msword
dp=application/commonground
drw=application/drafting
dump=application/octet-stream
dv=video/x-dv
dvi=application/x-dvi
dwf=drawing/x-dwf=(old)
dwg=application/acad
dxf=application/dxf
dxr=application/x-director
el=text/x-scriptelisp
elc=application/x-bytecodeelisp=(compiled=elisp)
eml=message/rfc822
env=application/x-envoy
eot=application/vnd.ms-fontobject
eps=application/postscript
es=application/x-esrehber
etx=text/x-setext
evy=application/envoy
exe=application/octet-stream
f77=text/x-fortran
f90=text/x-fortran
f=text/x-fortran
fdf=application/vndfdf
fif=application/fractals
fli=video/fli
flo=image/florian
flv=video/x-flv
flx=text/vndfmiflexstor
fmf=video/x-atomic3d-feature
for=text/x-fortran
fpx=image/vndfpx
frl=application/freeloader
funk=audio/make
g3=image/g3fax
g=text/plain
gif=image/gif
gl=video/gl
gsd=audio/x-gsm
gsm=audio/x-gsm
gsp=application/x-gsp
gss=application/x-gss
gtar=application/x-gtar
gz=application/x-compressed
gzip=application/x-gzip
h=text/x-h
hdf=application/x-hdf
help=application/x-helpfile
hgl=application/vndhp-hpgl
hh=text/x-h
hlb=text/x-script
hlp=application/hlp
hpg=application/vndhp-hpgl
hpgl=application/vndhp-hpgl
hqx=application/binhex
hta=application/hta
htc=text/x-component
htm=text/html
html=text/html
htmls=text/html
htt=text/webviewhtml
htx=text/html
ice=x-conference/x-cooltalk
ico=image/x-icon
ics=text/calendar
icz=text/calendar
idc=text/plain
ief=image/ief
iefs=image/ief
iges=application/iges
igs=application/iges
ima=application/x-ima
imap=application/x-httpd-imap
inf=application/inf
ins=application/x-internett-signup
ip=application/x-ip2
isu=video/x-isvideo
it=audio/it
iv=application/x-inventor
ivr=i-world/i-vrml
ivy=application/x-livescreen
jam=audio/x-jam
jav=text/x-java-source
java=text/x-java-source
jcm=application/x-java-commerce
jfif-tbnl=image/jpeg
jfif=image/jpeg
jnlp=application/x-java-jnlp-file
jpe=image/jpeg
jpeg=image/jpeg
jpg=image/jpeg
jps=image/x-jps
js=application/javascript
json=application/json
jut=image/jutvision
kar=audio/midi
karbon=application/vnd.kde.karbon
kfo=application/vnd.kde.kformula
flw=application/vnd.kde.kivio
kml=application/vnd.google-earth.kml+xml
kmz=application/vnd.google-earth.kmz
kon=application/vnd.kde.kontour
kpr=application/vnd.kde.kpresenter
kpt=application/vnd.kde.kpresenter
ksp=application/vnd.kde.kspread
kwd=application/vnd.kde.kword
kwt=application/vnd.kde.kword
ksh=text/x-scriptksh
la=audio/nspaudio
lam=audio/x-liveaudio
latex=application/x-latex
lha=application/lha
lhx=application/octet-stream
list=text/plain
lma=audio/nspaudio
log=text/plain
lsp=text/x-scriptlisp
lst=text/plain
lsx=text/x-la-asf
ltx=application/x-latex
lzh=application/octet-stream
lzx=application/lzx
m1v=video/mpeg
m2a=audio/mpeg
m2v=video/mpeg
m3u=audio/x-mpegurl
m=text/x-m
man=application/x-troff-man
manifest=text/cache-manifest
map=application/x-navimap
mar=text/plain
mbd=application/mbedlet
mc$=application/x-magic-cap-package-10
mcd=application/mcad
mcf=text/mcf
mcp=application/netmc
me=application/x-troff-me
mht=message/rfc822
mhtml=message/rfc822
mid=application/x-midi
midi=application/x-midi
mif=application/x-frame
mime=message/rfc822
mjf=audio/x-vndaudioexplosionmjuicemediafile
mjpg=video/x-motion-jpeg
mm=application/base64
mme=application/base64
mod=audio/mod
moov=video/quicktime
mov=video/quicktime
movie=video/x-sgi-movie
mp2=audio/mpeg
mp3=audio/mpeg
mp4=video/mp4
mpa=audio/mpeg
mpc=application/x-project
mpe=video/mpeg
mpeg=video/mpeg
mpg=video/mpeg
mpga=audio/mpeg
mpp=application/vndms-project
mpt=application/x-project
mpv=application/x-project
mpx=application/x-project
mrc=application/marc
ms=application/x-troff-ms
mv=video/x-sgi-movie
my=audio/make
mzz=application/x-vndaudioexplosionmzz
nap=image/naplps
naplps=image/naplps
nc=application/x-netcdf
ncm=application/vndnokiaconfiguration-message
nif=image/x-niff
niff=image/x-niff
nix=application/x-mix-transfer
nsc=application/x-conference
nvd=application/x-navidoc
o=application/octet-stream
oda=application/oda
odb=application/vnd.oasis.opendocument.database
odc=application/vnd.oasis.opendocument.chart
odf=application/vnd.oasis.opendocument.formula
odg=application/vnd.oasis.opendocument.graphics
odi=application/vnd.oasis.opendocument.image
odm=application/vnd.oasis.opendocument.text-master
odp=application/vnd.oasis.opendocument.presentation
ods=application/vnd.oasis.opendocument.spreadsheet
odt=application/vnd.oasis.opendocument.text
oga=audio/ogg
ogg=audio/ogg
ogv=video/ogg
omc=application/x-omc
omcd=application/x-omcdatamaker
omcr=application/x-omcregerator
otc=application/vnd.oasis.opendocument.chart-template
otf=application/vnd.oasis.opendocument.formula-template
otg=application/vnd.oasis.opendocument.graphics-template
oth=application/vnd.oasis.opendocument.text-web
oti=application/vnd.oasis.opendocument.image-template
otm=application/vnd.oasis.opendocument.text-master
otp=application/vnd.oasis.opendocument.presentation-template
ots=application/vnd.oasis.opendocument.spreadsheet-template
ott=application/vnd.oasis.opendocument.text-template
p10=application/pkcs10
p12=application/pkcs-12
p7a=application/x-pkcs7-signature
p7c=application/pkcs7-mime
p7m=application/pkcs7-mime
p7r=application/x-pkcs7-certreqresp
p7s=application/pkcs7-signature
p=text/x-pascal
part=application/pro_eng
pas=text/pascal
pbm=image/x-portable-bitmap
pcl=application/vndhp-pcl
pct=image/x-pict
pcx=image/x-pcx
pdb=chemical/x-pdb
pdf=application/pdf
pfunk=audio/make
pgm=image/x-portable-graymap
pic=image/pict
pict=image/pict
pkg=application/x-newton-compatible-pkg
pko=application/vndms-pkipko
pl=text/x-scriptperl
plx=application/x-pixclscript
pm4=application/x-pagemaker
pm5=application/x-pagemaker
pm=text/x-scriptperl-module
png=image/png
pnm=application/x-portable-anymap
pot=application/mspowerpoint
pov=model/x-pov
ppa=application/vndms-powerpoint
ppm=image/x-portable-pixmap
pps=application/mspowerpoint
ppt=application/mspowerpoint
ppz=application/mspowerpoint
pre=application/x-freelance
prt=application/pro_eng
ps=application/postscript
psd=application/octet-stream
pvu=paleovu/x-pv
pwz=application/vndms-powerpoint
py=text/x-scriptphyton
pyc=applicaiton/x-bytecodepython
qcp=audio/vndqcelp
qd3=x-world/x-3dmf
qd3d=x-world/x-3dmf
qif=image/x-quicktime
qt=video/quicktime
qtc=video/x-qtc
qti=image/x-quicktime
qtif=image/x-quicktime
ra=audio/x-pn-realaudio
ram=audio/x-pn-realaudio
rar=application/x-rar-compressed
ras=application/x-cmu-raster
rast=image/cmu-raster
rdf=application/rdf+xml
rexx=text/x-scriptrexx
rf=image/vndrn-realflash
rgb=image/x-rgb
rm=application/vndrn-realmedia
rmi=audio/mid
rmm=audio/x-pn-realaudio
rmp=audio/x-pn-realaudio
rng=application/ringing-tones
rnx=application/vndrn-realplayer
roff=application/x-troff
rp=image/vndrn-realpix
rpm=audio/x-pn-realaudio-plugin
rt=text/vndrn-realtext
rtf=text/richtext
rtx=text/richtext
rv=video/vndrn-realvideo
s=text/x-asm
s3m=audio/s3m
s7z=application/x-7z-compressed
saveme=application/octet-stream
sbk=application/x-tbook
scm=text/x-scriptscheme
sdml=text/plain
sdp=application/sdp
sdr=application/sounder
sea=application/sea
set=application/set
sgm=text/x-sgml
sgml=text/x-sgml
sh=text/x-scriptsh
shar=application/x-bsh
shtml=text/x-server-parsed-html
sid=audio/x-psid
skd=application/x-koan
skm=application/x-koan
skp=application/x-koan
skt=application/x-koan
sit=application/x-stuffit
sitx=application/x-stuffitx
sl=application/x-seelogo
smi=application/smil
smil=application/smil
snd=audio/basic
sol=application/solids
spc=text/x-speech
spl=application/futuresplash
spr=application/x-sprite
sprite=application/x-sprite
spx=audio/ogg
src=application/x-wais-source
ssi=text/x-server-parsed-html
ssm=application/streamingmedia
sst=application/vndms-pkicertstore
step=application/step
stl=application/sla
stp=application/step
sv4cpio=application/x-sv4cpio
sv4crc=application/x-sv4crc
svf=image/vnddwg
svg=image/svg+xml
svr=application/x-world
swf=application/x-shockwave-flash
t=application/x-troff
talk=text/x-speech
tar=application/x-tar
tbk=application/toolbook
tcl=text/x-scripttcl
tcsh=text/x-scripttcsh
tex=application/x-tex
texi=application/x-texinfo
texinfo=application/x-texinfo
text=text/plain
tgz=application/gnutar
tif=image/tiff
tiff=image/tiff
tr=application/x-troff
tsi=audio/tsp-audio
tsp=application/dsptype
tsv=text/tab-separated-values
turbot=image/florian
tte=application/x-font-ttf
ttf=application/x-font-ttf
ttl=text/turtle
txt=text/plain
uil=text/x-uil
uni=text/uri-list
unis=text/uri-list
unv=application/i-deas
uri=text/uri-list
uris=text/uri-list
ustar=application/x-ustar
uu=text/x-uuencode
uue=text/x-uuencode
vcd=application/x-cdlink
vcf=text/x-vcard
vcard=text/x-vcard
vcs=text/x-vcalendar
vda=application/vda
vdo=video/vdo
vew=application/groupwise
viv=video/vivo
vivo=video/vivo
vmd=application/vocaltec-media-desc
vmf=application/vocaltec-media-file
voc=audio/voc
vos=video/vosaic
vox=audio/voxware
vqe=audio/x-twinvq-plugin
vqf=audio/x-twinvq
vql=audio/x-twinvq-plugin
vrml=application/x-vrml
vrt=x-world/x-vrt
vsd=application/x-visio
vst=application/x-visio
vsw=application/x-visio
w60=application/wordperfect60
w61=application/wordperfect61
w6w=application/msword
wav=audio/wav
wb1=application/x-qpro
wbmp=image/vnd.wap.wbmp
web=application/vndxara
wiz=application/msword
wk1=application/x-123
wmf=windows/metafile
wml=text/vnd.wap.wml
wmlc=application/vnd.wap.wmlc
wmls=text/vnd.wap.wmlscript
wmlsc=application/vnd.wap.wmlscriptc
woff=application/font-woff
word=application/msword
wp5=application/wordperfect
wp6=application/wordperfect
wp=application/wordperfect
wpd=application/wordperfect
wq1=application/x-lotus
wri=application/mswrite
wrl=application/x-world
wrz=model/vrml
wsc=text/scriplet
wsrc=application/x-wais-source
wtk=application/x-wintalk
x-png=image/png
xbm=image/x-xbitmap
xdr=video/x-amt-demorun
xgz=xgl/drawing
xif=image/vndxiff
xl=application/excel
xla=application/excel
xlb=application/excel
xlc=application/excel
xld=application/excel
xlk=application/excel
xll=application/excel
xlm=application/excel
xls=application/excel
xlt=application/excel
xlv=application/excel
xlw=application/excel
xm=audio/xm
xml=application/xml
xmz=xgl/movie
xpi=application/x-xpinstall
xpix=application/x-vndls-xpix
xpm=image/x-xpixmap
xsr=video/x-amt-showrun
xwd=image/x-xwd
xyz=chemical/x-pdb
z=application/x-compress
zip=application/zip
zoo=application/octet-stream
zsh=text/x-scriptzsh
# Office 2007 mess - http://wdg.uncc.edu/Microsoft_Office_2007_MIME_Types_for_Apache_and_IIS
docx=application/vnd.openxmlformats-officedocument.wordprocessingml.document
docm=application/vnd.ms-word.document.macroEnabled.12
dotx=application/vnd.openxmlformats-officedocument.wordprocessingml.template
dotm=application/vnd.ms-word.template.macroEnabled.12
xlsx=application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
xlsm=application/vnd.ms-excel.sheet.macroEnabled.12
xltx=application/vnd.openxmlformats-officedocument.spreadsheetml.template
xltm=application/vnd.ms-excel.template.macroEnabled.12
xlsb=application/vnd.ms-excel.sheet.binary.macroEnabled.12
xlam=application/vnd.ms-excel.addin.macroEnabled.12
pptx=application/vnd.openxmlformats-officedocument.presentationml.presentation
pptm=application/vnd.ms-powerpoint.presentation.macroEnabled.12
ppsx=application/vnd.openxmlformats-officedocument.presentationml.slideshow
ppsm=application/vnd.ms-powerpoint.slideshow.macroEnabled.12
potx=application/vnd.openxmlformats-officedocument.presentationml.template
potm=application/vnd.ms-powerpoint.template.macroEnabled.12
ppam=application/vnd.ms-powerpoint.addin.macroEnabled.12
sldx=application/vnd.openxmlformats-officedocument.presentationml.slide
sldm=application/vnd.ms-powerpoint.slide.macroEnabled.12
thmx=application/vnd.ms-officetheme
onetoc=application/onenote
onetoc2=application/onenote
onetmp=application/onenote
onepkg=application/onenote
# koffice
# iWork
key=application/x-iwork-keynote-sffkey
kth=application/x-iwork-keynote-sffkth
nmbtemplate=application/x-iwork-numbers-sfftemplate
numbers=application/x-iwork-numbers-sffnumbers
pages=application/x-iwork-pages-sffpages
template=application/x-iwork-pages-sfftemplate
# Extensions for Mozilla apps (Firefox and friends)
xpi=application/x-xpinstall
""".split('\\n').map(_.trim).filter(_.size > 0).filter(_(0) != '#').map(_.split('=')).map(parts =>
parts(0) -> parts.drop(1).mkString).toMap
// Non-"text/" mime types that isText should still treat as textual, parsed
// from the embedded list by trimming lines and skipping blanks and '#' lines.
lazy val additionalText =
"""
application/json
application/javascript
""".split('\n').map(_.trim).filter(_.size > 0).filter(_(0) != '#')
}
| jyotikamboj/container | pf-framework/src/play/src/main/scala/play/api/libs/MimeTypes.scala | Scala | mit | 20,497 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryNode, RowIterator, SparkPlan}
import org.apache.spark.sql.execution.metric.{LongSQLMetric, SQLMetrics}
/**
 * :: DeveloperApi ::
 * Performs a sort merge join of two child relations.
 *
 * Both children are required to be clustered on their join keys and sorted ascending by them
 * (see [[requiredChildDistribution]] and [[requiredChildOrdering]]); the actual pairing of
 * matching rows is delegated to [[SortMergeJoinScanner]].
 */
@DeveloperApi
case class SortMergeJoin(
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    left: SparkPlan,
    right: SparkPlan) extends BinaryNode {
  // SQL metrics surfaced in the web UI; incremented as rows flow through the join below.
  override private[sql] lazy val metrics = Map(
    "numLeftRows" -> SQLMetrics.createLongMetric(sparkContext, "number of left rows"),
    "numRightRows" -> SQLMetrics.createLongMetric(sparkContext, "number of right rows"),
    "numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
  // Inner join output: left columns followed by right columns.
  override def output: Seq[Attribute] = left.output ++ right.output
  override def outputPartitioning: Partitioning =
    PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning))
  // Each child must be clustered on its join keys so matching keys land in the same partition.
  override def requiredChildDistribution: Seq[Distribution] =
    ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
  override def outputOrdering: Seq[SortOrder] = requiredOrders(leftKeys)
  // Each child must also arrive sorted ascending on its join keys.
  override def requiredChildOrdering: Seq[Seq[SortOrder]] =
    requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
  // Unsafe (binary) row processing is only possible when codegen is enabled and the keys and
  // output schema are all supported by UnsafeProjection.
  protected[this] def isUnsafeMode: Boolean = {
    (codegenEnabled && unsafeEnabled
      && UnsafeProjection.canSupport(leftKeys)
      && UnsafeProjection.canSupport(rightKeys)
      && UnsafeProjection.canSupport(schema))
  }
  override def outputsUnsafeRows: Boolean = isUnsafeMode
  override def canProcessUnsafeRows: Boolean = isUnsafeMode
  override def canProcessSafeRows: Boolean = !isUnsafeMode
  private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] = {
    // This must be ascending in order to agree with the `keyOrdering` defined in `doExecute()`.
    keys.map(SortOrder(_, Ascending))
  }
  protected override def doExecute(): RDD[InternalRow] = {
    val numLeftRows = longMetric("numLeftRows")
    val numRightRows = longMetric("numRightRows")
    val numOutputRows = longMetric("numOutputRows")
    // Children are co-partitioned (see requiredChildDistribution), so each pair of partitions
    // can be joined independently.
    left.execute().zipPartitions(right.execute()) { (leftIter, rightIter) =>
      new RowIterator {
        // The projection used to extract keys from input rows of the left child.
        private[this] val leftKeyGenerator = {
          if (isUnsafeMode) {
            // It is very important to use UnsafeProjection if input rows are UnsafeRows.
            // Otherwise, GenerateProjection will cause wrong results.
            UnsafeProjection.create(leftKeys, left.output)
          } else {
            newProjection(leftKeys, left.output)
          }
        }
        // The projection used to extract keys from input rows of the right child.
        private[this] val rightKeyGenerator = {
          if (isUnsafeMode) {
            // It is very important to use UnsafeProjection if input rows are UnsafeRows.
            // Otherwise, GenerateProjection will cause wrong results.
            UnsafeProjection.create(rightKeys, right.output)
          } else {
            newProjection(rightKeys, right.output)
          }
        }
        // An ordering that can be used to compare keys from both sides.
        private[this] val keyOrdering = newNaturalAscendingOrdering(leftKeys.map(_.dataType))
        // Current left row plus the buffered group of right rows that share its join key.
        private[this] var currentLeftRow: InternalRow = _
        private[this] var currentRightMatches: ArrayBuffer[InternalRow] = _
        // Index into currentRightMatches; -1 means no current match group has been loaded.
        private[this] var currentMatchIdx: Int = -1
        private[this] val smjScanner = new SortMergeJoinScanner(
          leftKeyGenerator,
          rightKeyGenerator,
          keyOrdering,
          RowIterator.fromScala(leftIter),
          numLeftRows,
          RowIterator.fromScala(rightIter),
          numRightRows
        )
        private[this] val joinRow = new JoinedRow
        // In unsafe mode the joined (wrapper) row must be flattened into a single UnsafeRow.
        private[this] val resultProjection: (InternalRow) => InternalRow = {
          if (isUnsafeMode) {
            UnsafeProjection.create(schema)
          } else {
            identity[InternalRow]
          }
        }
        override def advanceNext(): Boolean = {
          // If the current match group is exhausted (or on the first call), ask the scanner
          // for the next streamed row with matches.
          if (currentMatchIdx == -1 || currentMatchIdx == currentRightMatches.length) {
            if (smjScanner.findNextInnerJoinRows()) {
              currentRightMatches = smjScanner.getBufferedMatches
              currentLeftRow = smjScanner.getStreamedRow
              currentMatchIdx = 0
            } else {
              currentRightMatches = null
              currentLeftRow = null
              currentMatchIdx = -1
            }
          }
          if (currentLeftRow != null) {
            // Emit one (left, right-match) pair per call.
            joinRow(currentLeftRow, currentRightMatches(currentMatchIdx))
            currentMatchIdx += 1
            numOutputRows += 1
            true
          } else {
            false
          }
        }
        override def getRow: InternalRow = resultProjection(joinRow)
      }.toScala
    }
  }
}
/**
 * Helper class that is used to implement [[SortMergeJoin]] and [[SortMergeOuterJoin]].
 *
 * To perform an inner (outer) join, users of this class call [[findNextInnerJoinRows()]]
 * ([[findNextOuterJoinRows()]]), which returns `true` if a result has been produced and `false`
 * otherwise. If a result has been produced, then the caller may call [[getStreamedRow]] to return
 * the matching row from the streamed input and may call [[getBufferedMatches]] to return the
 * sequence of matching rows from the buffered input (in the case of an outer join, this will return
 * an empty sequence if there are no matches from the buffered input). For efficiency, both of these
 * methods return mutable objects which are re-used across calls to the `findNext*JoinRows()`
 * methods.
 *
 * Both inputs are assumed to be sorted ascending by their join keys, which is what allows the
 * scanner to advance two cursors in lock-step instead of buffering a whole side.
 *
 * @param streamedKeyGenerator a projection that produces join keys from the streamed input.
 * @param bufferedKeyGenerator a projection that produces join keys from the buffered input.
 * @param keyOrdering an ordering which can be used to compare join keys.
 * @param streamedIter an input whose rows will be streamed.
 * @param numStreamedRows metric incremented for each row consumed from the streamed input.
 * @param bufferedIter an input whose rows will be buffered to construct sequences of rows that
 *                     have the same join key.
 * @param numBufferedRows metric incremented for each row consumed from the buffered input.
 */
private[joins] class SortMergeJoinScanner(
    streamedKeyGenerator: Projection,
    bufferedKeyGenerator: Projection,
    keyOrdering: Ordering[InternalRow],
    streamedIter: RowIterator,
    numStreamedRows: LongSQLMetric,
    bufferedIter: RowIterator,
    numBufferedRows: LongSQLMetric) {
  // Current cursor position on each side; null once an iterator is exhausted.
  private[this] var streamedRow: InternalRow = _
  private[this] var streamedRowKey: InternalRow = _
  private[this] var bufferedRow: InternalRow = _
  // Note: this is guaranteed to never have any null columns:
  private[this] var bufferedRowKey: InternalRow = _
  /**
   * The join key for the rows buffered in `bufferedMatches`, or null if `bufferedMatches` is empty
   */
  private[this] var matchJoinKey: InternalRow = _
  /** Buffered rows from the buffered side of the join. This is empty if there are no matches. */
  private[this] val bufferedMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
  // Initialization (note: do _not_ want to advance streamed here).
  advancedBufferedToRowWithNullFreeJoinKey()
  // --- Public methods ---------------------------------------------------------------------------
  def getStreamedRow: InternalRow = streamedRow
  def getBufferedMatches: ArrayBuffer[InternalRow] = bufferedMatches
  /**
   * Advances both input iterators, stopping when we have found rows with matching join keys.
   * @return true if matching rows have been found and false otherwise. If this returns true, then
   *         [[getStreamedRow]] and [[getBufferedMatches]] can be called to construct the join
   *         results.
   */
  final def findNextInnerJoinRows(): Boolean = {
    while (advancedStreamed() && streamedRowKey.anyNull) {
      // Advance the streamed side of the join until we find the next row whose join key contains
      // no nulls or we hit the end of the streamed iterator.
    }
    if (streamedRow == null) {
      // We have consumed the entire streamed iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
      // The new streamed row has the same join key as the previous row, so return the same matches.
      true
    } else if (bufferedRow == null) {
      // The streamed row's join key does not match the current batch of buffered rows and there are
      // no more rows to read from the buffered iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else {
      // Advance both the streamed and buffered iterators to find the next pair of matching rows.
      var comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
      do {
        if (streamedRowKey.anyNull) {
          // Null join keys can never match; skip this streamed row.
          advancedStreamed()
        } else {
          assert(!bufferedRowKey.anyNull)
          comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
          // Advance whichever cursor is behind (classic merge step).
          if (comp > 0) advancedBufferedToRowWithNullFreeJoinKey()
          else if (comp < 0) advancedStreamed()
        }
      } while (streamedRow != null && bufferedRow != null && comp != 0)
      if (streamedRow == null || bufferedRow == null) {
        // We have either hit the end of one of the iterators, so there can be no more matches.
        matchJoinKey = null
        bufferedMatches.clear()
        false
      } else {
        // The streamed row's join key matches the current buffered row's join, so walk through the
        // buffered iterator to buffer the rest of the matching rows.
        assert(comp == 0)
        bufferMatchingRows()
        true
      }
    }
  }
  /**
   * Advances the streamed input iterator and buffers all rows from the buffered input that
   * have matching keys.
   * @return true if the streamed iterator returned a row, false otherwise. If this returns true,
   *         then [[getStreamedRow]] and [[getBufferedMatches]] can be called to produce the outer
   *         join results.
   */
  final def findNextOuterJoinRows(): Boolean = {
    if (!advancedStreamed()) {
      // We have consumed the entire streamed iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else {
      if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
        // Matches the current group, so do nothing.
      } else {
        // The streamed row does not match the current group.
        matchJoinKey = null
        bufferedMatches.clear()
        if (bufferedRow != null && !streamedRowKey.anyNull) {
          // The buffered iterator could still contain matching rows, so we'll need to walk through
          // it until we either find matches or pass where they would be found.
          var comp = 1
          do {
            comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
          } while (comp > 0 && advancedBufferedToRowWithNullFreeJoinKey())
          if (comp == 0) {
            // We have found matches, so buffer them (this updates matchJoinKey)
            bufferMatchingRows()
          } else {
            // We have overshot the position where the row would be found, hence no matches.
          }
        }
      }
      // If there is a streamed input then we always return true
      true
    }
  }
  // --- Private methods --------------------------------------------------------------------------
  /**
   * Advance the streamed iterator and compute the new row's join key.
   * @return true if the streamed iterator returned a row and false otherwise.
   */
  private def advancedStreamed(): Boolean = {
    if (streamedIter.advanceNext()) {
      streamedRow = streamedIter.getRow
      streamedRowKey = streamedKeyGenerator(streamedRow)
      numStreamedRows += 1
      true
    } else {
      streamedRow = null
      streamedRowKey = null
      false
    }
  }
  /**
   * Advance the buffered iterator until we find a row with join key that does not contain nulls.
   * @return true if the buffered iterator returned a row and false otherwise.
   */
  private def advancedBufferedToRowWithNullFreeJoinKey(): Boolean = {
    var foundRow: Boolean = false
    while (!foundRow && bufferedIter.advanceNext()) {
      bufferedRow = bufferedIter.getRow
      bufferedRowKey = bufferedKeyGenerator(bufferedRow)
      numBufferedRows += 1
      foundRow = !bufferedRowKey.anyNull
    }
    if (!foundRow) {
      bufferedRow = null
      bufferedRowKey = null
      false
    } else {
      true
    }
  }
  /**
   * Called when the streamed and buffered join keys match in order to buffer the matching rows.
   */
  private def bufferMatchingRows(): Unit = {
    assert(streamedRowKey != null)
    assert(!streamedRowKey.anyNull)
    assert(bufferedRowKey != null)
    assert(!bufferedRowKey.anyNull)
    assert(keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
    // This join key may have been produced by a mutable projection, so we need to make a copy:
    matchJoinKey = streamedRowKey.copy()
    bufferedMatches.clear()
    do {
      bufferedMatches += bufferedRow.copy() // need to copy mutable rows before buffering them
      advancedBufferedToRowWithNullFreeJoinKey()
    } while (bufferedRow != null && keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoin.scala | Scala | apache-2.0 | 14,588 |
package edu.gemini.pit.ui
/** Aliases shared by the action package. */
package object action {
  // Re-export of the ShellAction type from the util package so action classes
  // can refer to it without the fully-qualified name.
  type ShellAction[A] = edu.gemini.pit.ui.util.ShellAction[A]
}
package com.ximalaya.ratel.server
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.Output
import com.ximalaya.ratel.common.{MessageType, RpcRequest, RpcResponse}
import com.ximalaya.ratel.interfaces.RpcTrait
import com.ximalaya.ratel.serializer.{RpcRequestSerializer, RpcResponseSerializer}
import io.netty.buffer.ByteBuf
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.codec.LengthFieldBasedFrameDecoder
import org.junit.{Before, Test}
/**
* Created by think on 2017/7/16.
*/
/**
 * Minimal [[RpcTrait]] implementation used as the service under test in
 * [[RequestDecoderTest]]; `compute` always answers zero.
 */
class ComputeServer(override val version: Long) extends RpcTrait {
  def compute(i: Boolean, j: Boolean): Long = 0L
}
/**
 * Tests for the server-side Netty pipeline (frame decoder -> RequestDecoder ->
 * RequestToResponseDecoder -> ResponseEncoder) using EmbeddedChannel.
 *
 * Frame layout written by these tests: an 8-byte length prefix, a 4-byte message-type id,
 * then the payload (matching LengthFieldBasedFrameDecoder(Int.MaxValue, 0, 8)).
 */
class RequestDecoderTest {
  private val kryo:Kryo=new Kryo()
  // channel: pipeline dispatching to ComputeServer; channel2: pipeline dispatching to TestServer.
  private val channel:EmbeddedChannel= new EmbeddedChannel()
  private val rpcTrait:RpcTrait=new ComputeServer(1)
  private val testServer:RpcTrait=new TestServer
  private val channel2:EmbeddedChannel=new EmbeddedChannel()
  @Before
  def before():Unit={
    // Register the RPC envelope serializers before any encode/decode happens.
    kryo.register(classOf[RpcRequest],new RpcRequestSerializer)
    kryo.register(classOf[RpcResponse],new RpcResponseSerializer)
    channel.pipeline().addLast(new LengthFieldBasedFrameDecoder(Int.MaxValue,0,8))
      .addLast(new RequestDecoder(kryo))
      .addLast(new RequestToResponseDecoder(rpcTrait,classOf[ComputeServer],kryo))
      .addLast(new ResponseEncoder(kryo))
    kryo.register(classOf[Person],new PersonSerializer)
    channel2.pipeline().addLast(new LengthFieldBasedFrameDecoder(Int.MaxValue,0,8))
      .addLast(new RequestDecoder(kryo))
      .addLast(new RequestToResponseDecoder(testServer,classOf[TestServer],kryo))
      .addLast(new ResponseEncoder(kryo))
  }
  // A HELLO frame delivered in one piece should pass through the pipeline unchanged.
  @Test
  def testHelloMessage(): Unit ={
    val buf = channel.alloc().buffer(20, 20).writeLong(12)
    buf.writeInt(MessageType.HELLO.id)
    buf.writeLong(1)
    val input=buf.duplicate()
    assert(channel.writeInbound(input.retain()))
    assert(channel.finish())
    val read=channel.readInbound[ByteBuf]()
    assert(read.readableBytes()==20)
    assert(read.readLong()==12)
    assert(read.readInt()==MessageType.HELLO.id)
    assert(read.readLong()==1)
    assert(channel.readInbound()==null)
    buf.release()
    read.release()
  }
  // A HELLO frame split across several writes must be reassembled by the frame decoder:
  // no message is produced until the final fragment arrives.
  @Test
  def testHelloMessage2():Unit= {
    val buf = channel.alloc().buffer(20, 20).writeLong(12)
    buf.writeInt(MessageType.HELLO.id)
    buf.writeLong(31)
    val input = buf.duplicate()
    assert(!channel.writeInbound(input.readBytes(4)))
    assert(!channel.writeInbound(input.readBytes(4)))
    assert(!channel.writeInbound(input.readBytes(7)))
    assume(channel.writeInbound(input.readBytes(5)))
    val read = channel.readInbound[ByteBuf]()
    assume(read != null)
    assert(read.readableBytes()==20)
    assert(read.readLong()==12)
    assert(read.readInt()==MessageType.HELLO.id)
    assert(read.readLong()==31)
    assume(channel.readInbound[ByteBuf]()==null)
    buf.release()
    read.release()
  }
  // Same single-write round-trip check as testHelloMessage, but for HEART frames.
  @Test
  def testHeartMessage():Unit={
    val buf=channel.alloc().buffer(20,20).writeLong(12)
    buf.writeInt(MessageType.HEART.id)
    buf.writeLong(2)
    val input=buf. duplicate()
    assert(channel.writeInbound(input.retain()))
    assert(channel.finish())
    val read=channel.readInbound[ByteBuf]()
    assert(read.readableBytes()==20)
    assert(read.readLong()==12)
    assert(read.readInt()==MessageType.HEART.id)
    assert(read.readLong()==2)
    assert(channel.readInbound()==null)
    buf.release()
    read.release()
  }
  // Same fragmented-delivery check as testHelloMessage2, but for HEART frames.
  @Test
  def testHeartMessage2():Unit= {
    val buf = channel.alloc().buffer(20, 20).writeLong(12)
    buf.writeInt(MessageType.HEART.id)
    buf.writeLong(33)
    val input = buf.duplicate()
    assert(!channel.writeInbound(input.readBytes(1)))
    assert(!channel.writeInbound(input.readBytes(6)))
    assert(!channel.writeInbound(input.readBytes(7)))
    assume(!channel.writeInbound(input.readBytes(1)))
    assume(channel.writeInbound(input.readBytes(5)))
    val read = channel.readInbound[ByteBuf]()
    assume(read != null)
    assert(read.readableBytes()==20)
    assert(read.readLong()==12)
    assert(read.readInt()==MessageType.HEART.id)
    assert(read.readLong()==33)
    assume(channel.readInbound[ByteBuf]()==null)
    buf.release()
    read.release()
  }
  // A NORM frame carrying a Kryo-serialized RpcRequest should be dispatched to
  // ComputeServer.compute(Boolean, Boolean) and yield a response with result 0.
  @Test
  def testNormMessageForPrimitiveTypeOne():Unit={
    val rpcRequest=RpcRequest(12L,"compute",Array((classOf[Boolean].getName,true),(classOf[Boolean].getName,false)))
    val output=new Output(com.ximalaya.ratel.INIT_MESSAGE_SIZE,Int.MaxValue)
    kryo.writeObject(output,rpcRequest)
    val bytes=output.toBytes
    val buf=channel.alloc().buffer(bytes.length+12,bytes.length+12).writeLong(bytes.length+4).writeInt(MessageType.NORM.id).writeBytes(bytes)
    val input=buf.duplicate()
    assert(channel.writeInbound(input.retain()))
    val result=channel.readInbound[ByteBuf]()
    assert(result!=null)
    // Skip the length prefix and message-type id before decoding the RpcResponse payload.
    result.readLong()
    result.readInt()
    assert(com.ximalaya.ratel.encode(kryo,result,classOf[RpcResponse]).result.right.get._2==0)
  }
  // NORM dispatch for a custom Person argument on channel2/TestServer. NOTE(review): this
  // test only prints the decoded response and re-encodes it; the assertions on the outbound
  // bytes are commented out, so it does not actually verify the response content.
  @Test
  def testNormMessage:Unit={
    val rpcRequest=RpcRequest(13L,"test",Array((classOf[Person].getName,new Person("Li",12))))
    val output=new Output(com.ximalaya.ratel.INIT_MESSAGE_SIZE,Int.MaxValue)
    kryo.writeObject(output,rpcRequest)
    val bytes=output.toBytes
    val buf=channel2.alloc().buffer(bytes.length+12,bytes.length+12).writeLong(bytes.length+4).writeInt(MessageType.NORM.id).writeBytes(bytes)
    val input=buf.duplicate()
    assert(channel2.writeInbound(input.retain()))
    val e=channel2.readInbound[RpcResponse]()
    println(e)
    channel2.writeOutbound(e)
    val r=channel2.readOutbound[ByteBuf]()
    /*val result=channel2.readOutbound[ByteBuf]()
    assert(result!=null)
    result.readLong()
    result.readInt()
    assert(com.ximalaya.ratel.encode(kryo,result,classOf[RpcResponse]).result.right.get._2==true)*/
  }
}
| dongjiaqiang/Ratel | ratel-rpc/src/test/scala/com.ximalaya.ratel/server/RequestDecoderTest.scala | Scala | mit | 6,047 |
package collins.solr
import java.util.Date
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import Solr._
import org.specs2._
import collins.models.Asset
import collins.models.AssetFinder
import collins.models.AssetType
import collins.models.AssetMeta
import collins.models.AssetSearchParameters
import collins.models.IpAddresses
import collins.models.IpmiInfo
import collins.models.State
import collins.models.Status
import collins.models.AssetMetaValue
import collins.models.shared.PageParams
import collins.util.views.Formatter
import play.api.test.WithApplication
import play.api.test.FakeApplication
/** Unit spec for MultiSet: element counting, construction, ordering, and equality. */
class MultiSetSpec extends mutable.Specification {
  "MultiSet" should {
    "add a new element" in {
      (MultiSet[Int]() + 3).items must_== Map(3 -> 1)
    }
    "vararg constructor" in {
      MultiSet(1,2,2,3).items must_== Map(1 -> 1, 2 -> 2, 3 -> 1)
    }
    "add an existing element" in {
      // Adding a duplicate bumps the count rather than adding a new entry.
      (MultiSet(1,1,1) + 1).items must_== Map(1 -> 4)
    }
    "size" in {
      // size counts duplicates, not distinct elements.
      MultiSet(1, 2, 2, 3, 4, 5).size must_== 6
    }
    "headOption" in {
      MultiSet().headOption must_== None
      MultiSet(1,2,3,4).headOption must_== Some(1)
    }
    "toSeq" in {
      // toSeq repeats each element according to its count.
      MultiSet(1, 2, 2, 3).toSeq must_== Seq(1,2,2,3)
    }
    "equals" in {
      // Equality is order-insensitive: only the element counts matter.
      MultiSet(1,2, 3, 2) must_== MultiSet(1, 2, 2, 3)
    }
  }
}
/**
 * Integration-style specs covering (1) serialization of assets into Solr documents and
 * (2) asset search through the Solr backend, each running inside a Play application context.
 */
class SolrSpec extends mutable.Specification {
  import AssetMeta.ValueType._
  import AssetMeta.ValueType
  // The search examples share Solr/database state, so run them in declaration order.
  args(sequential = true)
  "during serialization" should {
    // Diff helper: when the sets differ, print both plus the two set differences so the
    // failing example is easy to debug from the console output.
    def eqHelper[T](actualSet: Set[T], expectedSet: Set[T]) {
      if (expectedSet != actualSet) {
        println("== EXPECTED ==")
        expectedSet.foreach { e => println(e.toString) }
        println("== ACTUAL ==")
        actualSet.foreach { a => println(a.toString) }
        println("== expected - actual ==")
        (expectedSet diff actualSet).foreach { e => println(e.toString) }
        println("== actual - expected ==")
        (actualSet diff expectedSet).foreach { a => println(a.toString) }
      }
    }
    // Builds a fully populated asset (meta values, IP addresses, IPMI) and checks that
    // AssetSerializer.serialize produces exactly the expected key/value document,
    // including the *_SORT copies and the KEYS field.
    "serialize an asset" in new WithApplication {
      val assetTag = "solr1"
      val assetType = AssetType.ServerNode.get
      val status = Status.Allocated.get
      val state = State.Running.get
      val meta = List(
        ("A", String, 0, "a"),
        ("B", String, 0, "b"),
        ("A", String, 1, "a1"),
        ("int", Integer, 0, "1135"),
        ("double", Double, 0, "3.1415"),
        ("bool", Boolean, 0, "false"),
        ("HOSTNAME", String, 0, "my_hostname"))
      val asset = generateAsset(assetTag, assetType, status, meta, state)
      val indexTime = new Date
      val addresses = IpAddresses.createForAsset(asset, 2, Some("DEV"))
      val ipmi = IpmiInfo.createForAsset(asset)
      //alldoc keys are not added to the KEYS field
      val allDoc = Map(
        SolrKey("DOC_TYPE", String, false, false, true) -> SolrStringValue("ASSET", StrictUnquoted),
        SolrKey("LAST_INDEXED", String, false, false, true) -> SolrStringValue(Formatter.solrDateFormat(indexTime), StrictUnquoted),
        SolrKey("UUID", String, false, false, true) -> SolrStringValue("ASSET_" + asset.id, StrictUnquoted))
      val almostExpected = Map(
        SolrKey("ID", Integer, false, false, true) -> SolrIntValue(asset.id.toInt),
        SolrKey("TAG", String, false, false, true) -> SolrStringValue(assetTag, StrictUnquoted),
        SolrKey("STATUS", String, false, false, true) -> SolrStringValue(status.name, StrictUnquoted),
        SolrKey("STATE", String, false, false, true) -> SolrStringValue(state.name, StrictUnquoted),
        SolrKey("TYPE", String, false, false, true, Set("ASSETTYPE")) -> SolrStringValue(assetType.name, StrictUnquoted),
        SolrKey("CREATED", String, false, false, true) -> SolrStringValue(Formatter.solrDateFormat(asset.created), StrictUnquoted),
        // Multiple values for the same meta key ("A") collapse into one multi-valued field.
        SolrKey("A", String, true, true, false) -> SolrMultiValue(MultiSet(SolrStringValue("a", StrictUnquoted), SolrStringValue("a1", StrictUnquoted))),
        SolrKey("B", String, true, true, false) -> SolrStringValue("b", StrictUnquoted),
        SolrKey("INT", Integer, true, true, false) -> SolrIntValue(1135),
        SolrKey("DOUBLE", Double, true, true, false) -> SolrDoubleValue(3.1415),
        SolrKey("BOOL", Boolean, true, true, false) -> SolrBooleanValue(false),
        SolrKey("IP_ADDRESS", String, false, true, false) -> SolrMultiValue(MultiSet.fromSeq(addresses.map { a => SolrStringValue(a.dottedAddress, StrictUnquoted) })),
        SolrKey("HOSTNAME", String, false, false, true) -> SolrStringValue("my_hostname", StrictUnquoted),
        SolrKey("IPMI_ADDRESS", String, true, false, true) -> SolrStringValue(ipmi.dottedAddress, StrictUnquoted))
      // Single-valued fields get string-typed *_SORT duplicates for sorting in Solr.
      val sortKeys = Map(
        SolrKey("DOC_TYPE_SORT", String, false, false, true) -> SolrStringValue("ASSET", StrictUnquoted),
        SolrKey("LAST_INDEXED_SORT", String, false, false, true) -> SolrStringValue(Formatter.solrDateFormat(indexTime), StrictUnquoted),
        SolrKey("UUID_SORT", String, false, false, true) -> SolrStringValue("ASSET_" + asset.id, StrictUnquoted),
        SolrKey("ID_SORT", String, false, false, true) -> SolrStringValue(asset.id.toString, StrictUnquoted),
        SolrKey("TAG_SORT", String, false, false, true) -> SolrStringValue(assetTag, StrictUnquoted),
        SolrKey("STATUS_SORT", String, false, false, true) -> SolrStringValue(status.name, StrictUnquoted),
        SolrKey("STATE_SORT", String, false, false, true) -> SolrStringValue(state.name, StrictUnquoted),
        SolrKey("TYPE_SORT", String, false, false, true) -> SolrStringValue(assetType.name, StrictUnquoted),
        SolrKey("CREATED_SORT", String, false, false, true) -> SolrStringValue(Formatter.solrDateFormat(asset.created), StrictUnquoted),
        SolrKey("HOSTNAME_SORT", String, false, false, true) -> SolrStringValue("my_hostname", StrictUnquoted),
        SolrKey("IPMI_ADDRESS_SORT", String, false, false, true) -> SolrStringValue(ipmi.dottedAddress, StrictUnquoted))
      val expected = allDoc
        .++(almostExpected)
        .++(sortKeys)
        .+((SolrKey("KEYS", String, true, true, false) -> SolrMultiValue(MultiSet.fromSeq(almostExpected.map { case (k, v) => SolrStringValue(k.name, StrictUnquoted) }.toSeq), String)))
      val actual = AssetSerializer.serialize(asset, indexTime)
      val actualSet: Set[(SolrKey, SolrValue)] = actual.toSet
      val expectedSet: Set[(SolrKey, SolrValue)] = expected.toSet
      eqHelper(actualSet, expectedSet)
      actualSet must_== expectedSet
    }
    // postProcess should derive NUM_DISKS from the number of DISK_SIZE_BYTES values
    // and record both key names in the KEYS field.
    "post-process number of disks" in new WithApplication {
      val m = Map[SolrKey, SolrValue](SolrKey("DISK_SIZE_BYTES", String, true, true, false) -> SolrMultiValue(MultiSet(SolrStringValue("123", StrictUnquoted), SolrStringValue("123", StrictUnquoted))))
      val expected = m +
        (SolrKey("NUM_DISKS", Integer, true, false, true) -> SolrIntValue(2)) +
        (SolrKey("KEYS", String, true, true, false) -> SolrMultiValue(MultiSet(SolrStringValue("DISK_SIZE_BYTES", StrictUnquoted), SolrStringValue("NUM_DISKS", StrictUnquoted))))
      val actual = AssetSerializer.postProcess(m)
      val actualSet = actual.toSet
      val expectedSet = expected.toSet
      eqHelper(actualSet, expectedSet)
      actualSet must_== expectedSet
    }
  }
  "search" should {
    val pageParam = PageParams(0, 10, "DESC", "TAG")
    // Re-index the freshly created assets into Solr before querying.
    def reindex() {
      // repopulate solr - HARD CODED TIME - DO THIS BETTER
      Await.result(SolrHelper.populate(), Duration(5, java.util.concurrent.TimeUnit.SECONDS))
    }
    // Each example below creates one asset that should match and one decoy that should not,
    // reindexes, runs Asset.find with the relevant filter, and checks only the match returns.
    "must find asset with state filter" in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      // create the asset that matches the search
      val assetTag = "asset1"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, Nil, State.Running.get)
      // this asset is not included in the results
      generateAsset("asset2", AssetType.ServerNode.get, Status.Provisioned.get, Nil, State.Starting.get)
      reindex()
      val finder = AssetFinder.empty.copy(state = State.Running)
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), finder)
      page.items.size mustEqual 1
      page.items.headOption must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
      }
    }
    "must find asset with meta fields " in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      val meta = List(
        ("HOST", String, 0, "my_host"))
      // create the asset that matches the search
      val assetTag = "asset3"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, meta, State.New.get)
      // this asset is not included in the results
      generateAsset("asset4", AssetType.ServerNode.get, Status.Allocated.get, Nil, State.New.get)
      reindex()
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("HOST", "my_host")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), AssetFinder.empty, None)
      page.items.size mustEqual 1
      page.items.headOption must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
      }
    }
    // Meta attribute search should be case-insensitive in both the key and the value.
    "must find asset with meta fields ignoring case" in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      val meta = List(
        ("CaSe_IgNoRe", String, 0, "Ignore_THIS_case"))
      // create the asset that matches the search
      val assetTag = "asset5"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, meta, State.New.get)
      // this asset is not included in the results
      generateAsset("asset6", AssetType.ServerNode.get, Status.Allocated.get, Nil, State.New.get)
      reindex()
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("case_ignore", "IGNORE_THIS_case")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), AssetFinder.empty, None)
      page.items.size mustEqual 1
      page.items.headOption must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
      }
    }
    // Combines a built-in finder field (status) with a custom meta attribute.
    "must find asset with meta and regular fields " in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      val meta = List(("ATTR", String, 0, "ATTRV"))
      // create the asset that matches the search
      val assetTag = "asset7"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, meta, State.New.get)
      // this asset is not included in the results
      generateAsset("asset8", AssetType.ServerNode.get, Status.Allocated.get, Nil, State.New.get)
      reindex()
      val finder = AssetFinder.empty.copy(status = Status.Allocated)
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("ATTR", "ATTRV")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), finder, None)
      page.items.size mustEqual 1
      page.items.headOption must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
      }
    }
    // "and" operation: the asset must carry both attributes to match.
    "must find asset with and'ing conditional " in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      val meta = List(("X", String, 0, "X"),
        ("Y", String, 0, "Y"))
      // create the asset that matches the search
      val assetTag = "asset9"
      val asset = generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, meta, State.New.get)
      // this asset is not included in the results
      generateAsset("asset10", AssetType.ServerNode.get, Status.Allocated.get, Nil, State.New.get)
      reindex()
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("X", "X").withMeta("Y", "Y")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), AssetFinder.empty, Some("and"))
      page.items.size mustEqual 1
      page.items.headOption must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
      }
    }
    // "or" operation: assets matching either attribute are returned.
    "must find asset with or'ing conditional " in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      // create the asset that matches the search
      val assetTag = "asset11"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, List(("T", String, 0, "T")), State.New.get)
      // this asset is *also* included in the results
      val assetTag2 = "asset12"
      generateAsset(assetTag2, AssetType.ServerNode.get, Status.Provisioned.get, List(("U", String, 0, "U")), State.New.get)
      reindex()
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("T", "T").withMeta("U", "U")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), AssetFinder.empty, Some("or"))
      page.items.size mustEqual 2
      page.items.find { a => a.tag == assetTag } must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
        asset.getMetaAttributeValue("T") mustEqual Some("T")
        asset.getMetaAttributeValue("U") mustEqual None
      }
      page.items.find { a => a.tag == assetTag2 } must beSome.which { asset =>
        asset.tag mustEqual assetTag2
        asset.status mustEqual Status.Provisioned.get.getId()
        asset.getMetaAttributeValue("T") mustEqual None
        asset.getMetaAttributeValue("U") mustEqual Some("U")
      }
    }
    // A single token ("FURY") should match an attribute whose value contains it.
    "must find asset with partial attribute match " in new WithApplication(FakeApplication(
      additionalConfiguration = Map(
        "solr.enabled" -> true,
        "solr.repopulateOnStartup" -> true))) {
      // create the asset that matches the search
      val assetTag = "asset13"
      generateAsset(assetTag, AssetType.ServerNode.get, Status.Allocated.get, List(("SPECIFICATION", String, 0, "WEB SERVICE FURY HADOOP")), State.New.get)
      // this asset is not included in the results
      generateAsset("asset14", AssetType.ServerNode.get, Status.Provisioned.get, Nil, State.New.get)
      reindex()
      val ra = collins.util.AttributeResolver.EmptyResolvedAttributes.withMeta("SPECIFICATION", "FURY")
      val page = Asset.find(pageParam, (ra.ipmi, ra.assetMeta, ra.ipAddress.toList), AssetFinder.empty)
      page.items.size mustEqual 1
      page.items.find { a => a.tag == assetTag } must beSome.which { asset =>
        asset.tag mustEqual assetTag
        asset.status mustEqual Status.Allocated.get.getId()
        asset.getMetaAttributeValue("SPECIFICATION") mustEqual Some("WEB SERVICE FURY HADOOP")
      }
    }
  }
  // Fixture helper: creates an asset with the given status/state and attaches the supplied
  // (name, type, group, value) meta values, returning the persisted asset.
  def generateAsset(tag: String, assetType: AssetType, status: Status, metaValues: Seq[(String, ValueType, Int, String)], state: State) = {
    val asset = Asset.create(Asset(tag, status, assetType))
    Asset.partialUpdate(asset, None, None, Some(state))
    metaValues.foreach{case (name, value_type, group_id, value) =>
      val meta = AssetMeta.findOrCreateFromName(name, value_type)
      try {
        AssetMetaValue.create(AssetMetaValue(asset.id, meta.id, group_id, value))
      } catch {
        case e: RuntimeException =>
          // NOTE(review): sleep-and-retry once on failure — presumably works around a timing
          // issue in meta creation; the retry's exception (if any) propagates. Verify intent.
          Thread.sleep(1000)
          AssetMetaValue.create(AssetMetaValue(asset.id, meta.id, group_id, value))
      }
    }
    Asset.findById(asset.id).get
  }
}
/**
 * Unit tests for the Collins Query Language (CQL): the implicit DSL
 * conversions, the string parser, Solr query-string generation and type
 * checking on the resulting AST, and conversion of finder/search-parameter
 * objects into Solr expressions.
 */
class SolrQuerySpec extends mutable.Specification {
  // Fresh parser with the default document types.
  def P = CollinsQueryParser()
  import CollinsQueryDSL._
  import AssetMeta.ValueType._
  // Implicit (key -> value) tuple conversions into typed SolrKeyVal terms.
  "CollinsQueryDSL" should {
    "key vals" in {
      "int" in {
        (("foosolr" -> 3): SolrKeyVal) must_== SolrKeyVal("foosolr", SolrIntValue(3))
      }
      "bool" in {
        (("foosolr" -> false): SolrKeyVal) must_== SolrKeyVal("foosolr", SolrBooleanValue(false))
      }
      "double" in {
        (("foosolr" -> 3.1415): SolrKeyVal) must_== SolrKeyVal("foosolr", SolrDoubleValue(3.1415))
      }
      "string" in {
        (("foosolr" -> "bar"): SolrKeyVal) must_== SolrKeyVal("foosolr", SolrStringValue("bar"))
      }
      "quoted string" in {
        (("foosolr" -> "bar".quoted): SolrKeyVal) must_== SolrKeyVal("foosolr", SolrStringValue("bar", Quoted))
      }
    }
  }
  // Parsing raw CQL strings into query ASTs: SELECT handling, whitespace and
  // quote cleanup, key normalisation, value syntax, ranges and combinators.
  "CollinsQueryParser" should {
    "empty.query.where" in {
      "*".query.where must_== EmptySolrQuery
    }
    "selects" in {
      "defaults to asset" in {
        "foo = bar".query.select must_== AssetDocType
      }
      "select assets" in {
        "SELECT asset WHERE foo = bar".query.select must_== AssetDocType
      }
      "select logs" in {
        // Log doc type must be opted into explicitly when creating the parser.
        val p = CollinsQueryParser(List(AssetDocType, AssetLogDocType))
        p.parseQuery("SELECT asset_log WHERE foo = bar").right.get.select must_== AssetLogDocType
      }
      "Reject unknown select type" in {
        CollinsQueryParser().parseQuery("SELECT omgwtfbbq WHERE foo = bar") must beAnInstanceOf[Left[String, CQLQuery]]
      }
      "clean string" in {
        "trim whitespace" in {
          """
            foo = bar
          """.query.where must_== SolrKeyVal("foo", SolrStringValue("bar"))
        }
        "remove enclosing quotes" in {
          """ "foo = bar" """.query.where must_== SolrKeyVal("foo", SolrStringValue("bar"))
        }
      }
    }
  // SolrKey name normalisation (upper-casing), alias resolution, and the
  // sortability restriction on multi-valued keys.
  "SolrKey" should {
    "properly convert names to U case" in {
      val n: String = SolrKey("foo", String, false, false, false).name
      n must_== "FOO"
    }
    "convert aliases to UCASE" in {
      SolrKey("foo", String, false, false, false, Set("bar", "BAZ")).isAliasOf("bar") must beTrue
      SolrKey("foo", String, false, false, false, Set("bar", "BAZ")).isAliasOf("baz") must beTrue
    }
    "force multivalued keys to be non-sortable" in {
      SolrKey("foo", String, false, true, true).sortKey must_== None
    }
    }
    // Parsing of individual `key = value` terms, including wildcard markers
    // and special value shapes (MACs, IP addresses).
    "key-value" in {
      "string value" in {
        """foosolr = "bar"""".query.where must_== (("foosolr" -> "bar".quoted): SolrKeyVal)
      }
      "int value" in {
        """foosolr = 3""".query.where must_== (("foosolr" -> "3"): SolrKeyVal)
      }
      "double value" in {
        """foosolr = 3.1415""".query.where must_== (("foosolr" -> "3.1415"): SolrKeyVal)
      }
      "boolean value" in {
        """foosolr = false""".query.where must_== (("foosolr" -> "false"): SolrKeyVal)
      }
      "leading regex wildcard" in {
        """foosolr = .*bar""".query.where must_== SolrKeyVal("foosolr", SolrStringValue("bar", LWildcard))
      }
      "number-start string value" in {
        """foosolr = 03abc.xyz09-wer:10""".query.where must_== SolrKeyVal("foosolr", SolrStringValue("03abc.xyz09-wer:10", Unquoted))
      }
      "unquoted mac address" in {
        """foosolr = 04:7d:7b:06:8f:f9""".query.where must_== SolrKeyVal("foosolr", SolrStringValue("04:7d:7b:06:8f:f9", Unquoted))
      }
      "ip address" in {
        """ip_address = "192.168.1.1"""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("192.168.1.1", Quoted))
      }
      "unquoted ip address" in {
        // Trailing octet wildcards become right-wildcard string values.
        """ip_address = 192.168.1.1""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("192.168.1.1", Unquoted))
        """ip_address = 192.168.1.*""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("192.168.1", RWildcard))
        """ip_address = 192.168.*""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("192.168", RWildcard))
        """ip_address = 192.*""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("192", RWildcard))
        """ip_address = *""".query.where must_== SolrKeyVal("ip_address", SolrStringValue("*", FullWildcard))
      }
    }
    // Range syntax: [..] inclusive, (..) exclusive, * as an open bound, and
    // the comparison operators that desugar to half-open ranges.
    "ranges" in {
      "both inclusive" in {
        """foosolr = [3, 5]""".query.where must_== SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), Some(SolrStringValue("5", StrictUnquoted)), true)
      }
      "range opt low" in {
        """foosolr = [*, 5]""".query.where must_== SolrKeyRange("foosolr", None, Some(SolrStringValue("5", StrictUnquoted)), true)
      }
      "range opt high" in {
        """foosolr = [3, *]""".query.where must_== SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), None, true)
      }
      "range opt both" in {
        """foosolr = [*, *]""".query.where must_== SolrKeyRange("foosolr", None, None, true)
      }
      "open range" in {
        """foosolr = (3, 5)""".query.where must_== SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), Some(SolrStringValue("5", StrictUnquoted)), false)
      }
      "clopen range" in {
        // Mixed-inclusivity ranges are expressed as the AND of two half-ranges.
        val p1 = SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), None, true)
        val p2 = SolrKeyRange("foosolr", None, Some(SolrStringValue("5", StrictUnquoted)), false)
        """foosolr = [3, 5)""".query.where must_== (p1 AND p2)
      }
      ">" in {
        """foosolr > 3""".query.where must_== SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), None, false)
      }
      ">=" in {
        """foosolr >= 3""".query.where must_== SolrKeyRange("foosolr", Some(SolrStringValue("3", StrictUnquoted)), None, true)
      }
      "<" in {
        """foosolr < 5""".query.where must_== SolrKeyRange("foosolr", None, Some(SolrStringValue("5", StrictUnquoted)), false)
      }
      "<=" in {
        """foosolr <= 5""".query.where must_== SolrKeyRange("foosolr", None, Some(SolrStringValue("5", StrictUnquoted)), true)
      }
      "< date" in {
        val t = new Date
        val s = Formatter.solrDateFormat(t)
        "foosolr < %s".format(s).query.where must_== SolrKeyRange("foosolr", None, Some(SolrStringValue(s, StrictUnquoted)), false)
      }
    }
    // Boolean combinators: AND/OR/NOT (case-insensitive), operator precedence,
    // parenthesisation and negation.
    "complex expressions" in {
      "simple AND" in {
        """foosolr = 3 AND bar = 4""".query.where must_== (("foosolr" -> "3") AND ("bar" -> "4"))
      }
      "simple OR" in {
        """foosolr = 3 OR bar = 4""".query.where must_== (("foosolr" -> "3") OR ("bar" -> "4"))
      }
      "case insensitive AND" in {
        """foosolr = 3 and bar = 4""".query must_== """foosolr = 3 AND bar = 4""".query
      }
      "case insensitive OR" in {
        """foosolr = 3 or bar = 4""".query must_== """foosolr = 3 OR bar = 4""".query
      }
      "order of operations" in {
        // AND binds tighter than OR.
        """foosolr = 4 OR bar = 4 AND baz = false""".query.where must_== (("foosolr" -> "4") OR ("bar" -> "4" AND "baz" -> "false"))
      }
      "arbitrary parentheses" in {
        """(((((((foosolr = true)))))))""".query.where must_== SolrKeyVal("foosolr", SolrStringValue("true", Unquoted))
      }
      "simple NOT" in {
        """NOT foosolr = 5""".query.where must_== CollinsQueryDSL.not("foosolr" -> "5")
      }
      "case insensitive NOT" in {
        """not foosolr = 5""".query must_== """NOT foosolr = 5""".query
      }
      "not OOO" in {
        // NOT binds only to the immediately following term, not the whole OR.
        """NOT foosolr = 5 OR bar = false""".query.where must_== (SolrNotOp(("foosolr" -> "5")) OR ("bar" -> "false"))
      }
      "negate complex expression" in {
        """NOT (foosolr = 5 AND bar = "baz")""".query.where must_== SolrNotOp(("foosolr" -> "5") AND ("bar" -> "baz".quoted))
      }
      "!=" in {
        """foosolr != 5""".query.where must_== SolrNotOp(SolrKeyVal("foosolr", SolrStringValue("5", Unquoted)))
      }
    }
  }
  // Mapping of regex-ish markers (*, ^, $) on raw strings to wildcard
  // formats, and Solr special-character escaping per format.
  "StringValueFormat" should {
    // Short-hand constructors for readability of the cases below.
    def s(str: String) = StringValueFormat.createValueFor(str)
    def S(str: String, format: StringValueFormat) = SolrStringValue(str, format)
    def LR = LRWildcard
    def L = LWildcard
    def R = RWildcard
    def Q = Quoted
    def U = Unquoted
    "handle wildcarding" in {
      "foo" in {
        s("foo") must_== S("foo", U)
      }
      "*foo" in {
        s("*foo") must_== S("foo", L)
      }
      "*foo*"in {
        s("*foo*") must_== S("foo", LR)
      }
      "foo*"in {
        s("foo*") must_== S("foo", R)
      }
      "foo.*"in {
        s("foo.*") must_== S("foo", R)
      }
      "^foo"in {
        s("^foo") must_== S("foo", R)
      }
      "^foo.*"in {
        s("^foo.*") must_== S("foo", R)
      }
      "^foo*"in {
        s("^foo*") must_== S("foo", R)
      }
      "^foo$"in {
        // Fully anchored means exact match, i.e. quoted.
        s("^foo$") must_== S("foo", Q)
      }
      "*foo$"in {
        s("*foo$") must_== S("foo", L)
      }
      "foo$"in {
        s("foo$") must_== S("foo", L)
      }
    }
    "handle character escaping" in {
      "quoted" in {
        S("04:7d:7b:06:8f:f9", Q).traverseQueryString(false) must_== """"04:7d:7b:06:8f:f9""""
      }
      "wildcard" in {
        S("04:7d:7b:06:8",R).traverseQueryString(false) must_== """04\\:7d\\:7b\\:06\\:8*"""
      }
      "strict unquoted" in {
        S("04:7d:7b:06:8f:f9", StrictUnquoted).traverseQueryString(false) must_== """04\\:7d\\:7b\\:06\\:8f\\:f9"""
      }
    }
  }
  // Rendering ASTs to Solr query strings and type checking values against
  // the known key set (meta keys, enums, generated keys).
  "CQL abstract syntax-tree" should {
    "solr query generation" in {
      "empty query" in {
        "*".query.where.traverseQueryString must_== "*:*"
      }
      "field wildcard" in {
        "tag = *".query.where.traverseQueryString must_== "tag:*"
      }
      "simple keyval" in {
        //the quotes are expected since it hasn't type inferred to an int yet
        "foosolr = 3".query.where.traverseQueryString must_== """foosolr:"3""""
      }
      "not pad unquoted unchecked strings with wildcards" in {
        "foo = bar".query.where.traverseQueryString must_== """foo:"bar""""
      }
      "handle ^" in {
        "foo = ^bar".query.where.traverseQueryString must_== """foo:bar*"""
      }
      "handle $" in {
        "foo = bar$".query.where.traverseQueryString must_== """foo:*bar"""
      }
      "handle both ^ and $" in {
        "foo = ^bar$".query.where.traverseQueryString must_== """foo:"bar""""
      }
      "not handle ^ or $ in quoted string" in {
        """foo = "^bar$"""".query.where.traverseQueryString must_== """foo:"^bar$""""
      }
      "quoted dash" in {
        """tag=-""".query.where.traverseQueryString must_== """tag:"-""""
      }
      "leading wildcard" in {
        """hostname=*foo""".query.where.traverseQueryString must_== """hostname:*foo"""
      }
      "trailing wildcard" in {
        """hostname=foo*""".query.where.traverseQueryString must_== """hostname:foo*"""
      }
      "not quote ranges" in {
        """foo = [abc, abd]""".query.where.traverseQueryString must_== """foo:[abc TO abd]"""
      }
      "ANDs" in {
        """foosolr = 3 AND bar = "abcdef" AND baz = true""".query.where.traverseQueryString must_== """foosolr:"3" AND bar:"abcdef" AND baz:"true""""
      }
      "ORs" in {
        """foosolr = 3 OR bar = "abcdef" OR baz = true""".query.where.traverseQueryString must_== """foosolr:"3" OR bar:"abcdef" OR baz:"true""""
      }
      "NOT" in {
        """NOT foosolr = 3""".query.where.traverseQueryString must_== """-foosolr:"3""""
      }
      "NOT with multi" in {
        """NOT (foosolr = 3 AND foosolr = 5)""".query.where.traverseQueryString must_== """-(foosolr:"3" AND foosolr:"5")"""
      }
      "nested exprs" in {
        """(foosolr = 3 OR foosolr = 4) AND (bar = true OR (bar = false AND baz = 5))""".query.where.traverseQueryString must_== """(foosolr:"3" OR foosolr:"4") AND (bar:"true" OR (bar:"false" AND baz:"5"))"""
      }
      "support unquoted one-word strings" in {
        """foosolr = bar""".query.where.traverseQueryString must_== """foosolr:"bar""""
      }
    }
    // Type checking resolves keys to their typed Solr field names (e.g.
    // FOOSOLR_meta_i for an Integer meta key) and rejects mismatched values.
    "type checking" in new WithApplication {
      // Every type-checked asset query is implicitly scoped to the asset doc type.
      def A(s: SolrExpression) = ("DOC_TYPE" -> "ASSET".strictUnquoted) AND s
      "keyvals" in {
        val m = AssetMeta.findOrCreateFromName("foosolr", Integer)
        "foosolr = 3".query.typeCheck must_== Right(A(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3))))
        "foosolr = 3.123".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
        "foosolr = true".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
        """foosolr = "3"""".query.typeCheck must_== Right(A(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3))))
      }
      "case insensitive key" in {
        "FoOsOlR = 3".query.typeCheck must_== Right(A(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3))))
      }
      "valid enum" in {
        """type = "SERVER_NODE"""".query.typeCheck must_== Right(A(SolrKeyVal("TYPE", SolrStringValue("SERVER_NODE", StrictUnquoted))))
      }
      "case insensitive status enum" in {
        """status = unallocated""".query.typeCheck must_== "STATUS = Unallocated".query.typeCheck
      }
      "case insensitive type enum" in {
        """type = server_node""".query.typeCheck must_== """TYPE = SERVER_NODE""".query.typeCheck
      }
      "invalid enum" in {
        """type = "FOOSOLRBARRRRRR"""".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
      }
      "use enum id" in {
        """type = 1""".query.typeCheck must_== "TYPE = SERVER_NODE".query.typeCheck
      }
      "generated key" in {
        """num_disks = 3""".query.typeCheck must_== Right(A(SolrKeyVal("NUM_DISKS_meta_i", SolrIntValue(3))))
      }
      "AND" in {
        "foosolr = 3 AND foosolr = false".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
      }
      "OR" in {
        "foosolr = 3 OR foosolr = false".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
      }
      "range" in {
        "foosolr = [3, 5]".query.typeCheck must_== Right(A(SolrKeyRange("FOOSOLR_meta_i", Some(SolrIntValue(3)), Some(SolrIntValue(5)), true)))
        "foosolr = [3, *]".query.typeCheck must_== Right(A(SolrKeyRange("FOOSOLR_meta_i", Some(SolrIntValue(3)), None, true)))
        "foosolr = [*, 5]".query.typeCheck must_== Right(A(SolrKeyRange("FOOSOLR_meta_i", None, Some(SolrIntValue(5)), true)))
        "foosolr = [*, *]".query.typeCheck must_== Right(A(SolrKeyRange("FOOSOLR_meta_i", None, None, true)))
        "foosolr = [false, 5]".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
        "foosolr = [3, false]".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
      }
      "not lose NOT" in {
        "NOT foosolr = 3".query.typeCheck must_== Right(A(SolrNotOp(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3)))))
      }
      "De Morgan applied to NOTs in multi AND" in {
        "NOT foosolr = 3 AND NOT foosolr = 5".query.typeCheck must_== Right(A(SolrNotOp(SolrOrOp(Set(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3)), SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(5)))))))
      }
      "De Morgan applied to NOTs in multi OR" in {
        "NOT foosolr = 3 OR NOT foosolr = 5".query.typeCheck must_== Right(A(SolrNotOp(SolrAndOp(Set(SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(3)), SolrKeyVal("FOOSOLR_meta_i", SolrIntValue(5)))))))
      }
      "tag search" in {
        """tag = test""".query.typeCheck must_== Right(A(SolrKeyVal("TAG", SolrStringValue("test", Quoted))))
      }
      "not allow partial wildcard on numeric values" in {
        """foosolr = 3*""".query.typeCheck must beAnInstanceOf[Left[String, SolrExpression]]
      }
      "TAG can be explicitly wildcarded" in {
        """tag = *foo*""".query.typeCheck must_== Right(A(SolrKeyVal("TAG", SolrStringValue("foo", LRWildcard))))
      }
    }
  }
  // AssetFinder -> Solr key/value conversion, including date-range bounds and
  // embedding of raw CQL sub-queries.
  "AssetFinder solr conversion" should {
    "basic conversion" in new WithApplication {
      val somedate = new java.util.Date
      val dateString = collins.util.views.Formatter.solrDateFormat(somedate)
      val afinder = AssetFinder(
        Some("foosolrtag"),
        Status.Allocated,
        Some(AssetType.ServerNode.get),
        Some(somedate),
        Some(somedate),
        Some(somedate),
        Some(somedate),
        Some(State.Running.get),
        None
      )
      val expected = List(
        SolrKeyVal("tag", SolrStringValue("foosolrtag", Unquoted)),
        SolrKeyVal("status", SolrIntValue(Status.Allocated.get.id)),
        SolrKeyVal("assetType", SolrIntValue(AssetType.ServerNode.get.id)),
        SolrKeyRange("created", Some(SolrStringValue(dateString, StrictUnquoted)),Some(SolrStringValue(dateString, StrictUnquoted)), true),
        SolrKeyRange("updated", Some(SolrStringValue(dateString, StrictUnquoted)),Some(SolrStringValue(dateString, StrictUnquoted)), true),
        SolrKeyVal("state", SolrIntValue(State.Running.get.id))
      )
      afinder.toSolrKeyVals.toSet must_== expected.toSet
    }
    "open date ranges" in {
      // Only one bound set per date field: the other side of the range stays open.
      val somedate = new java.util.Date
      val dateString = collins.util.views.Formatter.solrDateFormat(somedate)
      val afinder = AssetFinder(
        None,
        None,
        None,
        None,
        Some(somedate),
        Some(somedate),
        None,
        None,
        None
      )
      val expected = List(
        SolrKeyRange("updated", Some(SolrStringValue(dateString, StrictUnquoted)),None, true),
        SolrKeyRange("created",None,Some(SolrStringValue(dateString, StrictUnquoted)), true)
      )
      afinder.toSolrKeyVals.toSet must_== expected.toSet
    }
    "mix with raw cql query" in new WithApplication {
      // A raw CQL expression supplied on the finder is appended verbatim.
      val cql = "foo = bar AND (baz = asdf OR abcdef = 3)".query.where
      val afinder = AssetFinder(
        tag = Some("tagvalue"),
        status = Status.Allocated,
        None,
        None,
        None,
        None,
        None,
        None,
        query = Some(cql)
      )
      val expected = List(
        SolrKeyVal("tag", "tagvalue".unquoted),
        SolrKeyVal("status", SolrIntValue(Status.Allocated.get.id)),
        cql
      )
      afinder.toSolrKeyVals.toSet must_== expected.toSet
    }
  }
  // Full search-parameter tuples (IPMI creds, meta values, IP addresses plus
  // a finder) collapse into a single ANDed Solr expression.
  "AssetSearchParameters conversion" should {
    "basic conversion" in new WithApplication {
      //finder
      val somedate = new java.util.Date
      val dateString = collins.util.views.Formatter.solrDateFormat(somedate)
      val afinder = AssetFinder(
        Some("footag"),
        Some(Status.Allocated.get),
        AssetType.ServerNode,
        Some(somedate),
        Some(somedate),
        Some(somedate),
        Some(somedate),
        None,
        None
      )
      val ipmiTuples = (IpmiInfo.Enum.IpmiAddress -> "ipmi_address") :: (IpmiInfo.Enum.IpmiUsername -> "ipmi_username") :: Nil
      val metaTuples = (AssetMeta("meta1", 0, "meta1", "meta1") -> "meta1_value") :: (AssetMeta("meta2", 0, "meta2", "meta2") -> "meta2_value") :: Nil
      val ipAddresses = List("1.2.3.4")
      val resultTuple = (ipmiTuples, metaTuples, ipAddresses)
      val expected: SolrExpression = SolrAndOp(Set(
        SolrKeyVal("IPMI_ADDRESS", SolrStringValue("ipmi_address", Unquoted)),
        SolrKeyVal("IPMI_USERNAME", SolrStringValue("ipmi_username", Unquoted)),
        SolrKeyVal("meta1", SolrStringValue("meta1_value", Unquoted)),
        SolrKeyVal("meta2", SolrStringValue("meta2_value", Unquoted)),
        SolrKeyVal("ip_address", SolrStringValue("1.2.3.4", Unquoted)),
        SolrKeyRange("created", Some(SolrStringValue(dateString, StrictUnquoted)),Some(SolrStringValue(dateString, StrictUnquoted)), true),
        SolrKeyRange("updated", Some(SolrStringValue(dateString, StrictUnquoted)),Some(SolrStringValue(dateString, StrictUnquoted)), true),
        SolrKeyVal("tag", SolrStringValue("footag", Unquoted)),
        SolrKeyVal("status", SolrIntValue(Status.Allocated.get.id)),
        SolrKeyVal("assetType", SolrIntValue(AssetType.ServerNode.get.id))
      ))
      val p = AssetSearchParameters(resultTuple, afinder)
      p.toSolrExpression must_== expected
    }
  }
}
/**
 * Smoke tests for the embedded Solr server used by the suite: the solr home
 * must be configured and the embedded server must start without error.
 */
class SolrServerSpecification extends mutable.Specification {
  // Solr home directory as resolved from configuration.
  def home = SolrConfig.embeddedSolrHome
  "solr server" should {
    "get solrhome config" in new WithApplication {
      home mustNotEqual "NONE"
    }
    // Reaching the assertion at all proves the embedded server booted.
    "launch embedded server without crashing" in new WithApplication(FakeApplication(additionalConfiguration = Map(
      "solr.enabled" -> true
    ))) {
      true must_== true
    }
  }
}
| funzoneq/collins | test/collins/util/SolrSpec.scala | Scala | apache-2.0 | 36,619 |
package com.szadowsz.spark.ml
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.{StructField, StructType}
/**
* Created on 29/04/2016.
*/
package object expr {

  /** Convenience lookups on a [[StructType]] schema. */
  implicit class SchemaUtil(val schema: StructType) {

    /** Position of the named column within the schema, or -1 when absent. */
    def fieldIndexOf(col: String): Int = schema.fieldNames.indexOf(col)

    /** The named column's field definition, if the schema contains it. */
    def get(col: String): Option[StructField] = schema.find(_.name == col)
  }

  /** Internal summarisation helpers for a [[DataFrame]]. */
  implicit private[szadowsz] class DataFrameUtil(val dataset: DataFrame) {

    /**
      * Method to count the number of rows that have each category.
      *
      * Null cells are skipped. The result contains one map per requested
      * column, in the order supplied, keyed by distinct value with the number
      * of rows carrying that value.
      *
      * @param head required column name
      * @param tail optional additional column names
      * @return aggregated count of distinct categories
      */
    def countDistinct(head: String, tail: String*): Array[Map[Any, Long]] = {
      val selected = dataset.select(head, tail: _*)
      val width = selected.schema.fields.length

      // Tally each column independently within every partition so that only
      // the small per-partition maps are shipped to the driver.
      val partials = selected.rdd.mapPartitions(rows => {
        val tallies = Array.fill(width)(Map[Any, Long]())
        rows.foreach { row =>
          row.toSeq.zipWithIndex.foreach {
            case (cell, idx) if cell != null =>
              val tally = tallies(idx)
              tallies(idx) = tally.updated(cell, tally.getOrElse(cell, 0L) + 1L)
            case _ => // null cells do not contribute to any category
          }
        }
        Iterator.single(tallies)
      }, false)

      // Merge the per-partition tallies column by column.
      partials.reduce { (left, right) =>
        left.zip(right).map { case (lhs, rhs) =>
          lhs.foldLeft(rhs) { case (merged, (category, count)) =>
            merged.updated(category, merged.getOrElse(category, 0L) + count)
          }
        }
      }
    }
  }
}
| zakski/project-cadisainmduit | module/spark/src/main/scala/com/szadowsz/spark/ml/expr/expr.scala | Scala | apache-2.0 | 1,694 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.internal.fsm.streaming
import io.rdbc.pgsql.core._
import io.rdbc.pgsql.core.exception.PgProtocolViolationException
import io.rdbc.pgsql.core.internal.fsm.{State, StateAction}
import io.rdbc.pgsql.core.internal.protocol.messages.backend._
import io.rdbc.pgsql.core.internal.protocol.messages.frontend.PortalName
import io.rdbc.pgsql.core.internal.{PgMsgHandler, PgRowPublisher, PortalDescData}
import scala.concurrent.{ExecutionContext, Promise}
/**
 * FSM state entered after a Parse/Bind/Describe sequence has been sent:
 * waits for the portal description (or NoData) and the subsequent
 * ReadyForQuery, then hands off to the row-streaming state.
 */
private[core]
class StrmWaitingForDescribe private[fsm](txMgmt: Boolean,
                                          publisher: PgRowPublisher,
                                          portalName: Option[PortalName],
                                          describePromise: Promise[PortalDescData],
                                          parsePromise: Promise[Unit])
                                         (implicit out: ChannelWriter, ec: ExecutionContext)
  extends State {

  // Set when RowDescription/NoData arrives; its presence also means that
  // describePromise has already been completed successfully.
  @volatile private[this] var maybePortalDescData = Option.empty[PortalDescData]

  protected val msgHandler: PgMsgHandler = {
    case ParseComplete =>
      parsePromise.success(())
      stay

    case BindComplete => stay
    case _: ParameterDescription => stay

    // NoData means the portal yields no rows; treat it as an empty description.
    case NoData => completeDescribePromise(RowDescription.empty)
    case rowDesc: RowDescription => completeDescribePromise(rowDesc)

    case _: ReadyForQuery =>
      maybePortalDescData match {
        case None =>
          val ex = new PgProtocolViolationException(
            "Ready for query received without prior row description"
          )
          fatal(ex) andThenF sendFailureToClient(ex)

        case Some(afterDescData) =>
          goto(State.Streaming.pullingRows(txMgmt, afterDescData, publisher)) andThenF publisher.resume()
      }
  }

  /**
   * Records the portal description and completes describePromise with it,
   * creating fresh promises for warnings and the affected-row count that are
   * fulfilled later by the streaming states.
   */
  private def completeDescribePromise(rowDesc: RowDescription): StateAction = {
    val warningsPromise = Promise[Vector[StatusMessage.Notice]]
    val rowsAffectedPromise = Promise[Long]
    val portalDescData = PortalDescData(
      rowDesc = rowDesc,
      warningsPromise = warningsPromise,
      rowsAffectedPromise = rowsAffectedPromise
    )
    maybePortalDescData = Some(portalDescData)
    describePromise.success(portalDescData)
    stay
  }

  /** Propagates a failure to every client-visible promise and the publisher. */
  private def sendFailureToClient(ex: Throwable): Unit = {
    // Some of these promises may already be completed by an earlier protocol
    // message: parsePromise by ParseComplete and, whenever maybePortalDescData
    // is Some, describePromise by completeDescribePromise. Promise.failure
    // throws IllegalStateException on an already-completed promise, so
    // tryFailure is used to fail only the ones still pending.
    maybePortalDescData match {
      case Some(PortalDescData(_, warningsPromise, rowsAffectedPromise)) =>
        warningsPromise.tryFailure(ex)
        rowsAffectedPromise.tryFailure(ex)
        parsePromise.tryFailure(ex)
        describePromise.tryFailure(ex)
      case None =>
        parsePromise.tryFailure(ex)
        describePromise.tryFailure(ex)
    }
    publisher.failure(ex)
    //TODO will this failure be signalled twice by publisher?
    // one by failure, second by describePromise?
  }

  protected def onNonFatalError(ex: Throwable): StateAction = {
    goto(State.Streaming.queryFailed(txMgmt, portalName) {
      sendFailureToClient(ex)
    })
  }

  protected def onFatalError(ex: Throwable): Unit = {
    sendFailureToClient(ex)
  }
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingForDescribe.scala | Scala | apache-2.0 | 3,775 |
package epfl
class LabeledGraph extends Graph {
class Node(label: String) extends BaseNode {
def getLabel: String = label
def self: Node = this
}
}
| ashawley/odersky-zenger-2005 | src/main/scala/epfl/LabeledGraph.scala | Scala | cc0-1.0 | 161 |
package mesosphere.marathon.integration.setup
import mesosphere.marathon.integration.facades.MarathonFacade
import org.scalatest.{ ConfigMap, Suite }
import org.slf4j.LoggerFactory
object MarathonClusterIntegrationTest {
  // NOTE(review): not referenced by the companion trait below — presumably
  // kept for ad-hoc debugging; confirm it is still needed.
  private val log = LoggerFactory.getLogger(getClass)
}
/**
* Convenient trait to test against a Marathon cluster.
*
* The cluster sized is determined by [[IntegrationTestConfig.clusterSize]].
*/
trait MarathonClusterIntegrationTest extends SingleMarathonIntegrationTest { self: Suite =>

  /** One facade per configured Marathon instance, all under the same test base path. */
  lazy val marathonFacades: Seq[MarathonFacade] =
    config.marathonUrls.map(new MarathonFacade(_, testBasePath))

  override protected def beforeAll(configMap: ConfigMap): Unit = {
    super.beforeAll(configMap)
    val marathonArgs =
      List("--master", config.master, "--event_subscriber", "http_callback") ++ extraMarathonParameters
    // The head port is skipped — presumably it is started by the
    // single-instance base trait; confirm against SingleMarathonIntegrationTest.
    for (port <- config.marathonPorts.tail) {
      startMarathon(port, marathonArgs: _*)
    }
  }
}
| ss75710541/marathon | src/test/scala/mesosphere/marathon/integration/setup/MarathonClusterIntegrationTest.scala | Scala | apache-2.0 | 947 |
import java.io.IOException
import java.util.Scanner
object App {
  private val EXIT: String = "exit"
  private val POLL: String = "poll"

  /**
   * Entry point: reads commands from stdin in a loop. "poll" polls every
   * sensor once via the control unit; "exit" (or end of input) terminates.
   */
  @throws[IOException]
  def main(args: Array[String]): Unit = {
    lazy val sensors = List(new FireSensor, new SmokeSensor)
    lazy val controlUnit: ControlUnit = new ControlUnit(sensors)
    val scanner: Scanner = new Scanner(System.in)
    try {
      var input: String = ""
      while (input != EXIT) {
        println("Type \\"poll\\" to poll all sensors once or \\"exit\\" to exit")
        if (scanner.hasNextLine) {
          input = scanner.nextLine
          if (input == POLL) {
            controlUnit.pollSensors()
          }
        } else {
          // Stdin exhausted (e.g. piped input): stop cleanly instead of
          // letting Scanner.nextLine throw NoSuchElementException.
          input = EXIT
        }
      }
    } finally {
      scanner.close()
    }
  }
}
| BBK-PiJ-2015-67/sdp-portfolio | exercises/week02/alarmsystem-scala/src/App.scala | Scala | unlicense | 618 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import java.io.File
import com.google.common.base.Charsets
import com.google.common.io.{BaseEncoding, Files}
import io.fabric8.kubernetes.api.model.{ContainerBuilder, HasMetadata, PodBuilder, Secret}
import org.mockito.{Mock, MockitoAnnotations}
import org.scalatest.BeforeAndAfter
import scala.collection.JavaConverters._
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.{KubernetesConf, KubernetesDriverSpecificConf, SparkPod}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.util.Utils
class DriverKubernetesCredentialsFeatureStepSuite extends SparkFunSuite with BeforeAndAfter {
private val KUBERNETES_RESOURCE_NAME_PREFIX = "spark"
private val APP_ID = "k8s-app"
private var credentialsTempDirectory: File = _
private val BASE_DRIVER_POD = SparkPod.initialPod()
@Mock
private var driverSpecificConf: KubernetesDriverSpecificConf = _
before {
MockitoAnnotations.initMocks(this)
credentialsTempDirectory = Utils.createTempDir()
}
after {
credentialsTempDirectory.delete()
}
test("Don't set any credentials") {
val kubernetesConf = KubernetesConf(
new SparkConf(false),
driverSpecificConf,
KUBERNETES_RESOURCE_NAME_PREFIX,
APP_ID,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None)
val kubernetesCredentialsStep = new DriverKubernetesCredentialsFeatureStep(kubernetesConf)
assert(kubernetesCredentialsStep.configurePod(BASE_DRIVER_POD) === BASE_DRIVER_POD)
assert(kubernetesCredentialsStep.getAdditionalPodSystemProperties().isEmpty)
assert(kubernetesCredentialsStep.getAdditionalKubernetesResources().isEmpty)
}
test("Only set credentials that are manually mounted.") {
val submissionSparkConf = new SparkConf(false)
.set(
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$OAUTH_TOKEN_FILE_CONF_SUFFIX",
"/mnt/secrets/my-token.txt")
.set(
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CLIENT_KEY_FILE_CONF_SUFFIX",
"/mnt/secrets/my-key.pem")
.set(
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CLIENT_CERT_FILE_CONF_SUFFIX",
"/mnt/secrets/my-cert.pem")
.set(
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CA_CERT_FILE_CONF_SUFFIX",
"/mnt/secrets/my-ca.pem")
val kubernetesConf = KubernetesConf(
submissionSparkConf,
driverSpecificConf,
KUBERNETES_RESOURCE_NAME_PREFIX,
APP_ID,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None)
val kubernetesCredentialsStep = new DriverKubernetesCredentialsFeatureStep(kubernetesConf)
assert(kubernetesCredentialsStep.configurePod(BASE_DRIVER_POD) === BASE_DRIVER_POD)
assert(kubernetesCredentialsStep.getAdditionalKubernetesResources().isEmpty)
val resolvedProperties = kubernetesCredentialsStep.getAdditionalPodSystemProperties()
resolvedProperties.foreach { case (propKey, propValue) =>
assert(submissionSparkConf.get(propKey) === propValue)
}
}
test("Mount credentials from the submission client as a secret.") {
val caCertFile = writeCredentials("ca.pem", "ca-cert")
val clientKeyFile = writeCredentials("key.pem", "key")
val clientCertFile = writeCredentials("cert.pem", "cert")
val submissionSparkConf = new SparkConf(false)
.set(
s"$KUBERNETES_AUTH_DRIVER_CONF_PREFIX.$OAUTH_TOKEN_CONF_SUFFIX",
"token")
.set(
s"$KUBERNETES_AUTH_DRIVER_CONF_PREFIX.$CLIENT_KEY_FILE_CONF_SUFFIX",
clientKeyFile.getAbsolutePath)
.set(
s"$KUBERNETES_AUTH_DRIVER_CONF_PREFIX.$CLIENT_CERT_FILE_CONF_SUFFIX",
clientCertFile.getAbsolutePath)
.set(
s"$KUBERNETES_AUTH_DRIVER_CONF_PREFIX.$CA_CERT_FILE_CONF_SUFFIX",
caCertFile.getAbsolutePath)
val kubernetesConf = KubernetesConf(
submissionSparkConf,
driverSpecificConf,
KUBERNETES_RESOURCE_NAME_PREFIX,
APP_ID,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None)
val kubernetesCredentialsStep = new DriverKubernetesCredentialsFeatureStep(kubernetesConf)
val resolvedProperties = kubernetesCredentialsStep.getAdditionalPodSystemProperties()
val expectedSparkConf = Map(
s"$KUBERNETES_AUTH_DRIVER_CONF_PREFIX.$OAUTH_TOKEN_CONF_SUFFIX" -> "<present_but_redacted>",
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$OAUTH_TOKEN_FILE_CONF_SUFFIX" ->
DRIVER_CREDENTIALS_OAUTH_TOKEN_PATH,
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CLIENT_KEY_FILE_CONF_SUFFIX" ->
DRIVER_CREDENTIALS_CLIENT_KEY_PATH,
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CLIENT_CERT_FILE_CONF_SUFFIX" ->
DRIVER_CREDENTIALS_CLIENT_CERT_PATH,
s"$KUBERNETES_AUTH_DRIVER_MOUNTED_CONF_PREFIX.$CA_CERT_FILE_CONF_SUFFIX" ->
DRIVER_CREDENTIALS_CA_CERT_PATH)
assert(resolvedProperties === expectedSparkConf)
assert(kubernetesCredentialsStep.getAdditionalKubernetesResources().size === 1)
val credentialsSecret = kubernetesCredentialsStep
.getAdditionalKubernetesResources()
.head
.asInstanceOf[Secret]
assert(credentialsSecret.getMetadata.getName ===
s"$KUBERNETES_RESOURCE_NAME_PREFIX-kubernetes-credentials")
val decodedSecretData = credentialsSecret.getData.asScala.map { data =>
(data._1, new String(BaseEncoding.base64().decode(data._2), Charsets.UTF_8))
}
val expectedSecretData = Map(
DRIVER_CREDENTIALS_CA_CERT_SECRET_NAME -> "ca-cert",
DRIVER_CREDENTIALS_OAUTH_TOKEN_SECRET_NAME -> "token",
DRIVER_CREDENTIALS_CLIENT_KEY_SECRET_NAME -> "key",
DRIVER_CREDENTIALS_CLIENT_CERT_SECRET_NAME -> "cert")
assert(decodedSecretData === expectedSecretData)
val driverPod = kubernetesCredentialsStep.configurePod(BASE_DRIVER_POD)
val driverPodVolumes = driverPod.pod.getSpec.getVolumes.asScala
assert(driverPodVolumes.size === 1)
assert(driverPodVolumes.head.getName === DRIVER_CREDENTIALS_SECRET_VOLUME_NAME)
assert(driverPodVolumes.head.getSecret != null)
assert(driverPodVolumes.head.getSecret.getSecretName === credentialsSecret.getMetadata.getName)
val driverContainerVolumeMount = driverPod.container.getVolumeMounts.asScala
assert(driverContainerVolumeMount.size === 1)
assert(driverContainerVolumeMount.head.getName === DRIVER_CREDENTIALS_SECRET_VOLUME_NAME)
assert(driverContainerVolumeMount.head.getMountPath === DRIVER_CREDENTIALS_SECRETS_BASE_DIR)
}
/**
 * Writes the given credential text (UTF-8) to a file with the given name under
 * the suite's shared temp directory, returning the file for use by the test.
 */
private def writeCredentials(credentialsFileName: String, credentialsContents: String): File = {
  val destination = new File(credentialsTempDirectory, credentialsFileName)
  Files.write(credentialsContents, destination, Charsets.UTF_8)
  destination
}
}
| ahnqirage/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverKubernetesCredentialsFeatureStepSuite.scala | Scala | apache-2.0 | 7,821 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe
import java.util.UUID
import wvlet.airframe.AirframeException.CYCLIC_DEPENDENCY
import wvlet.airframe.lifecycle.{AFTER_START, BEFORE_SHUTDOWN, ON_INIT, ON_INJECT, ON_SHUTDOWN, ON_START}
import wvlet.airframe.surface.Surface
import wvlet.log.LogSupport
/**
 * Companion namespace holding the binding model: every rule added through the
 * `Binder` DSL below becomes one of the [[Binder.Binding]] variants defined here.
 */
object Binder {

  /** A single dependency-injection rule describing how to provide a value for the `from` surface. */
  sealed trait Binding extends Serializable {
    // Overridden to true by bindings whose produced value is shared (singleton).
    def forSingleton: Boolean = false
    // The surface (type descriptor) this binding provides.
    def from: Surface
    // Location in user code where the binding was declared; used for error reporting.
    def sourceCode: SourceCode
  }

  /** Binds surface `from` to another surface `to`, instantiated on demand. */
  case class ClassBinding(from: Surface, to: Surface, sourceCode: SourceCode) extends Binding {
    // Binding a surface to itself can never be resolved, so fail eagerly at construction.
    if (from == to) {
      throw new CYCLIC_DEPENDENCY(List(to), sourceCode)
    }
  }

  /** Binds `from` to `to` as a shared instance; `isEager` requests creation at session start. */
  case class SingletonBinding(from: Surface, to: Surface, isEager: Boolean, sourceCode: SourceCode) extends Binding {
    override def forSingleton: Boolean = true
  }

  /**
   * Binds `from` to the result of a user-provided factory function wrapped in
   * [[DependencyFactory]]. Identity is a random UUID rather than structural
   * equality over the factory (see `equals` below for why).
   */
  case class ProviderBinding(
      factory: DependencyFactory,
      provideSingleton: Boolean,
      eager: Boolean,
      sourceCode: SourceCode
  ) extends Binding {
    // An eager binding is only meaningful when the result is a singleton.
    assert(!eager || (eager && provideSingleton))
    def from: Surface = factory.from
    override def forSingleton: Boolean = provideSingleton
    private val uuid: UUID = UUID.randomUUID()
    override def hashCode(): Int = { uuid.hashCode() }
    override def equals(other: Any): Boolean = {
      other match {
        case that: ProviderBinding =>
          // Scala 2.12 generates Lambda for Function0, and the class might be generated every time, so
          // comparing functionClasses doesn't work
          (that canEqual this) && this.uuid == that.uuid
        case _ => false
      }
    }
  }

  /**
   * Wraps a user factory function together with the surfaces of the arguments
   * it consumes. `factory` is kept as `Any` and cast by arity in [[create]].
   */
  case class DependencyFactory(from: Surface, dependencyTypes: Seq[Surface], factory: Any) {
    override def toString: String = {
      val deps = if (dependencyTypes.isEmpty) {
        "()"
      } else {
        s"(${dependencyTypes.mkString(",")})"
      }
      s"${deps}=>${from} [${factory}]"
    }

    /**
     * Applies the wrapped factory to the already-resolved dependency values.
     * Arities 0 through 5 are supported; anything larger is unexpected here.
     */
    def create(args: Seq[Any]): Any = {
      require(args.length == dependencyTypes.length)
      args.length match {
        case 0 =>
          // We need to copy the F0 instance in order to make Design immutable
          factory.asInstanceOf[LazyF0[_]].copy.eval
        case 1 =>
          factory.asInstanceOf[Any => Any](args(0))
        case 2 =>
          factory.asInstanceOf[(Any, Any) => Any](args(0), args(1))
        case 3 =>
          factory.asInstanceOf[(Any, Any, Any) => Any](args(0), args(1), args(2))
        case 4 =>
          factory.asInstanceOf[(Any, Any, Any, Any) => Any](args(0), args(1), args(2), args(3))
        case 5 =>
          factory.asInstanceOf[(Any, Any, Any, Any, Any) => Any](args(0), args(1), args(2), args(3), args(4))
        case other =>
          throw new IllegalStateException("Should never reach")
      }
    }
  }
}
import wvlet.airframe.Binder._
/**
 * DSL entry point for describing how the surface `from` (representing type [[A]])
 * is provided within a [[Design]]. Every method returns an immutable
 * [[DesignWithContext]] carrying the newly added rule.
 */
class Binder[A](val design: Design, val from: Surface, val sourceCode: SourceCode) extends BinderImpl[A] {

  // Single funnel for the lifecycle-hook registrations below.
  private def registerHook(hook: LifeCycleHookDesign): DesignWithContext[A] =
    design.withLifeCycleHook[A](hook)

  /**
   * Bind the type to a given instance. The instance will be instantiated as an eager singleton when creating a
   * session. Note that as you create a new session, new instance will be generated.
   */
  def toInstance(any: => A): DesignWithContext[A] = {
    trace(s"binder toInstance: ${from}")
    val factory = DependencyFactory(from, Seq.empty, LazyF0(any).asInstanceOf[Any])
    design.addBinding[A](ProviderBinding(factory, provideSingleton = true, eager = true, sourceCode))
  }

  /**
   * Bind an instance lazily (no singleton). This is used internally for implementing bindFactory[I1 => A]
   */
  def toLazyInstance(any: => A): DesignWithContext[A] = {
    trace(s"binder toLazyInstance: ${from}")
    val factory = DependencyFactory(from, Seq.empty, LazyF0(any).asInstanceOf[Any])
    design.addBinding[A](ProviderBinding(factory, provideSingleton = false, eager = false, sourceCode))
  }

  /** Registers a lazy SingletonBinding from [[A]] to itself. */
  def toSingleton: DesignWithContext[A] =
    design.addBinding[A](SingletonBinding(from, from, isEager = false, sourceCode))

  /** Registers an eager SingletonBinding from [[A]] to itself. */
  def toEagerSingleton: DesignWithContext[A] =
    design.addBinding[A](SingletonBinding(from, from, isEager = true, sourceCode))

  /** Registers an ON_INIT lifecycle hook for this surface. */
  def onInit(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_INIT, from, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_INJECT lifecycle hook for this surface. */
  def onInject(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_INJECT, from, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_START lifecycle hook for this surface. */
  def onStart(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_START, from, body.asInstanceOf[Any => Unit]))

  /** Registers an AFTER_START lifecycle hook for this surface. */
  def afterStart(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(AFTER_START, from, body.asInstanceOf[Any => Unit]))

  /** Registers a BEFORE_SHUTDOWN lifecycle hook for this surface. */
  def beforeShutdown(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(BEFORE_SHUTDOWN, from, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_SHUTDOWN lifecycle hook for this surface. */
  def onShutdown(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_SHUTDOWN, from, body.asInstanceOf[Any => Unit]))
}
}
/**
 * DesignWithContext[A] is a wrapper of Design class for chaining lifecycle hooks for the same type A. This can be
 * safely cast to just Design
 */
class DesignWithContext[A](
    design: Design,
    lastSurface: Surface
) extends Design(design.designOptions, design.binding, design.hooks) {

  // All hook-chaining methods below delegate here with the appropriate event.
  private def registerHook(hook: LifeCycleHookDesign): DesignWithContext[A] =
    design.withLifeCycleHook[A](hook)

  /** Registers an ON_INIT lifecycle hook for the most recently bound surface. */
  def onInit(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_INIT, lastSurface, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_INJECT lifecycle hook for the most recently bound surface. */
  def onInject(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_INJECT, lastSurface, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_START lifecycle hook for the most recently bound surface. */
  def onStart(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_START, lastSurface, body.asInstanceOf[Any => Unit]))

  /** Registers an AFTER_START lifecycle hook for the most recently bound surface. */
  def afterStart(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(AFTER_START, lastSurface, body.asInstanceOf[Any => Unit]))

  /** Registers a BEFORE_SHUTDOWN lifecycle hook for the most recently bound surface. */
  def beforeShutdown(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(BEFORE_SHUTDOWN, lastSurface, body.asInstanceOf[Any => Unit]))

  /** Registers an ON_SHUTDOWN lifecycle hook for the most recently bound surface. */
  def onShutdown(body: A => Unit): DesignWithContext[A] =
    registerHook(LifeCycleHookDesign(ON_SHUTDOWN, lastSurface, body.asInstanceOf[Any => Unit]))
}
| wvlet/airframe | airframe-di/src/main/scala/wvlet/airframe/Binder.scala | Scala | apache-2.0 | 6,996 |
package org.everpeace.scalamata
/**
 * Demonstrates composing finite automata with scalamata's combinators:
 * concatenation (>>), union (||), complement (!), optional (?),
 * Kleene star (*) and Kleene plus (+).
 *
 * Fix: corrected the typo "combyning" -> "combining" in two printed messages.
 */
object CompositionExample extends App {

  import org.everpeace.scalamata._

  // States of the hand-built two-step DFA: S0 -> S1 -> S2; Sink traps everything else.
  sealed abstract class S
  case object S0 extends S
  case object S1 extends S
  case object S2 extends S
  case object Sink extends S

  /**
   * Transition function parameterized by a character `c`: advances
   * S0 -> S1 -> S2 on `c` and falls into Sink on any mismatch or extra input,
   * so the DFA built from it accepts exactly the string "cc".
   */
  def σ(c: Char) =
    (s: S, x: Char) => (s, x) match {
      case (S0, _c) if c == _c => S1
      case (S0, _) => Sink
      case (S1, _c) if c == _c => S2
      case (S1, _) => Sink
      case (S2, _) => Sink
      case (Sink, _) => Sink
    }

  // DFAs accepting exactly "aa" and exactly "bb" (accepting state: S2).
  val only_a2 = DFA(σ('a'), S0, Set[S](S2))
  val only_b2 = DFA(σ('b'), S0, Set[S](S2))

  println("check Automata for aa")
  check("", only_a2)
  check("a", only_a2)
  check("a" * 2, only_a2)
  check("a" * 3, only_a2)

  println("\\ncheck Automata for aabb by combining two automata for aa and for bb")
  check("a" * 1 + "b" * 1, only_a2 >> only_b2)
  check("a" * 2 + "b" * 2, only_a2 >> only_b2)
  check("a" * 3 + "b" * 3, only_a2 >> only_b2)

  println("\\ncheck Automata for aa|bb by combining two automata for aa and for bb")
  check("a" * 1, only_a2 || only_b2)
  check("a" * 2, only_a2 || only_b2)
  check("b" * 1, only_a2 || only_b2)
  check("b" * 2, only_a2 || only_b2)

  println("\\ncheck Automata for complement of aa")
  check("", !only_a2)
  check("a", !only_a2)
  check("a" * 2, !only_a2)
  check("ab", !only_a2)
  check("a" * 3, !only_a2)
  check("a" * 4, !only_a2)

  println("\\ncheck Automata for aa?")
  check("", only_a2 ?)
  check("a", only_a2 ?)
  check("a" * 2, only_a2 ?)

  println("\\ncheck Automata for (aa)*")
  check("", only_a2 *)
  check("a", only_a2 *)
  check("a" * 2, only_a2 *)
  check("a" * 3, only_a2 *)
  check("a" * 4, only_a2 *)
  check("a" * 5, only_a2 *)
  check("a" * 6, only_a2 *)

  println("\\ncheck Automata for aa+ (=aa(aa)*)")
  check("", only_a2 +)
  check("a", only_a2 +)
  check("a" * 2, only_a2 +)
  check("a" * 3, only_a2 +)
  check("a" * 4, only_a2 +)
  check("a" * 5, only_a2 +)
  check("a" * 6, only_a2 +)

  /** Runs the automaton on the input and prints whether it accepts. */
  def check(input: String, a: Automata[_, Char]) = println("%-6s".format(input) + "=> " + a.accept(input.toSeq))
} | everpeace/scalamata | examples/src/main/scala/org/everpeace/scalamata/CompositionExample.scala | Scala | mit | 2,055 |
package com.twitter.diffy.analysis
import javax.inject.Inject
import com.twitter.diffy.compare.{Difference, PrimitiveDifference}
import com.twitter.diffy.lifter.{JsonLifter, Message}
import com.twitter.diffy.proxy.DifferenceConf
import com.twitter.diffy.thriftscala._
import com.twitter.finagle.tracing.Trace
import com.twitter.logging._
import com.twitter.util.{Future, Time}
import com.twitter.util.StorageUnitConversions._
import scala.util.Random
object DifferenceAnalyzer {
  // Endpoint label used when no endpoint name is present on any of the instances.
  val UndefinedEndpoint = Some("Undefined_endpoint")

  val log = Logger(classOf[DifferenceAnalyzer])
  // Side effects at object initialization: the shared logger is configured once,
  // the first time this companion object is touched, to write into a rotating
  // file instead of propagating to parent handlers.
  log.setUseParentHandlers(false)
  log.addHandler(
    FileHandler(
      filename = "differences.log",
      rollPolicy = Policy.MaxSize(128.megabytes),
      rotateCount = 2
    )()
  )

  // Endpoint names may contain '/'; flatten them so they are safe to embed in
  // log file names (see the per-endpoint FileHandler in DifferenceAnalyzer.apply).
  def normalizeEndpointName(name: String) = name.replace("/", "-")
}
// Presumably identifies a response field by endpoint name plus field-path prefix.
// NOTE(review): not referenced anywhere else in this file - confirm external usage.
case class Field(endpoint: String, prefix: String)
/**
 * Compares the responses returned by the candidate, primary and secondary
 * instances for a single request, records raw and noise difference counts,
 * and persists/logs any raw differences found.
 */
class DifferenceAnalyzer @Inject()(
    rawCounter: RawDifferenceCounter,
    noiseCounter: NoiseDifferenceCounter,
    store: InMemoryDifferenceCollector)
{
  import DifferenceAnalyzer._

  /**
   * Analyzes one request/response tuple. Does nothing when no endpoint name can
   * be resolved (see getEndpointName). Otherwise:
   *  - raw diff  = primary vs candidate (the signal we care about)
   *  - noise diff = primary vs secondary (two identical deployments; differences
   *    here indicate nondeterminism rather than regressions)
   */
  def apply(
    request: Message,
    candidate: Message,
    primary: Message,
    secondary: Message,
    differenceConf: DifferenceConf
  ): Unit = {
    getEndpointName(request.endpoint, candidate.endpoint,
      primary.endpoint, secondary.endpoint) foreach { endpointName =>
      // If there is no traceId then generate our own
      val id = Trace.idOption map { _.traceId.toLong } getOrElse(Random.nextLong)
      val rawDiff = Difference(primary, candidate, differenceConf).flattened
      val noiseDiff = Difference(primary, secondary, differenceConf).flattened
      rawCounter.counter.count(endpointName, rawDiff)
      noiseCounter.counter.count(endpointName, noiseDiff)
      if (rawDiff.size > 0) {
        val diffResult = DifferenceResult(
          id,
          Trace.idOption map { _.traceId.toLong },
          endpointName,
          Time.now.inMillis,
          differencesToJson(rawDiff),
          JsonLifter.encode(request.result),
          Responses(
            candidate = JsonLifter.encode(candidate.result),
            primary = JsonLifter.encode(primary.result),
            secondary = JsonLifter.encode(secondary.result)
          )
        )
        // NOTE(review): the shared companion-object logger is re-pointed at a
        // per-endpoint file on every difference; concurrent requests for
        // different endpoints will race on this handler swap - verify intended.
        log.clearHandlers();
        log.addHandler( FileHandler(
          filename = normalizeEndpointName(endpointName) + "Differences.log",
          rollPolicy = Policy.MaxSize(128.megabytes),
          rotateCount = 2
        )())
        log.info(s"diff[$id]=$diffResult")
        store.create(diffResult)
      } else {
        log.debug(s"diff[$id]=NoDifference")
      }
    }
  }

  /** Resets both counters and the in-memory difference store. */
  def clear(): Future[Unit] =
    Future.join(
      rawCounter.counter.clear(),
      noiseCounter.counter.clear(),
      store.clear()
    ) map { _ => () }

  /**
   * Serializes each per-field difference to JSON. Long-valued primitive
   * differences are stringified first (presumably to avoid precision loss in
   * JSON consumers - confirm).
   */
  def differencesToJson(diffs: Map[String, Difference]): Map[String, String] =
    diffs map {
      case (field, diff @ PrimitiveDifference(_: Long, _)) =>
        field ->
          JsonLifter.encode(
            diff.toMap map {
              case (k, v) => k -> v.toString
            }
          )
      case (field, diff) => field -> JsonLifter.encode(diff.toMap)
    }

  /**
   * Resolves the endpoint name to attribute this sample to, normalized via
   * normalizeEndpointName. Returns None when the sample should be discarded.
   */
  private[this] def getEndpointName(
    requestEndpoint: Option[String],
    candidateEndpoint: Option[String],
    primaryEndpoint: Option[String],
    secondaryEndpoint: Option[String]): Option[String] = {
    val rawEndpointName = (requestEndpoint, candidateEndpoint, primaryEndpoint, secondaryEndpoint) match {
      case (Some(_), _, _, _) => requestEndpoint
      // undefined endpoint when action header is missing from all three instances
      case (_, None, None, None) => UndefinedEndpoint
      // the assumption is that primary and secondary should call the same endpoint,
      // otherwise it's noise and we should discard the request
      case (_, None, _, _) if primaryEndpoint == secondaryEndpoint => primaryEndpoint
      case (_, None, _, _) => None
      case (_, Some(candidate), _, _) => candidateEndpoint
    }
    rawEndpointName map { normalizeEndpointName(_) }
  }
}
| ljbx/diffy | src/main/scala/com/twitter/diffy/analysis/DifferenceCollector.scala | Scala | apache-2.0 | 4,105 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.resources.icons
import java.awt._
import java.awt.geom.Rectangle2D
import java.awt.image.BufferedImage
import javax.swing.ImageIcon
import jiconfont.icons.{Elusive, Entypo, FontAwesome}
import jiconfont.{IconCode, IconFont}
import scalismo.ui.view.util.Constants
object FontIcon {
  // this is a fully transparent white, so not a color we'll encounter.
  // Passing it as the color to load() activates the rainbow-coloring pass below.
  val RainbowColor: Color = new Color(0x00ffffff, true)

  // Map of supported icon fonts, keyed by font family name.
  private val fonts: Map[String, Font] = {
    def createTuple(iconFont: IconFont): (String, Font) = {
      (iconFont.getFontFamily, Font.createFont(Font.TRUETYPE_FONT, iconFont.getFontInputStream))
    }
    // this is where a new supported icon font would have to be added.
    val iconFonts = Seq(FontAwesome.getIconFont, Entypo.getIconFont, Elusive.getIconFont)
    iconFonts.map(createTuple).toMap
  }

  /**
   * can be used to generate a FontAwesome IconCode
   * that is not defined as a constant in the FontAwesome class.
   *
   * @param char a Unicode character, for example '\\uf1e3'
   * @return an IconCode bound to the FontAwesome font, usable for the [[load]] method
   */
  def awesome(char: Char): IconCode = generic(char, FontAwesome.getIconFont)

  /** Wraps an arbitrary character of the given icon font as an ad-hoc [[IconCode]]. */
  def generic(char: Char, iconFont: IconFont): IconCode = new IconCode {
    override def getUnicode: Char = char
    override def getFontFamily: String = iconFont.getFontFamily
    override def name(): String = "generated"
  }

  /**
   * Renders the glyph of the given icon code into a bitmap icon of the
   * requested dimensions. The font size is found by trial: starting at the
   * requested height and shrinking in 0.5pt steps until the glyph's string
   * bounds fit inside width x height. Pass [[RainbowColor]] to recolor the
   * rendered glyph with a position-dependent gradient.
   */
  def load(code: IconCode,
           width: Int = Constants.StandardUnscaledIconSize,
           height: Int = Constants.StandardUnscaledIconSize,
           color: Color = Color.BLACK): FontIcon = {
    val string = Character.toString(code.getUnicode)
    val image = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB)
    val graphics: Graphics2D = image.createGraphics()
    val baseFont = fonts(code.getFontFamily)
    val needed = new Dimension
    var font = baseFont
    var bounds: Rectangle2D = null
    var actualSize: Float = height
    // loop until we find a font size where the character will fit completely into the requested size
    do {
      font = baseFont.deriveFont(actualSize)
      val metrics = graphics.getFontMetrics(font)
      bounds = metrics.getStringBounds(string, graphics)
      needed.width = Math.ceil(bounds.getWidth).toInt
      needed.height = Math.ceil(bounds.getHeight).toInt
      actualSize -= .5f
    } while (needed.width > width || needed.height > height)
    // we might be smaller than requested (normally in at most one dimension), so adjust for that
    val xOffset: Float = (width - needed.width).toFloat / 2.0f
    val yOffset: Float = (height - needed.height).toFloat / 2.0f
    // now draw the text
    graphics.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON)
    graphics.setRenderingHint(RenderingHints.KEY_FRACTIONALMETRICS, RenderingHints.VALUE_FRACTIONALMETRICS_ON)
    graphics.setColor(if (color == RainbowColor) Color.BLACK else color)
    graphics.setFont(font)
    // the y position also needs to take into account how far the character goes below the baseline
    graphics.drawString(string, xOffset, yOffset + Math.abs(bounds.getY.toFloat))
    graphics.dispose()
    if (color == RainbowColor) {
      // adapted from: http://www.java2s.com/Code/Java/2D-Graphics-GUI/RainbowColor.htm
      def rainbow(x: Int, y: Int) = {
        val red = (x * 255) / (height - 1)
        val green = (y * 255) / (width - 1)
        // the higher the "blue" value, the brighter the overall image
        val blue = 160
        ((red << 16) | (green << 8) | blue) | 0xff000000
      }
      (0 until width).foreach { x =>
        (0 until height).foreach { y =>
          val alpha = (image.getRGB(x, y) >> 24) & 0xff
          // the higher the alpha threshold, the thinner the resulting icon will appear
          if (alpha > 128) {
            image.setRGB(x, y, rainbow(x, y))
          }
        }
      }
    }
    new FontIcon(code, color, image)
  }
}
/**
 * An [[ImageIcon]] rendered from an icon-font glyph. Keeps the originating
 * [[IconCode]] and color so the icon can be re-rendered at other sizes/colors.
 */
class FontIcon(val code: IconCode, val color: Color, image: Image) extends ImageIcon(image) with ScalableIcon {

  /** Re-renders this glyph at the requested dimensions. */
  override def resize(width: Int, height: Int): FontIcon =
    FontIcon.load(code, width, height, color)

  /** Re-renders this glyph at its current size in a different color. */
  def colored(newColor: Color): FontIcon =
    FontIcon.load(code, getIconWidth, getIconHeight, newColor)
}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/resources/icons/FontIcon.scala | Scala | gpl-3.0 | 5,122 |
package me.frmr.stripe
import net.liftweb.common._
import net.liftweb.json._
import JsonDSL._
import Extraction._
import net.liftweb.util.Helpers._
import dispatch._, Defaults._
/**
 * A Stripe coupon as returned by the API. Field names mirror Stripe's JSON
 * representation (camelCased); `raw` optionally retains the original JSON.
 */
case class Coupon(
  id: String,
  livemode: Boolean,
  created: Long,
  duration: String,
  amountOff: Option[Long],
  currency: Option[String],
  durationInMonths: Option[Int],
  maxRedemptions: Option[Int],
  percentOff: Option[Int],
  redeemBy: Option[Long],
  timesRedeemed: Int,
  valid: Boolean,
  metadata: Map[String, String],
  raw: Option[JValue] = None
) extends StripeObject {
  /** Returns a copy of this coupon carrying the raw JSON it was parsed from. */
  def withRaw(raw: JValue) = copy(raw = Some(raw))
}
/**
 * API operations for Stripe coupons: create, update, plus list/get/delete via
 * the mixed-in traits.
 *
 * Cleanup: removed the redundant `.toString` on the already-String `currency`
 * value and unified the optional-parameter tuple syntax in `create`.
 */
object Coupon extends Listable[CouponList] with Gettable[Coupon] with Deleteable {
  def baseResourceCalculator(req: Req) =
    req / "coupons"

  /**
   * Creates a coupon.
   *
   * @param duration required coupon duration (e.g. "once", "repeating", "forever" per Stripe docs)
   * @param metadata arbitrary key/value pairs, encoded by metadataProcessor
   * @return the created [[Coupon]] via the implicit executor
   */
  def create(
    duration: String,
    id: Option[String] = None,
    amountOff: Option[Long] = None,
    currency: Option[String] = None,
    durationInMonths: Option[Int] = None,
    maxRedemptions: Option[Int] = None,
    percentOff: Option[Int] = None,
    redeemBy: Option[Long] = None,
    metadata: Map[String, String] = Map.empty
  )(implicit exec: StripeExecutor) = {
    val requiredParams = Map("duration" -> duration)
    // Only the options that are defined contribute a form parameter.
    val optionalParams = List(
      id.map("id" -> _),
      amountOff.map(a => "amount_off" -> a.toString),
      currency.map(c => "currency" -> c),
      durationInMonths.map(d => "duration_in_months" -> d.toString),
      maxRedemptions.map(m => "max_redemptions" -> m.toString),
      percentOff.map(p => "percent_off" -> p.toString),
      redeemBy.map(r => "redeem_by" -> r.toString)
    ).flatten.toMap
    val params = requiredParams ++ optionalParams ++ metadataProcessor(metadata)
    val uri = baseResourceCalculator(exec.baseReq)
    exec.executeFor[Coupon](uri << params)
  }

  /**
   * Updates a coupon. Only the metadata is mutable on Stripe coupons.
   *
   * @param id the coupon identifier
   * @param metadata new metadata key/value pairs
   */
  def update(
    id: String,
    metadata: Map[String, String]
  )(implicit exec: StripeExecutor) = {
    val params = metadataProcessor(metadata)
    val uri = baseResourceCalculator(exec.baseReq) / id
    exec.executeFor[Coupon](uri << params)
  }
}
| farmdawgnation/streifen | src/main/scala/me/frmr/stripe/Coupon.scala | Scala | apache-2.0 | 2,068 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.communication.security
import akka.actor.Actor
import org.apache.toree.communication.utils.OrderedSupport
import org.apache.toree.utils.LogLike
/**
 * Actor that validates the signature attached to a kernel message by
 * recomputing the HMAC over the message body and comparing. Replies to the
 * sender with a Boolean indicating validity.
 *
 * @param hmac The HMAC to use for signature validation
 */
class SignatureCheckerActor(
  private val hmac: Hmac
) extends Actor with LogLike with OrderedSupport {
  override def receive: Receive = {
    case (signature: String, blob: Seq[_]) => withProcessing {
      // Recompute the HMAC over the stringified message parts and compare.
      val blobStrings: Seq[String] = blob.map(_.toString)
      val computedHmac = hmac(blobStrings: _*)
      val signatureMatches = computedHmac == signature
      logger.trace(s"Signature ${signature} validity checked against " +
        s"hmac ${computedHmac} with outcome ${signatureMatches}")
      sender ! signatureMatches
    }
  }

  /**
   * Defines the types that will be stashed by [[waiting]]
   * while the Actor is in processing state.
   */
  override def orderedTypes(): Seq[Class[_]] = Seq(classOf[(String, Seq[_])])
}
| Myllyenko/incubator-toree | communication/src/main/scala/org/apache/toree/communication/security/SignatureCheckerActor.scala | Scala | apache-2.0 | 1,863 |
package pl.touk.nussknacker.engine.avro.source
import cats.data.Validated.Valid
import org.apache.avro.specific.SpecificRecord
import pl.touk.nussknacker.engine.api.context.ValidationContext
import pl.touk.nussknacker.engine.api.context.transformation.{DefinedEagerParameter, NodeDependencyValue}
import pl.touk.nussknacker.engine.api.process.ProcessObjectDependencies
import pl.touk.nussknacker.engine.api.typed.typing.Typed
import pl.touk.nussknacker.engine.avro.schemaregistry.SchemaRegistryProvider
import pl.touk.nussknacker.engine.avro.{AvroUtils, RuntimeSchemaData}
import pl.touk.nussknacker.engine.api.NodeId
import pl.touk.nussknacker.engine.kafka.source.KafkaSourceFactory.KafkaSourceImplFactory
import scala.reflect._
/**
 * Source factory for specific records - mainly generated from schema.
 * Unlike schema-registry-driven sources, the Avro schema here is extracted
 * from the generated SpecificRecord class V itself.
 */
class SpecificRecordKafkaAvroSourceFactory[V <: SpecificRecord: ClassTag](schemaRegistryProvider: SchemaRegistryProvider,
                                                                          processObjectDependencies: ProcessObjectDependencies,
                                                                          implProvider: KafkaSourceImplFactory[Any, V])
  extends KafkaAvroSourceFactory[Any, V](schemaRegistryProvider, processObjectDependencies, implProvider) {

  // Two-step node transformation: first resolve the topic parameter
  // (topicParamStep from the superclass), then finalize the source definition.
  override def contextTransformation(context: ValidationContext, dependencies: List[NodeDependencyValue])(implicit nodeId: NodeId): NodeTransformationDefinition =
    topicParamStep orElse {
      // Topic value known: derive the runtime schema from the SpecificRecord
      // class (no registry lookup in this step) and produce the final result.
      case step@TransformationStep((`topicParamName`, DefinedEagerParameter(topic:String, _)) :: Nil, _) =>
        val preparedTopic = prepareTopic(topic)
        val clazz = classTag[V].runtimeClass.asInstanceOf[Class[V]]
        val schemaData = RuntimeSchemaData(AvroUtils.extractAvroSpecificSchema(clazz), None)
        prepareSourceFinalResults(preparedTopic, Valid((Some(schemaData), Typed.typedClass(clazz))), context, dependencies, step.parameters, Nil)
      // Topic parameter present but not a defined eager String value: report errors.
      case step@TransformationStep((`topicParamName`, _) :: Nil, _) =>
        prepareSourceFinalErrors(context, dependencies, step.parameters, List.empty)
    }
}
| TouK/nussknacker | utils/avro-components-utils/src/main/scala/pl/touk/nussknacker/engine/avro/source/SpecificRecordKafkaAvroSourceFactory.scala | Scala | apache-2.0 | 2,113 |
//
// Logger.scala -- Scala object Logger
// Project OrcScala
//
// $Id: Logger.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by jthywiss on Aug 21, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.script
/** Logger for the orc.script subsystem.
  *
  * Shared singleton logger registered under the name "orc.script";
  * behavior is inherited from [[orc.util.Logger]].
  *
  * @author jthywiss
  */
object Logger extends orc.util.Logger("orc.script")
| laurenyew/cOrcS | src/orc/script/Logger.scala | Scala | bsd-3-clause | 605 |
package com.twitter.finagle.protobuf.rpc.channel
// NOTE(review): FrameDecoder, ChannelHandlerContext, Channel, ChannelBuffer,
// Message and Service are referenced without imports here - presumably they
// resolve via this package or the imports were lost; verify against the build.
/**
 * Server-side frame decoder for the protobuf RPC channel: turns incoming
 * bytes back into protobuf request messages using the service's descriptors.
 */
class ServerSideDecoder(val repo: MethodLookup, val service: Service) extends FrameDecoder with ProtobufDecoder {

  /** Delegates to the shared ProtobufDecoder logic, resolving methods via `repo`. */
  @throws(classOf[Exception])
  def decode(ctx: ChannelHandlerContext, channel: Channel, buf: ChannelBuffer): Object = {
    decode(ctx, channel, buf, repo)
  }

  /**
   * Looks up the request-message prototype for the named service method.
   * NOTE(review): findMethodByName presumably yields null for unknown method
   * names, which would surface as an NPE downstream - confirm expected inputs.
   */
  def getPrototype(methodName: String): Message = {
    val m = service.getDescriptorForType().findMethodByName(methodName)
    service.getRequestPrototype(m)
  }
}
| jamescway/finagle | finagle-protobuf/src/main/scala/com/twitter/finagle/protobuf/rpc/channel/ServerSideDecoder.scala | Scala | apache-2.0 | 493 |
package com.thetestpeople.trt.analysis
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import com.thetestpeople.trt.model._
import com.thetestpeople.trt.model.impl.DummyData
import com.github.nscala_time.time.Imports._
object QuickTestAnalyserTest {
  // Allowed absolute error when comparing durations expressed as fractional days.
  val Tolerance = 0.001
}
@RunWith(classOf[JUnitRunner])
class QuickTestAnalyserTest extends FlatSpec with Matchers {
  import QuickTestAnalyserTest._

  "Quick Test Analyser" should "let you compute results over time" in {
    // Three executions: passes at 2 and 4 days ago, a failure at 6 days ago.
    val testAnalyser =
      quickTestAnalyser(
        pass((2 * 24).hours.ago),
        pass((4 * 24).hours.ago),
        fail((6 * 24).hours.ago))
    // Cut-off at 1 day ago: both passes remain -> one passing block spanning ~2 days.
    // (the `val Some(...)` destructuring fails the test with a MatchError if no block exists)
    testAnalyser.ignoreExecutionsAfter(1.day.ago)
    val Some(block1) = testAnalyser.getMostRecentPassFailBlock
    block1.passed should be(true)
    block1.count should be(2)
    days(block1.duration) should be(2.0 +- Tolerance)
    // Cut-off at 3 days ago: only the 4-days-ago pass remains -> single-element block.
    testAnalyser.ignoreExecutionsAfter(3.days.ago)
    val Some(block2) = testAnalyser.getMostRecentPassFailBlock
    block2.passed should be(true)
    block2.count should be(1)
    block2.duration.getStandardDays should be(0)
    // Cut-off at 5 days ago: only the 6-days-ago failure remains -> failing block.
    testAnalyser.ignoreExecutionsAfter(5.days.ago)
    val Some(block3) = testAnalyser.getMostRecentPassFailBlock
    block3.passed should be(false)
    block3.count should be(1)
    block3.duration.getStandardDays should be(0)
  }

  // --- fixture helpers -------------------------------------------------------

  private def quickTestAnalyser(executions: ExecutionLite*) = new QuickTestAnalyser(executions.toArray)

  private def pass(executionTime: DateTime) = execution(passed = true, executionTime = executionTime)

  private def fail(executionTime: DateTime) = execution(passed = false, executionTime = executionTime)

  private def execution(executionTime: DateTime, passed: Boolean) =
    ExecutionLite(
      configuration = DummyData.Configuration1,
      testId = Id[Test](0),
      executionTime = executionTime,
      passed = passed)

  // Converts a Joda Duration to fractional days for tolerance-based comparison.
  private def days(duration: Duration) = duration.getMillis.toDouble / 1000 / 60 / 60 / 24
} | thetestpeople/trt | test/com/thetestpeople/trt/analysis/QuickTestAnalyserTest.scala | Scala | mit | 2,003 |
package stepping
import debug.Helper._
object AnonFunOnListInt { // debugger-stepping fixture: anonymous functions over List[Int]
  def main(args: Array[String]) { // NOTE(review): stepping tests likely pin exact line numbers, so comments here stay on existing lines
    val l = List(1, 2, 4, 8) // sample list stepped through below
    l.foreach(noop(_)) // placeholder-syntax anon fun, result discarded
    l.find(_ == 3) // never matches (3 is not in the list); exercises find
    l.map(ret(_)) // anon fun delegating to the ret helper
    l.foldLeft(0)(_ + ret(_)) // two-placeholder anon fun
    l foreach { i => // block-style anon fun
      noop(i)
    }
  }
}
class AnonFunOnListInt {} | espinhogr/ensime-server | testing/debug/src/main/scala/stepping/AnonFunOnListInt.scala | Scala | gpl-3.0 | 302 |
package hyperion
import io.restassured.RestAssured.given
import org.hamcrest.Matchers._
/**
 * Integration tests for the History HTTP API using REST-assured.
 * NOTE(review): assumes seed data exists for 2017-01-01 and none for
 * 2016-12-31 - presumably provisioned by BaseIntegrationSpec; confirm.
 */
class HistorySpec extends BaseIntegrationSpec {
  "The History API" should {
    "expose meter readings by date" should {
      "return meter reading if data is present" in {
        given().
          port(port).
        when().
          get("/history?date=2017-01-01").
        `then`().
          statusCode(200).
          body("recordDate", equalTo("2017-01-01")).
          body("gas", is(1.5f)).
          body("electricityNormal", equalTo(2.6f)).
          body("electricityLow", equalTo(3.7f))
      }
      "return \\'Not Found\\' if no data is present" in {
        given().
          port(port).
        when().
          get("/history?date=2016-12-31").
        `then`().
          statusCode(404)
      }
    }
    "expose meter readings by month" should {
      // Month queries return a JSON array; assertions index the first element.
      "return meter reading if data is present" in {
        given().
          port(port).
        when().
          get("/history?month=01&year=2017").
        `then`().
          statusCode(200).
          body("[0].recordDate", equalTo("2017-01-01")).
          body("[0].gas", is(1.5f)).
          body("[0].electricityNormal", equalTo(2.6f)).
          body("[0].electricityLow", equalTo(3.7f))
      }
      "return \\'Not Found\\' if no data is present" in {
        given().
          port(port).
        when().
          get("/history?month=12&year=2016").
        `then`().
          statusCode(404)
      }
    }
  }
} | mthmulders/hyperion | integration-test/src/test/scala/hyperion/HistorySpec.scala | Scala | mit | 1,500 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.lang.reflect.InvocationTargetException
import org.slf4j.{Logger, LoggerFactory => LogManager}
/**
 * Calling init registers "com.twitter.scalding" as a "tracing boundary" for Cascading. That means that when
 * Cascading sends trace information to a DocumentService such as Driven, the trace will have information
 * about the caller of Scalding instead of about the internals of Scalding. com.twitter.scalding.Job and its
 * subclasses will automatically initialize Tracing.
 *
 * register and unregister methods are provided for testing, but should not be needed for most development
 */
object Tracing {
  private val LOG: Logger = LogManager.getLogger(this.getClass)

  // TODO: remove this once we no longer want backwards compatibility
  // with cascading versions pre 2.6
  private val traceUtilClassName = "cascading.util.TraceUtil"

  /**
   * Put a barrier at com.twitter.scalding, but exclude things like Tool that are common entry points for
   * calling user code
   */
  private val defaultRegex = """^com\\.twitter\\.scalding\\.(?!Tool|Job|ExecutionContext).*"""

  // Registration runs exactly once, as a side effect of this object's
  // initialization (i.e. the first time Tracing is touched, including via init()).
  register()

  /**
   * Forces the initialization of the Tracing object which in turn causes the one time registration of
   * "com.twitter.scalding" as a tracing boundary in Cascading
   */
  def init(): Unit = { /* do nothing */ }

  /**
   * Explicitly registers "com.twitter.scalding" as a Cascading tracing boundary. Normally not needed, but may
   * be useful after a call to unregister()
   */
  def register(regex: String = defaultRegex) =
    invokeStaticMethod(traceUtilClassName, "registerApiBoundary", regex)

  /**
   * Unregisters "com.twitter.scalding" as a Cascading tracing bounardy. After calling this, Cascading
   * DocumentServices such as Driven will show nodes as being created by Scalding class such as RichPipe
   * instead of end user written code. This should normally not be called but can be useful in testing the
   * development of Scalding internals
   */
  def unregister(regex: String = defaultRegex) =
    invokeStaticMethod(traceUtilClassName, "unregisterApiBoundary", regex)

  /**
   * Use reflection to register/unregister tracing boundaries so that cascading versions prior to 2.6 can be
   * used without completely breaking
   */
  private def invokeStaticMethod(clazz: String, methodName: String, args: AnyRef*): Unit =
    try {
      val argTypes = args.map(_.getClass())
      Class.forName(clazz).getMethod(methodName, argTypes: _*).invoke(null, args: _*)
    } catch {
      // Deliberately broad: any reflection failure (e.g. an older Cascading
      // without TraceUtil) degrades to a warning rather than breaking jobs.
      case e @ (_: NoSuchMethodException | _: SecurityException | _: IllegalAccessException |
          _: IllegalArgumentException | _: InvocationTargetException | _: NullPointerException |
          _: ClassNotFoundException) =>
        LOG.warn(
          "There was an error initializing tracing. " +
            "Tracing information in DocumentServices such as Driven may point to Scalding code instead of " +
            "user code. The most likely cause is a mismatch in Cascading library version. Upgrading the " +
            "Cascading library to at least 2.6 should fix this issue.The cause was [" + e + "]"
        )
    }
}
| twitter/scalding | scalding-core/src/main/scala/com/twitter/scalding/Tracing.scala | Scala | apache-2.0 | 3,732 |
package uk.gov.gds.ier.model
/** Military service details (service number and rank), both optional. */
case class Rank(
    serviceNumber: Option[String],
    rank: Option[String]) {

  /** Flattens the defined fields into the key/value pairs expected by the API. */
  def toApiMap = {
    val serviceNumberEntry = serviceNumber.fold(Map.empty[String, String])(n => Map("servno" -> n))
    val rankEntry = rank.fold(Map.empty[String, String])(r => Map("rank" -> r))
    serviceNumberEntry ++ rankEntry
  }
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/model/Rank.scala | Scala | mit | 306 |
package game.commanders.unite
import android.os.Bundle
import com.badlogic.gdx.backends.android._
class Main extends AndroidApplication {
  /** Android entry point: builds the libGDX configuration and launches the game. */
  override def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    val cfg = new AndroidApplicationConfiguration
    // Sensor input disabled; wakelock and fullscreen (hidden status bar) enabled.
    cfg.useAccelerometer = false
    cfg.useCompass = false
    cfg.useWakelock = true
    cfg.hideStatusBar = true
    CommandersUnite.smoothMove = 8
    initialize(new CommandersUnite, cfg)
  }
}
| tommyettinger/CommandersUnite | commanders-unite/android/src/main/scala/Main.scala | Scala | mit | 499 |
package almond.kernel
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Path, Paths}
import java.util.UUID
import almond.channels.zeromq.ZeromqThreads
import almond.channels.{Channel, ConnectionParameters, Message => RawMessage}
import almond.interpreter.{IOInterpreter, Interpreter, InterpreterToIOInterpreter, Message}
import almond.interpreter.comm.DefaultCommHandler
import almond.interpreter.input.InputHandler
import almond.interpreter.messagehandlers.{CommMessageHandlers, InterpreterMessageHandlers, MessageHandler}
import almond.logger.LoggerContext
import almond.protocol.{Header, Protocol, Status, Connection => JsonConnection}
import cats.effect.IO
import fs2.concurrent.{Queue, SignallingRef}
import fs2.{Pipe, Stream}
import scala.concurrent.ExecutionContext
/**
 * Wires an interpreter to the Jupyter messaging channels.
 *
 * Turns the stream of incoming raw messages into the stream of responses, answering
 * ancillary messages (comm / input / completion / interrupt / shutdown) eagerly while
 * preserving strict request/response ordering for the "main" interpreter messages via
 * `mainQueue`. Messages produced spontaneously by user code (comms) flow through
 * `backgroundMessagesQueue`.
 */
final case class Kernel(
  interpreter: IOInterpreter,
  backgroundMessagesQueue: Queue[IO, (Channel, RawMessage)],
  mainQueue: Queue[IO, Option[Stream[IO, (Channel, RawMessage)]]],
  backgroundCommHandlerOpt: Option[DefaultCommHandler],
  inputHandler: InputHandler,
  kernelThreads: KernelThreads,
  logCtx: LoggerContext,
  extraHandler: MessageHandler
) {
  private lazy val log = logCtx(getClass)
  /** Maps the incoming message stream to the outgoing response stream (all channels merged). */
  def replies(requests: Stream[IO, (Channel, RawMessage)]): Stream[IO, (Channel, RawMessage)] = {
    // Set to true by the shutdown handler; interrupts the request stream below.
    val exitSignal = {
      implicit val shift = IO.contextShift(kernelThreads.scheduleEc) // or another ExecutionContext?
      SignallingRef[IO, Boolean](false)
    }
    Stream.eval(exitSignal).flatMap { exitSignal0 =>
      val interpreterMessageHandler = InterpreterMessageHandlers(
        interpreter,
        backgroundCommHandlerOpt,
        Some(inputHandler),
        kernelThreads.queueEc,
        logCtx,
        io => mainQueue.enqueue1(Some(Stream.eval_(io))),
        exitSignal0
      )
      val commMessageHandler = backgroundCommHandlerOpt match {
        case None =>
          MessageHandler.empty
        case Some(commHandler) =>
          CommMessageHandlers(commHandler.commTargetManager, kernelThreads.queueEc, logCtx)
            .messageHandler
      }
      // handlers whose messages are processed straightaway (no queueing to enforce sequential processing)
      val immediateHandlers = inputHandler.messageHandler
        .orElse(interpreterMessageHandler.completeHandler)
        .orElse(commMessageHandler)
        .orElse(interpreterMessageHandler.interruptHandler)
        .orElse(interpreterMessageHandler.shutdownHandler)
        .orElse(extraHandler)
      // for w/e reason, these seem not to be processed on time by the Jupyter classic UI
      // (don't know about lab, nteract seems fine, unless it just marks kernels as starting by itself)
      val initStream = {
        // Publishes a status message carrying the given execution state.
        def sendStatus(status: Status) =
          Stream(
            Message(
              Header(
                UUID.randomUUID().toString,
                "username",
                UUID.randomUUID().toString, // Would there be a way to get the session id from the client?
                Status.messageType.messageType,
                Some(Protocol.versionStr)
              ),
              status, // was hard-coded to Status.starting, so busy/idle were never actually published
              idents = List(Status.messageType.messageType.getBytes(UTF_8).toSeq)
            ).on(Channel.Publish)
          )
        val attemptInit = interpreter.init.attempt.flatMap { a =>
          for (e <- a.left)
            log.error("Error initializing interpreter", e)
          IO.fromEither(a)
        }
        sendStatus(Status.starting) ++
          sendStatus(Status.busy) ++
          Stream.eval_(attemptInit) ++
          sendStatus(Status.idle)
      }
      val mainStream = {
        val requests0 = {
          implicit val shift = IO.contextShift(kernelThreads.scheduleEc)
          requests.interruptWhen(exitSignal0)
        }
        // For each incoming message, an IO that processes it, and gives the response messages
        val streams: Stream[IO, Stream[IO, (Channel, RawMessage)]] =
          requests0.map {
            case (channel, rawMessage) =>
              interpreterMessageHandler.handler.handleOrLogError(channel, rawMessage, log) match {
                case None =>
                  // interpreter message handler passes, try with the other handlers
                  immediateHandlers.handleOrLogError(channel, rawMessage, log) match {
                    case None =>
                      log.warn(s"Ignoring unhandled message:\n$rawMessage")
                      Stream.empty
                    case Some(output) =>
                      // process stdin messages and send response back straightaway
                      output
                  }
                case Some(output) =>
                  // enqueue stream that processes the incoming message, so that the main messages are
                  // still processed and answered in order
                  Stream.eval_(mainQueue.enqueue1(Some(output)))
              }
          }
        // Try to process messages eagerly, to e.g. process comm messages even while an execute_request is
        // being processed.
        // Order of the main messages and their answers is still preserved via mainQueue, see also comments above.
        val mergedStreams = {
          implicit val shift = IO.contextShift(kernelThreads.scheduleEc)
          streams.parJoin(20) // https://twitter.com/mpilquist/status/943653692745666560
        }
        // Put poison pill (null) at the end of mainQueue when all input messages have been processed
        val s1 = Stream.bracket(IO.unit)(_ => mainQueue.enqueue1(None))
          .flatMap(_ => mergedStreams)
        // Responses for the main messages
        val s2 = mainQueue
          .dequeue
          .takeWhile(_.nonEmpty)
          .flatMap(s => s.getOrElse[Stream[IO, (Channel, RawMessage)]](Stream.empty))
        // Merge s1 (messages answered straightaway and enqueuing of the main messages) and s2 (responses of main
        // messages, that are processed sequentially via mainQueue)
        {
          implicit val shift = IO.contextShift(kernelThreads.scheduleEc)
          s1.merge(s2)
        }
      }
      // Put poison pill (null) at the end of backgroundMessagesQueue when all input messages have been processed
      // and answered.
      val mainStream0 = Stream.bracket(IO.unit)(_ => backgroundMessagesQueue.enqueue1(null))
        .flatMap(_ => initStream ++ mainStream)
      // Merge responses to all incoming messages with background messages (comm messages sent by user code when it
      // is run)
      {
        implicit val shift = IO.contextShift(kernelThreads.scheduleEc)
        mainStream0.merge(backgroundMessagesQueue.dequeue.takeWhile(_ != null))
      }
    }
  }
  /** Feeds the responses for `stream` into `sink` and drains the resulting stream. */
  def run(
    stream: Stream[IO, (Channel, RawMessage)],
    sink: Pipe[IO, (Channel, RawMessage), Unit]
  ): IO[Unit] =
    sink(replies(stream)).compile.drain
  /** Binds the ZeroMQ channels described by `connection` and runs the kernel on them. */
  def runOnConnection(
    connection: ConnectionParameters,
    kernelId: String,
    zeromqThreads: ZeromqThreads
  ): IO[Unit] =
    for {
      c <- connection.channels(
        bind = true,
        zeromqThreads,
        logCtx,
        identityOpt = Some(kernelId)
      )
      _ <- c.open
      _ <- run(c.stream(), c.autoCloseSink)
    } yield ()
  /** Reads a Jupyter connection file and runs the kernel on the channels it describes. */
  def runOnConnectionFile(
    connectionPath: Path,
    kernelId: String,
    zeromqThreads: ZeromqThreads
  ): IO[Unit] =
    for {
      _ <- {
        if (Files.exists(connectionPath))
          IO.unit
        else
          IO.raiseError(new Exception(s"Connection file $connectionPath not found"))
      }
      _ <- {
        if (Files.isRegularFile(connectionPath))
          IO.unit
        else
          IO.raiseError(new Exception(s"Connection file $connectionPath not a regular file"))
      }
      connection <- JsonConnection.fromPath(connectionPath)
      _ <- runOnConnection(
        connection.connectionParameters,
        kernelId,
        zeromqThreads
      )
    } yield ()
  /** Convenience overload taking the connection file path as a string. */
  def runOnConnectionFile(
    connectionPath: String,
    kernelId: String,
    zeromqThreads: ZeromqThreads
  ): IO[Unit] =
    runOnConnectionFile(
      Paths.get(connectionPath),
      kernelId,
      zeromqThreads
    )
}
object Kernel {
  /** Builds a [[Kernel]] from a plain [[Interpreter]], running it on `interpreterEc`. */
  def create(
    interpreter: Interpreter,
    interpreterEc: ExecutionContext,
    kernelThreads: KernelThreads,
    logCtx: LoggerContext,
    extraHandler: MessageHandler
  ): IO[Kernel] =
    create(
      new InterpreterToIOInterpreter(interpreter, interpreterEc, logCtx),
      kernelThreads,
      logCtx,
      extraHandler
    )
  /** Overload with no extra message handler and an optional (nop by default) logger. */
  def create(
    interpreter: Interpreter,
    interpreterEc: ExecutionContext,
    kernelThreads: KernelThreads,
    logCtx: LoggerContext = LoggerContext.nop
  ): IO[Kernel] =
    create(
      interpreter,
      interpreterEc,
      kernelThreads,
      logCtx,
      MessageHandler.empty
    )
  /**
   * Core constructor: allocates the two bounded message queues, the comm handler
   * (only when the interpreter supports comms) and the stdin input handler.
   */
  def create(
    interpreter: IOInterpreter,
    kernelThreads: KernelThreads,
    logCtx: LoggerContext,
    extraHandler: MessageHandler
  ): IO[Kernel] =
    for {
      backgroundMessagesQueue <- {
        implicit val shift = IO.contextShift(kernelThreads.queueEc)
        Queue.bounded[IO, (Channel, RawMessage)](20) // FIXME Sizing
      }
      mainQueue <- {
        implicit val shift = IO.contextShift(kernelThreads.queueEc)
        Queue.bounded[IO, Option[Stream[IO, (Channel, RawMessage)]]](50) // FIXME Sizing
      }
      // The comm handler is registered on the interpreter so user code can emit comm
      // messages, which are pushed onto backgroundMessagesQueue.
      backgroundCommHandlerOpt <- IO {
        if (interpreter.supportComm)
          Some {
            val h = new DefaultCommHandler(backgroundMessagesQueue, kernelThreads.commEc)
            interpreter.setCommHandler(h)
            h
          }
        else
          None
      }
      inputHandler <- IO {
        new InputHandler(kernelThreads.futureEc, logCtx)
      }
    } yield {
      Kernel(
        interpreter,
        backgroundMessagesQueue,
        mainQueue,
        backgroundCommHandlerOpt,
        inputHandler,
        kernelThreads,
        logCtx,
        extraHandler
      )
    }
}
| alexarchambault/jupyter-scala | modules/shared/kernel/src/main/scala/almond/kernel/Kernel.scala | Scala | apache-2.0 | 9,980 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf._
/**
* Test cases for the builder pattern of [[SparkSession]].
*/
class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach {
  // Tear down any session a test created so no state leaks into the next test.
  override def afterEach(): Unit = {
    // This suite should not interfere with the other test suites.
    SparkSession.getActiveSession.foreach(_.stop())
    SparkSession.clearActiveSession()
    SparkSession.getDefaultSession.foreach(_.stop())
    SparkSession.clearDefaultSession()
  }
  test("create with config options and propagate them to SparkContext and SparkSession") {
    val session = SparkSession.builder()
      .master("local")
      .config(UI_ENABLED.key, value = false)
      .config("some-config", "v2")
      .getOrCreate()
    assert(session.sparkContext.conf.get("some-config") == "v2")
    assert(session.conf.get("some-config") == "v2")
  }
  test("use global default session") {
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(SparkSession.builder().getOrCreate() == session)
  }
  test("sets default and active session") {
    assert(SparkSession.getDefaultSession == None)
    assert(SparkSession.getActiveSession == None)
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(SparkSession.getDefaultSession == Some(session))
    assert(SparkSession.getActiveSession == Some(session))
  }
  test("get active or default session") {
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(SparkSession.active == session)
    SparkSession.clearActiveSession()
    assert(SparkSession.active == session)
    SparkSession.clearDefaultSession()
    intercept[IllegalStateException](SparkSession.active)
    session.stop()
  }
  test("config options are propagated to existing SparkSession") {
    val session1 = SparkSession.builder().master("local").config("spark-config1", "a").getOrCreate()
    assert(session1.conf.get("spark-config1") == "a")
    val session2 = SparkSession.builder().config("spark-config1", "b").getOrCreate()
    assert(session1 == session2)
    assert(session1.conf.get("spark-config1") == "b")
  }
  test("use session from active thread session and propagate config options") {
    val defaultSession = SparkSession.builder().master("local").getOrCreate()
    val activeSession = defaultSession.newSession()
    SparkSession.setActiveSession(activeSession)
    val session = SparkSession.builder().config("spark-config2", "a").getOrCreate()
    assert(activeSession != defaultSession)
    assert(session == activeSession)
    assert(session.conf.get("spark-config2") == "a")
    assert(session.sessionState.conf == SQLConf.get)
    assert(SQLConf.get.getConfString("spark-config2") == "a")
    SparkSession.clearActiveSession()
    assert(SparkSession.builder().getOrCreate() == defaultSession)
  }
  test("create a new session if the default session has been stopped") {
    val defaultSession = SparkSession.builder().master("local").getOrCreate()
    SparkSession.setDefaultSession(defaultSession)
    defaultSession.stop()
    val newSession = SparkSession.builder().master("local").getOrCreate()
    assert(newSession != defaultSession)
  }
  test("create a new session if the active thread session has been stopped") {
    val activeSession = SparkSession.builder().master("local").getOrCreate()
    SparkSession.setActiveSession(activeSession)
    activeSession.stop()
    val newSession = SparkSession.builder().master("local").getOrCreate()
    assert(newSession != activeSession)
  }
  // The next two tests cover interaction with a pre-existing SparkContext.
  test("create SparkContext first then SparkSession") {
    val conf = new SparkConf().setAppName("test").setMaster("local").set("key1", "value1")
    val sparkContext2 = new SparkContext(conf)
    val session = SparkSession.builder().config("key2", "value2").getOrCreate()
    assert(session.conf.get("key1") == "value1")
    assert(session.conf.get("key2") == "value2")
    assert(session.sparkContext == sparkContext2)
    // We won't update conf for existing `SparkContext`
    assert(!sparkContext2.conf.contains("key2"))
    assert(sparkContext2.conf.get("key1") == "value1")
  }
  test("create SparkContext first then pass context to SparkSession") {
    val conf = new SparkConf().setAppName("test").setMaster("local").set("key1", "value1")
    val newSC = new SparkContext(conf)
    val session = SparkSession.builder().sparkContext(newSC).config("key2", "value2").getOrCreate()
    assert(session.conf.get("key1") == "value1")
    assert(session.conf.get("key2") == "value2")
    assert(session.sparkContext == newSC)
    assert(session.sparkContext.conf.get("key1") == "value1")
    // If the created sparkContext is passed through the Builder's API sparkContext,
    // the conf of this sparkContext will not contain the conf set through the API config.
    assert(!session.sparkContext.conf.contains("key2"))
    assert(session.sparkContext.conf.get("spark.app.name") == "test")
  }
  test("SPARK-15887: hive-site.xml should be loaded") {
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(session.sessionState.newHadoopConf().get("hive.in.test") == "true")
    assert(session.sparkContext.hadoopConfiguration.get("hive.in.test") == "true")
  }
  test("SPARK-15991: Set global Hadoop conf") {
    val session = SparkSession.builder().master("local").getOrCreate()
    val mySpecialKey = "my.special.key.15991"
    val mySpecialValue = "msv"
    try {
      session.sparkContext.hadoopConfiguration.set(mySpecialKey, mySpecialValue)
      assert(session.sessionState.newHadoopConf().get(mySpecialKey) == mySpecialValue)
    } finally {
      session.sparkContext.hadoopConfiguration.unset(mySpecialKey)
    }
  }
  test("SPARK-31234: RESET command will not change static sql configs and " +
    "spark context conf values in SessionState") {
    val session = SparkSession.builder()
      .master("local")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31234")
      .config("spark.app.name", "test-app-SPARK-31234")
      .getOrCreate()
    assert(session.sessionState.conf.getConfString("spark.app.name") === "test-app-SPARK-31234")
    assert(session.sessionState.conf.getConf(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31234")
    session.sql("RESET")
    assert(session.sessionState.conf.getConfString("spark.app.name") === "test-app-SPARK-31234")
    assert(session.sessionState.conf.getConf(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31234")
  }
  test("SPARK-31354: SparkContext only register one SparkSession ApplicationEnd listener") {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test-app-SPARK-31354-1")
    val context = new SparkContext(conf)
    SparkSession
      .builder()
      .sparkContext(context)
      .master("local")
      .getOrCreate()
    val postFirstCreation = context.listenerBus.listeners.size()
    SparkSession.clearActiveSession()
    SparkSession.clearDefaultSession()
    SparkSession
      .builder()
      .sparkContext(context)
      .master("local")
      .getOrCreate()
    val postSecondCreation = context.listenerBus.listeners.size()
    SparkSession.clearActiveSession()
    SparkSession.clearDefaultSession()
    assert(postFirstCreation == postSecondCreation)
  }
  test("SPARK-31532: should not propagate static sql configs to the existing" +
    " active/default SparkSession") {
    val session = SparkSession.builder()
      .master("local")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31532")
      .config("spark.app.name", "test-app-SPARK-31532")
      .getOrCreate()
    // do not propagate static sql configs to the existing active session
    val session1 = SparkSession
      .builder()
      .config(GLOBAL_TEMP_DATABASE.key, "globalTempDB-SPARK-31532-1")
      .getOrCreate()
    assert(session.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")
    assert(session1.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")
    // do not propagate static sql configs to the existing default session
    SparkSession.clearActiveSession()
    val session2 = SparkSession
      .builder()
      .config(WAREHOUSE_PATH.key, "SPARK-31532-db")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31532-2")
      .getOrCreate()
    assert(!session.conf.get(WAREHOUSE_PATH).contains("SPARK-31532-db"))
    assert(session.conf.get(WAREHOUSE_PATH) === session2.conf.get(WAREHOUSE_PATH))
    assert(session2.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")
  }
  test("SPARK-31532: propagate static sql configs if no existing SparkSession") {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test-app-SPARK-31532-2")
      .set(GLOBAL_TEMP_DATABASE.key, "globaltempdb-spark-31532")
      .set(WAREHOUSE_PATH.key, "SPARK-31532-db")
    SparkContext.getOrCreate(conf)
    // propagate static sql configs if no existing session
    val session = SparkSession
      .builder()
      .config(GLOBAL_TEMP_DATABASE.key, "globalTempDB-SPARK-31532-2")
      .config(WAREHOUSE_PATH.key, "SPARK-31532-db-2")
      .getOrCreate()
    assert(session.conf.get("spark.app.name") === "test-app-SPARK-31532-2")
    assert(session.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532-2")
    assert(session.conf.get(WAREHOUSE_PATH) === "SPARK-31532-db-2")
  }
  test("SPARK-32062: reset listenerRegistered in SparkSession") {
    (1 to 2).foreach { i =>
      val conf = new SparkConf()
        .setMaster("local")
        .setAppName(s"test-SPARK-32062-$i")
      val context = new SparkContext(conf)
      val beforeListenerSize = context.listenerBus.listeners.size()
      SparkSession
        .builder()
        .sparkContext(context)
        .getOrCreate()
      val afterListenerSize = context.listenerBus.listeners.size()
      assert(beforeListenerSize + 1 == afterListenerSize)
      context.stop()
    }
  }
}
| dbtsai/spark | sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala | Scala | apache-2.0 | 10,952 |
package com.phasmid.hedge_fund.rules
/**
* @author robinhillyard
*/
/** A rule wraps a [[Predicate]] and simply forwards evaluation to it. */
case class Rule(predicate: Predicate) extends Predicate {
  // TODO: this forwarding override may be unnecessary if Predicate already supplies it.
  def apply(candidate: Candidate): Either[Throwable, Boolean] = predicate(candidate)
}
object Rule {
  // Matches "(left) & (rest)" or "(left) | (rest)"; the right-hand capture is
  // re-parsed recursively by apply below.
  // NOTE: the previous pattern used doubled backslashes inside a triple-quoted
  // string ("""\\(..."""), which made the regex require literal '\' characters
  // in the input and so never matched the documented form.
  // Note that this expression is tail-recursive. TODO [I don't think so!]
  // That's to say that parentheses can be nested, provided that all adjacent closing parens
  // appear at the termination of the string.
  val rRule = """^\(([^\)]+)\)\s*(\&|\|)\s*\((.+)\)$""".r

  /** Parses `s` into a predicate tree; '&' and '|' combine parenthesised sub-rules. */
  def apply(s: String): Predicate = s match {
    case rRule(p1, "&", p2) => And(Predicate(p1), Rule(p2))
    case rRule(p1, "|", p2) => Or(Predicate(p1), Rule(p2))
    case _ => Predicate(s)
  }
}
/** Short-circuit conjunction: `p2` is evaluated only when `p1` yields Right(true). */
case class And(p1: Predicate, p2: Predicate) extends Predicate {
  def apply(candidate: Candidate) = p1.apply(candidate) match {
    case Right(true)  => p2.apply(candidate)
    case Right(false) => Right(false)
    case failure      => failure // propagate the Left unchanged
  }
}
/** Short-circuit disjunction: `p2` is evaluated only when `p1` yields Right(false). */
case class Or(p1: Predicate, p2: Predicate) extends Predicate {
  def apply(candidate: Candidate) = p1.apply(candidate) match {
    case Right(true)  => Right(true)
    case Right(false) => p2.apply(candidate)
    case failure      => failure // propagate the Left unchanged
  }
}
package org.jetbrains.jps.incremental.scala
package local
package zinc
import java.io.{PrintWriter, StringWriter}
import java.util.ServiceLoader
import org.jetbrains.jps.incremental.messages.BuildMessage.Kind
import org.jetbrains.jps.incremental.scala.data.CompilationData
import sbt.internal.inc.Analysis
import xsbti.compile.{AnalysisStore, CompileResult, MiniSetup, AnalysisContents}
import Utils._
import scala.util.Try
import scala.util.control.NonFatal
import scala.collection.JavaConverters._
/**
 * Previous-compilation state handed to zinc: the prior analysis/setup (from a local
 * store or an external cache) plus statistics about how the cache lookup went.
 * `providers` are the cache plugins that get notified when compilation finishes.
 */
case class CompilationMetadata(previousAnalysis: Analysis,
                               previousSetup: Option[MiniSetup],
                               cacheDetails: CacheStats
                              )(providers: Seq[CachedCompilationProvider]) {
  // A message is logged only if every provider's filter accepts it.
  val zincLogFilter: ZincLogFilter = {
    val filters = providers.flatMap(_.zincLogFilter())
    (severity: Kind, msg: String) => filters.forall(_.shouldLog(severity, msg))
  }
  // NOTE(review): `compilationData` is accepted but not forwarded to the providers.
  def compilationFinished(compilationData: CompilationData,
                          result: Try[CompileResult],
                          classfilesChanges: ClassfilesChanges,
                          cacheStats: CacheStats): Unit =
    providers.foreach(_.compilationFinished(result, classfilesChanges, cacheStats))
}
object CompilationMetadata {
  // Cache back-ends are discovered via java.util.ServiceLoader at class-load time.
  private val cachedCompilationServices: List[CachedCompilationService] =
    ServiceLoader.load(classOf[CachedCompilationService])
      .iterator().asScala.toList
  /**
   * Loads previous compilation state: asks every discovered cache provider for a
   * cached analysis, falling back to the local analysis store (or empty state)
   * when no cache has content.
   */
  def load(localStore: AnalysisStore, client: Client, compilationData: CompilationData): CompilationMetadata = {
    val analysisFromLocalStore = localStore.get()
    val cacheLoadingStart = System.currentTimeMillis()
    val cacheProviders = cachedCompilationServices.flatMap(_.createProvider(compilationData))
    def cacheStats(description: String, isCached: Boolean) = {
      val cacheLoadingEnd = System.currentTimeMillis()
      CacheStats(description, cacheLoadingEnd - cacheLoadingStart, cacheLoadingEnd, isCached)
    }
    // Fallback: use whatever the local analysis store holds (or empty state).
    def notUseCache(description: String) = {
      val (localAnalysis, localSetup) = if (analysisFromLocalStore.isPresent) {
        val content = analysisFromLocalStore.get()
        (content.getAnalysis.asInstanceOf[Analysis], Some(content.getMiniSetup))
      } else (Analysis.Empty, None)
      CompilationMetadata(localAnalysis, localSetup, cacheStats(description, isCached = false))(cacheProviders)
    }
    // Converts a cache-loading failure into an empty CacheResult carrying the stack trace.
    def loaderErrored(e: Throwable) = {
      val logs = new StringWriter()
      e.printStackTrace(new PrintWriter(logs))
      e.printStackTrace(System.out) // It will become a warning
      CacheResult(s"Exception when loading cache:\\n$logs", None)
    }
    val cachedResults: List[CacheResult] = cacheProviders.map{
      provider =>
        try provider.loadCache(analysisFromLocalStore.toOption) catch {
          case NonFatal(e) =>
            loaderErrored(e)
          // NOTE(review): NonFatal already matches ClassNotFoundException, so this
          // case is unreachable; the NoClassDefFoundError case IS needed because
          // LinkageError is fatal and not matched by NonFatal.
          case e: ClassNotFoundException =>
            loaderErrored(e)
          case e: NoClassDefFoundError =>
            loaderErrored(e)
        }
    }
    // In case all caches are empty use we will get description from first one
    val cacheToUse = cachedResults.find(_.content.nonEmpty) orElse cachedResults.headOption
    cacheToUse match {
      case Some(CacheResult(description, result)) =>
        result match {
          case Some(content: AnalysisContents) =>
            CompilationMetadata(content.getAnalysis.asInstanceOf[Analysis], Some(content.getMiniSetup), cacheStats(description, isCached = true))(cacheProviders)
          case Some(badFormat) =>
            val cacheResultClass = badFormat.getClass.getName
            client.warning(s"Unrecognized cache format: $badFormat (class $cacheResultClass)")
            notUseCache(s"No cache: badFormat ($cacheResultClass): $description")
          case _ =>
            notUseCache(s"No cache: $description")
        }
      case _ =>
        notUseCache("No cache found.")
    }
  }
}
| triplequote/intellij-scala | scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/local/zinc/CompilationMetadata.scala | Scala | apache-2.0 | 3,941 |
package scryetek.vecmath
/**
 * A mutable 2x2 matrix of Floats, stored row-major (m[row][col]).
 * Most operations come in two flavours: an allocating form (e.g. `transposed`)
 * and an in-place form writing into an `out` matrix (defaulting to `this`).
 */
final class Mat2(
    var m00: Float, var m01: Float,
    var m10: Float, var m11: Float) {
  /** Sets any subset of the entries in place; unspecified entries keep their value. Returns `this`. */
  @inline
  def set(
      m00: Float = this.m00, m01: Float = this.m01,
      m10: Float = this.m10, m11: Float = this.m11): Mat2 = {
    this.m00 = m00; this.m01 = m01
    this.m10 = m10; this.m11 = m11
    this
  }
  /** Copies all entries of `m` into this matrix. */
  @inline
  def set(m: Mat2): Mat2 =
    set(m.m00, m.m01,
        m.m10, m.m11)
  /** Constructs the identity matrix. */
  @inline
  def this() = this(1, 0, 0, 1)
  /** The determinant m00*m11 - m01*m10. */
  @inline
  def determinant = m00*m11-m01*m10
  /** Returns a newly-allocated inverse of this matrix (asserts the matrix is invertible). */
  @inline
  def inverted = {
    var det = determinant
    assert(det != 0, "Matrix is not invertable")
    det = 1/det
    Mat2( m11*det, -m01*det,
         -m10*det,  m00*det)
  }
  /** Writes the inverse of this matrix into `out` (asserts the matrix is invertible). */
  @inline
  def invert(out: Mat2): Mat2 = {
    var det = determinant
    assert(det != 0, "Matrix is not invertable")
    det = 1/det
    out.set( m11*det, -m01*det,
            -m10*det,  m00*det)
  }
  /** Returns the transpose of this matrix. */
  @inline
  def transposed =
    Mat2(m00, m10,
         m01, m11)
  /** Transposes this matrix into the specified out matrix. */
  @inline
  def transpose(out: Mat2 = this): Mat2 =
    out.set(m00, m10,
            m01, m11)
  /** Returns the adjoint of this matrix. */
  @inline
  def adjointed =
    Mat2( m11, -m01,
         -m10,  m00)
  /** Puts the adjoint of this matrix into the specified out matrix. */
  @inline
  def adjoint(out: Mat2): Mat2 =
    out.set( m11, -m01,
            -m10,  m00)
  /** Adds two matrices */
  @inline
  def +(m: Mat2): Mat2 =
    Mat2(m00 + m.m00, m01 + m.m01,
         m10 + m.m10, m11 + m.m11)
  /** Destructively add another matrix to this matrix into the output matrix. */
  @inline
  def add(m: Mat2, out: Mat2 = this): Mat2 =
    out.set(m00 + m.m00, m01 + m.m01,
            m10 + m.m10, m11 + m.m11)
  /** Adds two matrices */
  @inline
  def -(m: Mat2): Mat2 =
    Mat2(m00 - m.m00, m01 - m.m01,
         m10 - m.m10, m11 - m.m11)
  /** Destructively add another matrix to this matrix into the output matrix. */
  @inline
  def sub(m: Mat2, out: Mat2 = this): Mat2 =
    out.set(m00 - m.m00, m01 - m.m01,
            m10 - m.m10, m11 - m.m11)
  /** Transforms a 2-vector by this matrix. */
  @inline
  def *(v: Vec2): Vec2 =
    Vec2(m00*v.x + m01*v.y, m10*v.x + m11*v.y)
  /** Transforms a 2-vector into the specified out vector. */
  @inline
  def mul(v: Vec2, out: Vec2): Vec2 =
    out.set(m00*v.x + m01*v.y, m10*v.x + m11*v.y)
  /** Destructively transforms a 2-vector. */
  @inline
  def mul(v: Vec2): Vec2 =
    mul(v,v)
  /** Returns a scaled copy of this matrix. */
  @inline
  def *(s: Float): Mat2 =
    Mat2(m00*s, m01*s,
         m10*s, m11*s)
  /** Writes this matrix scaled by `s` into `out` (defaults to scaling in place). */
  @inline
  def scale(s: Float, out: Mat2 = this): Mat2 =
    out.set(m00*s, m01*s,
            m10*s, m11*s)
  /** Returns the result of multiplying this matrix by another matrix. */
  @inline
  def *(m: Mat2): Mat2 =
    Mat2(m00*m.m00 + m01*m.m10, m00*m.m01 + m01*m.m11,
         m10*m.m00 + m11*m.m10, m10*m.m01 + m11*m.m11)
  /** Copies the result of postmultiplying another matrix by this matrix into the specified output matrix. */
  @inline
  def postMultiply(m: Mat2, out: Mat2 = this): Mat2 =
    out.set(m00*m.m00 + m01*m.m10, m00*m.m01 + m01*m.m11,
            m10*m.m00 + m11*m.m10, m10*m.m01 + m11*m.m11)
  /** Copies the result of premultiplying another matrix by this matrix into the specified output matrix. */
  @inline
  def preMultiply(m: Mat2, out: Mat2 = this): Mat2 =
    out.set(m.m00*m00 + m.m01*m10, m.m00*m01 + m.m01*m11,
            m.m10*m00 + m.m11*m10, m.m10*m01 + m.m11*m11)
  /** Returns a copy with any subset of entries replaced. */
  def copy(m00: Float = m00, m01: Float = m01,
           m10: Float = m10, m11: Float = m11) =
    Mat2(m00, m01,
         m10, m11)
  override def toString =
    s"Mat2(${m00}f,${m01}f,${m10}f,${m11}f)"
  // Structural equality over the four entries; hashCode below is kept consistent.
  override def equals(o: Any): Boolean = o match {
    case v: Mat2 => m00 == v.m00 && m01 == v.m01 && m10 == v.m10 && m11 == v.m11
    case _ => false
  }
  override def hashCode: Int =
    m00.hashCode() * 19 + m01.hashCode() * 23 +
      m10.hashCode() * 31 + m11.hashCode() * 37
}
object Mat2 {
  /** The 2x2 identity matrix. */
  def apply() = new Mat2()

  /** Builds a matrix from its entries in row-major order. */
  def apply(m00: Float, m01: Float, m10: Float, m11: Float) =
    new Mat2(m00, m01, m10, m11)

  /** Rotation matrix for `angle` radians. */
  @inline
  def rotate(angle: Float): Mat2 = {
    val cos = math.cos(angle).toFloat
    val sin = math.sin(angle).toFloat
    Mat2(cos, -sin,
         sin,  cos)
  }

  /** Non-uniform scaling by `x` and `y`. */
  @inline
  def scale(x: Float, y: Float): Mat2 =
    Mat2(x, 0,
         0, y)

  /** Scaling by the components of `s`. */
  @inline
  def scale(s: Vec2): Mat2 =
    scale(s.x, s.y)
}
// Compiler regression fixture (appears to be for ticket t5543 — default arguments
// referring to `this`/`v`): the printed output is the test oracle, so the code must
// stay exactly as written.
object Test extends Function0[Int] {
  // this and v resolve to Test.this, Test.v not A.this, A.v
  class A(x: Function0[Int] = this)(val a: Int = v, val b: Int = v * x()) extends Function0[Int] {
    val v = 3
    override def toString = x.toString +", "+ a +", "+ b
    // ordinary instance scope
    def m(i: Int = v, y: Function0[Int] = this) = "m, "+ i +", "+ y()
    def apply() = 19
  }
  object A {
    val v = 5
    // should happily coexist with default getters, in a happier world
    def init(x: Function0[Int] = Test.this)(a: Int = v, b: Int = v * x()) = x.toString +", "+ a +", "+ b
    override def toString = "A"
  }
  val v = 7
  def apply() = 17
  override def toString = "Test"
  def main(args: Array[String]): Unit = {
    val sut = new A()()
    println(sut.toString)
    println(sut.m())
    println(A.init()())
    println((new T.C()).x)
    println((new T.D(0,0)).x)
  }
}
// Companion fixture: checks what `this` refers to inside constructor default
// arguments (the enclosing object T) versus inside constructor bodies (the class).
object T {
  override def toString = "T"
  // `this` refers to T
  class C(val x: Any = {println(this); this}) { // prints T
    println(this) // prints C
    override def toString() = "C"
  }
  class D(val x: Any) {
    override def toString() = "D"
    // `this` refers again to T
    def this(a: Int, b: Int, c: Any = {println(this); this}) { this(c); println(this) } // prints T, then prints D
  }
}
| martijnhoekstra/scala | test/files/run/t5543.scala | Scala | apache-2.0 | 1,306 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Martin Odersky
*/
package scala.tools.nsc.ast.parser
/** Base type for primitive edits to source text (see [[Insertion]] and [[Deletion]]). */
abstract class Change
/** An edit that inserts `text`. */
case class Insertion(text: String) extends Change
/** An edit that removes `nchars` characters. */
case class Deletion(nchars: Int) extends Change
| felixmulder/scala | src/compiler/scala/tools/nsc/ast/parser/Change.scala | Scala | bsd-3-clause | 250 |
package com.frenchcoder.sip
import akka.actor.ActorRef
import javax.sip._
/**
 * JAIN-SIP listener intended to forward stack events to the Akka actor `client`.
 * NOTE(review): all callbacks are currently stubs — `client` is never messaged yet,
 * and only processIOException does anything (prints the event).
 */
class SipEventForwarder(client: ActorRef) extends SipListener {
  def processRequest(event: RequestEvent): Unit = {
  }
  def processResponse(event: ResponseEvent): Unit = {
  }
  def processDialogTerminated(event: DialogTerminatedEvent): Unit = {
  }
  def processIOException(event: IOExceptionEvent): Unit = {
    println(event) // placeholder diagnostics; presumably to be replaced by a message to `client`
  }
  def processTimeout(event: TimeoutEvent): Unit = {
  }
  def processTransactionTerminated(event: TransactionTerminatedEvent): Unit = {
  }
}
| jpthomasset/alfred | src/main/scala/com/frenchcoder/sip/SipEventForwarder.scala | Scala | apache-2.0 | 564 |
package neverlink
class A // empty marker class; presumably exercises the neverlink dependency rule in the build tests — TODO confirm
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import scala.util.Random
import org.apache.spark.benchmark.Benchmark
/**
* Benchmark to measure read performance with Bloom filters.
*
* Currently, only ORC supports bloom filters, we will add Parquet BM as soon as it becomes
* available.
*
* To run this benchmark:
* {{{
* 1. without sbt: bin/spark-submit --class <this class>
* --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/BloomFilterBenchmark-results.txt".
* }}}
*/
object BloomFilterBenchmark extends SqlBasedBenchmark {
  import spark.implicits._
  // 100M random Int rows are generated once and reused by both benchmarks.
  private val scaleFactor = 100
  private val N = scaleFactor * 1000 * 1000
  private val df = spark.range(N).map(_ => Random.nextInt)
  // Measures ORC write cost with and without building a bloom filter on "value".
  private def writeBenchmark(): Unit = {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      runBenchmark(s"ORC Write") {
        val benchmark = new Benchmark(s"Write ${scaleFactor}M rows", N, output = output)
        benchmark.addCase("Without bloom filter") { _ =>
          df.write.mode("overwrite").orc(path + "/withoutBF")
        }
        benchmark.addCase("With bloom filter") { _ =>
          df.write.mode("overwrite")
            .option("orc.bloom.filter.columns", "value").orc(path + "/withBF")
        }
        benchmark.run()
      }
    }
  }
  // Measures a selective point lookup, where the bloom filter can skip row groups.
  private def readBenchmark(): Unit = {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      df.write.orc(path + "/withoutBF")
      df.write.option("orc.bloom.filter.columns", "value").orc(path + "/withBF")
      runBenchmark(s"ORC Read") {
        val benchmark = new Benchmark(s"Read a row from ${scaleFactor}M rows", N, output = output)
        benchmark.addCase("Without bloom filter") { _ =>
          spark.read.orc(path + "/withoutBF").where("value = 0").noop()
        }
        benchmark.addCase("With bloom filter") { _ =>
          spark.read.orc(path + "/withBF").where("value = 0").noop()
        }
        benchmark.run()
      }
    }
  }
  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
    writeBenchmark()
    readBenchmark()
  }
}
| wangmiao1981/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala | Scala | apache-2.0 | 3,110 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.Table
import com.intel.analytics.bigdl.nn.CSubTable
import scala.collection.mutable.HashMap
import scala.util.Random
// Serial: Torch-comparison specs talk to an external Torch process, so they must not run in parallel.
@com.intel.analytics.bigdl.tags.Serial
class CSubTableSpec extends TorchSpec {
  // NOTE(review): the description says "CDivTable" but the spec exercises CSubTable —
  // looks like a copy-paste from the CDivTable spec; changing the string would rename the test.
  "A CDivTable Module" should "generate correct output and grad" in {
    torchCheck()
    // Fixed seed so the Scala and Lua sides see reproducible inputs.
    val seed = 100
    RNG.setSeed(seed)
    val module = new CSubTable[Double]()
    val input1 = Tensor[Double](5).apply1(e => Random.nextDouble())
    val input2 = Tensor[Double](5).apply1(e => Random.nextDouble())
    val gradOutput = Tensor[Double](5).apply1(e => Random.nextDouble())
    val input = new Table()
    input(1.toDouble) = input1
    input(2.toDouble) = input2
    // Time the BigDL forward + backward pass.
    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    val end = System.nanoTime()
    val scalaTime = end - start
    // Run the equivalent computation in Lua Torch and compare both results.
    val code = "torch.manualSeed(" + seed + ")\\n" +
      "module = nn.CSubTable()\\n" +
      "output = module:forward(input)\\n" +
      "gradInput = module:backward(input,gradOutput)"
    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput"))
    val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
    val luaOutput2 = torchResult("gradInput").asInstanceOf[Table]
    luaOutput1 should be(output)
    luaOutput2 should be (gradInput)
    println("Test case : CSubTable, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 + " s")
  }
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/CSubTableSpec.scala | Scala | apache-2.0 | 2,289 |
package no.antares.dbunit.converters
/* CamelNameConverter.scala
Copyright 2011 Tommy Skodje (http://www.antares.no)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/** A name converter that converts camelCased to UPPERCASED_UNDERSCORED
@author Tommy Skodje
*/
class CamelNameConverter( private val nextInChain: DefaultNameConverter ) extends DefaultNameConverter() {

  /** Chains to a plain [[DefaultNameConverter]]. */
  def this() = this( new DefaultNameConverter() );

  override def tableName( oldName: String ): String = nextInChain.tableName( camel2underscored( oldName ) );

  // Bug fix: this previously delegated to nextInChain.tableName, bypassing any
  // column-specific behaviour of the chained converter.
  override def columnName( oldName: String ): String = nextInChain.columnName( camel2underscored( oldName ) );

  /** "camelCase" -> "CAMEL_CASE": upper-cases every character and inserts an
    * underscore before each character that was already upper case (except the first).
    */
  private def camel2underscored( name: String ): String = {
    val underscored = new StringBuilder()
    for ( c <- name.toArray[Char] ) {
      if ( c.isLower || ( underscored.length == 0 ) )
        underscored.append( c.toUpper )
      else
        underscored.append( "_" ).append( c )
    }
    underscored.toString
  }
} | 2my/test-data-control | src/main/scala/no/antares/dbunit/converters/CamelNameConverter.scala | Scala | apache-2.0 | 1,483 |
package org.jetbrains.bsp.protocol
import java.io.File
import java.net.URI
import ch.epfl.scala.bsp.InitializeBuildParams
import monix.eval.Task
import org.jetbrains.bsp.{BspError, BspErrorMessage}
import org.jetbrains.bsp.protocol.BspServerConnector.BspConnectionMethod
abstract class BspServerConnector(initParams: InitializeBuildParams) {
  /**
    * Connect to a bsp server with one of the given methods.
    * @param methods connection methods supported by the bsp server, tried in order of preference
    * @return the established session, or a [[BspError]] when no compatible method is found
    *         (NOTE(review): previous doc said "None", but the signature returns an Either)
    */
  def connect(methods: BspConnectionMethod*): Task[Either[BspError, BspSession]]
}
object BspServerConnector {
  /** Transport alternatives a bsp server may offer for establishing a connection. */
  sealed abstract class BspConnectionMethod
  // Unix domain socket file (Linux/macOS).
  final case class UnixLocalBsp(socketFile: File) extends BspConnectionMethod
  // Windows named pipe.
  final case class WindowsLocalBsp(pipeName: String) extends BspConnectionMethod
  // Plain TCP endpoint.
  final case class TcpBsp(host: URI, port: Int) extends BspConnectionMethod
}
/** TODO Connects to a bsp server based on information in .bsp directory */
class GenericConnector(base: File, initParams: InitializeBuildParams) extends BspServerConnector(initParams) {
  /** Generic .bsp-directory discovery is not implemented yet, so every attempt fails. */
  override def connect(methods: BspConnectionMethod*): Task[Either[BspError, BspSession]] = {
    val notSupported: Either[BspError, BspSession] =
      Left(BspErrorMessage("unknown bsp servers not supported yet"))
    Task.now(notSupported)
  }
}
| jastice/intellij-scala | bsp/src/org/jetbrains/bsp/protocol/bspConnector.scala | Scala | apache-2.0 | 1,341 |
package integrationtest
import model._
import controller.Controllers
import skinny.test.{ FactoryGirl, SkinnyFlatSpec }
/** HTTP-level CRUD tests for the programmers controller: content negotiation
  * (html/json/xml), CSRF protection, and company/skill association endpoints.
  */
class ProgrammersControllerSpec extends SkinnyFlatSpec with unit.SkinnyTesting {
  addFilter(Controllers.programmers, "/*")
  // Fixture accessors: reuse the first existing row when present, otherwise create one.
  def skill = Skill.findAllWithLimitOffset(1, 0).headOption.getOrElse {
    FactoryGirl(Skill).create()
  }
  def company = Company.findAllWithLimitOffset(1, 0).headOption.getOrElse {
    FactoryGirl(Company).create()
  }
  def programmer = Programmer.findAllWithLimitOffset(1, 0).headOption.getOrElse {
    FactoryGirl(Programmer).create()
  }
  it should "show programmers" in {
    get("/programmers") {
      status should equal(200)
    }
    get("/programmers/") {
      status should equal(200)
    }
    get("/programmers.json") {
      logger.debug(body)
      status should equal(200)
    }
    get("/programmers.xml") {
      logger.debug(body)
      status should equal(200)
    }
  }
  it should "show a programmer in detail" in {
    get(s"/programmers/${programmer.id}") {
      status should equal(200)
    }
    get(s"/programmers/${programmer.id}.xml") {
      logger.debug(body)
      status should equal(200)
    }
    get(s"/programmers/${programmer.id}.json") {
      logger.debug(body)
      status should equal(200)
    }
  }
  it should "show new entry form" in {
    get(s"/programmers/new") {
      status should equal(200)
    }
  }
  it should "create a programmer" in {
    val newName = s"Created at ${System.currentTimeMillis}"
    // Requests without a CSRF token must be rejected.
    post(s"/programmers", "name" -> newName) {
      status should equal(403)
    }
    withSession("csrf-token" -> "12345") {
      post(s"/programmers",
        "name" -> newName,
        "favoriteNumber" -> "123",
        "companyId" -> company.id.toString,
        "plainTextPassword" -> "1234567890",
        "csrf-token" -> "12345") {
        status should equal(302)
        // The redirect Location ends with the new id: verify the row was persisted
        // and the password stored hashed (not in plain text).
        val id = header("Location").split("/").last.toLong
        val created = Programmer.findById(id)
        created.isDefined should equal(true)
        created.get.hashedPassword.verify(PlainPassword("1234567890"), "dummy salt") should equal(true)
      }
    }
  }
  it should "show the edit form" in {
    get(s"/programmers/${programmer.id}/edit") {
      status should equal(200)
    }
  }
  it should "update a programmer" in {
    val newName = s"Updated at ${System.currentTimeMillis}"
    // CSRF-less update is rejected and must not modify the row.
    put(s"/programmers/${programmer.id}", "name" -> newName) {
      status should equal(403)
    }
    Programmer.findById(programmer.id).get.name should not equal (newName)
    withSession("csrf-token" -> "12345") {
      put(s"/programmers/${programmer.id}",
        "name" -> newName,
        "favoriteNumber" -> "123",
        "companyId" -> company.id.toString,
        "csrf-token" -> "12345") {
        status should equal(302)
      }
      // Missing mandatory parameters -> validation error.
      put(s"/programmers/${programmer.id}", "csrf-token" -> "12345") {
        status should equal(400)
      }
    }
    Programmer.findById(programmer.id).get.name should equal(newName)
  }
  it should "delete a programmer" in {
    val id = Programmer.createWithAttributes(Symbol("name") -> "Unit Test Programmer", Symbol("favoriteNumber") -> 123)
    delete(s"/programmers/${id}") {
      status should equal(403)
    }
    withSession("csrf-token" -> "aaaaaa") {
      delete(s"/programmers/${id}?csrf-token=aaaaaa") {
        status should equal(200)
      }
      // Once deleted, subsequent requests must 404 in every representation.
      post(s"/programmers/${id}?csrf-token=aaaaaa") {
        status should equal(404)
      }
      post(s"/programmers/${id}.json?csrf-token=aaaaaa") {
        status should equal(404)
        header("Content-Type") should fullyMatch regex ("application/json;\\\\s*charset=utf-8")
      }
      post(s"/programmers/${id}.xml?csrf-token=aaaaaa") {
        status should equal(404)
        header("Content-Type") should fullyMatch regex ("application/xml;\\\\s*charset=utf-8")
      }
    }
  }
  it should "add a programmer to a company" in {
    val id =
      Programmer.createWithAttributes(Symbol("name") -> "JoinCompany Test Programmer", Symbol("favoriteNumber") -> 123)
    try {
      withSession("csrf-token" -> "aaaaaa") {
        post(s"/programmers/${id}/company/${company.id}", "csrf-token" -> "aaaaaa") {
          status should equal(200)
        }
      }
    } finally {
      // Always clean up the fixture row, even when an assertion fails.
      Programmer.deleteById(id)
    }
  }
  it should "remove a programmer from a company" in {
    val id =
      Programmer.createWithAttributes(Symbol("name") -> "LeaveCompany Test Programmer", Symbol("favoriteNumber") -> 123)
    try {
      withSession("csrf-token" -> "aaaaaa") {
        post(s"/programmers/${id}/company/${company.id}", "csrf-token" -> "aaaaaa") {
          status should equal(200)
        }
        delete(s"/programmers/${id}/company?csrf-token=aaaaaa") {
          status should equal(200)
        }
      }
    } finally {
      Programmer.deleteById(id)
    }
  }
  it should "add a skill to a programmer" in {
    val id = FactoryGirl(Programmer).create().id
    try {
      withSession("csrf-token" -> "aaaaaa") {
        post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
          status should equal(200)
        }
        // Adding the same skill twice is a conflict.
        post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
          status should equal(409)
        }
      }
    } finally {
      Programmer.deleteById(id)
    }
  }
  it should "remove a skill from a programmer" in {
    val id = FactoryGirl(Programmer).create().id
    try {
      withSession("csrf-token" -> "aaaaaa") {
        post(s"/programmers/${id}/skills/${skill.id}", "csrf-token" -> "aaaaaa") {
          status should equal(200)
        }
        delete(s"/programmers/${id}/skills/${skill.id}?csrf-token=aaaaaa") {
          status should equal(200)
        }
      }
    } finally {
      Programmer.deleteById(id)
    }
  }
}
| skinny-framework/skinny-framework | example/src/test/scala/integrationtest/ProgrammersControllerSpec.scala | Scala | mit | 5,909 |
package text.normalizer
import java.nio.charset.{CodingErrorAction, StandardCharsets}
import java.nio.file.{Path, Paths}
import text.{StringNone, StringOption}
import util.Config
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.sys.process.Process
import scala.util.matching.Regex
/**
* @author K.Sakamoto
* Created on 2016/02/20
*/
/** Replaces notational variants with their canonical representation, driven by
  * a dictionary resource file; when no dictionary name is given, `normalize`
  * returns the input unchanged.
  */
class DictionaryBasedNormalizer(dictionaryNameOpt: StringOption) {
  // Decodes \uXXXX escapes in the dictionary file by shelling out to the JDK
  // native2ascii tool in reverse mode; decoding errors are reported, not replaced.
  private def ascii2native(inputPath: Path): Iterator[String] = {
    import util.process.ProcessBuilderUtils._
    Process(Seq[String](
      s"${System.getProperty("java.home")}/../bin/native2ascii",
      "-reverse",
      "-encoding", "UTF-8",
      inputPath.toAbsolutePath.toString)).lineStream(
      StandardCharsets.UTF_8,
      CodingErrorAction.REPORT,
      CodingErrorAction.REPORT,
      StringNone)
  }
  // Dictionary line format: representation:[variant1,variant2,...] with an optional "#comment".
  private val regex: Regex = """([^#:][^:]*):\\[([^#]+)\\](#.*)?""".r
  // (notational variant, canonical representation) pairs, in replacement order.
  private val terms: Seq[(String, String)] = initialize
  /** Parses the dictionary resource into ordered (variant, representation) pairs. */
  private def initialize: Seq[(String, String)] = {
    if (dictionaryNameOpt.isEmpty) {
      return Nil
    }
    val dictionaryName: String = dictionaryNameOpt.get
    val map: mutable.Map[String, List[String]] = mutable.Map.empty[String, List[String]]
    val buffer: ListBuffer[(String, String)] = ListBuffer.empty[(String, String)]
    val filePath: Path = Paths.get(Config.resourcesDir, "normalizer", dictionaryName).toAbsolutePath
    ascii2native(filePath) foreach {
      case regex(representation, notationalVariants, commentOut) =>
        // A literal "" in the file denotes the empty-string representation (deletion).
        val trimmedRepresentation: String = representation.trim match {
          case "\\"\\"" => ""
          case otherwise => otherwise
        }
        val sortedNotationalVariants: List[String] = sortNotationVariants(notationalVariants.split(',').toList)
        // Merge with variants from earlier lines for the same representation.
        map(trimmedRepresentation) = if (map.contains(trimmedRepresentation)) {
          sortNotationVariants(map(trimmedRepresentation) ++ sortedNotationalVariants)
        } else {
          sortedNotationalVariants
        }
      case _ =>
      //Skip lines that do not match the dictionary format (e.g. comments).
    }
    sortRepresentations(map.keySet.toList) foreach {
      representation: String =>
        map(representation) foreach {
          notationalVariant: String =>
            buffer += ((notationalVariant, representation))
        }
    }
    // Fix: explicit parens — Builder.result() is a method with a parameter list and
    // parameterless application is deprecated in newer Scala versions.
    buffer.result()
  }
  def getTerms: Seq[(String, String)] = terms
  protected def sortNotationVariants(notationVariants: List[String]): List[String] = {
    notationVariants.sorted//alphabetical order
  }
  protected def sortRepresentations(representations: List[String]): List[String] = {
    representations.sorted//alphabetical order
  }
  /** Applies every (variant -> representation) replacement, in order, to the text. */
  def normalize(text: StringOption): StringOption = {
    text map {
      t: String =>
        var result: String = t
        if (terms.nonEmpty) {
          terms foreach {
            // Fix: removed the unreachable `case _ =>` branch — a pair pattern on
            // (String, String) elements is irrefutable, so it could never be taken.
            case (term, replacement) =>
              result = replaceAll(result, term, replacement)
          }
        }
        result
    }
  }
  // Literal (non-regex) global replacement, via the project's StringUtils enrichment.
  protected def replaceAll(input: String, term: String, replacement: String): String = {
    import util.StringUtils._
    input.replaceAllLiteratim(term, replacement)
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/text/normalizer/DictionaryBasedNormalizer.scala | Scala | apache-2.0 | 3,216 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.io.File
import org.apache.spark.SparkException
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTableType}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.internal.SQLConf.BUCKETING_MAX_BUCKETS
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.util.Utils
/** End-to-end tests for CREATE TABLE ... USING ... AS SELECT (CTAS): basic
  * writes, failure paths, partitioning, bucketing limits, and parser rules.
  */
class CreateTableAsSelectSuite extends DataSourceTest with SharedSparkSession {
  import testImplicits._
  protected override lazy val sql = spark.sql _
  // Target directory for created tables; (re)initialized in beforeEach and deleted
  // in afterEach, hence the mutable, initially-null field.
  private var path: File = null
  override def beforeAll(): Unit = {
    super.beforeAll()
    // Source relation "jt" (10 rows of {a, b}) used by every CTAS statement below.
    val ds = (1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}""").toDS()
    spark.read.json(ds).createOrReplaceTempView("jt")
  }
  override def afterAll(): Unit = {
    try {
      spark.catalog.dropTempView("jt")
      Utils.deleteRecursively(path)
    } finally {
      super.afterAll()
    }
  }
  override def beforeEach(): Unit = {
    super.beforeEach()
    // Create-then-delete yields a fresh, not-yet-existing temp path per test.
    path = Utils.createTempDir()
    path.delete()
  }
  override def afterEach(): Unit = {
    Utils.deleteRecursively(path)
    super.afterEach()
  }
  test("CREATE TABLE USING AS SELECT") {
    withTable("jsonTable") {
      sql(
        s"""
        |CREATE TABLE jsonTable
        |USING json
        |OPTIONS (
        |  path '${path.toURI}'
        |) AS
        |SELECT a, b FROM jt
      """.stripMargin)
      checkAnswer(
        sql("SELECT a, b FROM jsonTable"),
        sql("SELECT a, b FROM jt"))
    }
  }
  test("CREATE TABLE USING AS SELECT based on the file without write permission") {
    // setWritable(...) does not work on Windows. Please refer JDK-6728842.
    assume(!Utils.isWindows)
    val childPath = new File(path.toString, "child")
    path.mkdir()
    path.setWritable(false)
    val e = intercept[SparkException] {
      sql(
        s"""
        |CREATE TABLE jsonTable
        |USING json
        |OPTIONS (
        |  path '${childPath.toURI}'
        |) AS
        |SELECT a, b FROM jt
      """.stripMargin)
      sql("SELECT a, b FROM jsonTable").collect()
    }
    // The write into the read-only directory must abort the job.
    assert(e.getMessage().contains("Job aborted"))
    path.setWritable(true)
  }
  test("create a table, drop it and create another one with the same name") {
    withTable("jsonTable") {
      sql(
        s"""
        |CREATE TABLE jsonTable
        |USING json
        |OPTIONS (
        |  path '${path.toURI}'
        |) AS
        |SELECT a, b FROM jt
      """.stripMargin)
      checkAnswer(
        sql("SELECT a, b FROM jsonTable"),
        sql("SELECT a, b FROM jt"))
      // Creates a table of the same name with flag "if not exists", nothing happens
      sql(
        s"""
        |CREATE TABLE IF NOT EXISTS jsonTable
        |USING json
        |OPTIONS (
        |  path '${path.toURI}'
        |) AS
        |SELECT a * 4 FROM jt
      """.stripMargin)
      checkAnswer(
        sql("SELECT * FROM jsonTable"),
        sql("SELECT a, b FROM jt"))
      // Explicitly drops the table and deletes the underlying data.
      sql("DROP TABLE jsonTable")
      if (path.exists()) Utils.deleteRecursively(path)
      // Creates a table of the same name again, this time we succeed.
      sql(
        s"""
        |CREATE TABLE jsonTable
        |USING json
        |OPTIONS (
        |  path '${path.toURI}'
        |) AS
        |SELECT b FROM jt
      """.stripMargin)
      checkAnswer(
        sql("SELECT * FROM jsonTable"),
        sql("SELECT b FROM jt"))
    }
  }
  test("disallows CREATE TEMPORARY TABLE ... USING ... AS query") {
    withTable("t") {
      val error = intercept[ParseException] {
        sql(
          s"""
          |CREATE TEMPORARY TABLE t USING PARQUET
          |OPTIONS (PATH '${path.toURI}')
          |PARTITIONED BY (a)
          |AS SELECT 1 AS a, 2 AS b
        """.stripMargin
        )
      }.getMessage
      assert(error.contains("Operation not allowed") &&
        error.contains("CREATE TEMPORARY TABLE"))
    }
  }
  test("SPARK-33651: allow CREATE EXTERNAL TABLE ... USING ... if location is specified") {
    withTable("t") {
      sql(
        s"""
        |CREATE EXTERNAL TABLE t USING PARQUET
        |OPTIONS (PATH '${path.toURI}')
        |AS SELECT 1 AS a, 2 AS b
      """.stripMargin)
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.tableType == CatalogTableType.EXTERNAL)
      assert(table.location.toString == path.toURI.toString.stripSuffix("/"))
    }
  }
  test("create table using as select - with partitioned by") {
    val catalog = spark.sessionState.catalog
    withTable("t") {
      sql(
        s"""
        |CREATE TABLE t USING PARQUET
        |OPTIONS (PATH '${path.toURI}')
        |PARTITIONED BY (a)
        |AS SELECT 1 AS a, 2 AS b
      """.stripMargin
      )
      val table = catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.partitionColumnNames == Seq("a"))
    }
  }
  test("create table using as select - with valid number of buckets") {
    val catalog = spark.sessionState.catalog
    withTable("t") {
      sql(
        s"""
        |CREATE TABLE t USING PARQUET
        |OPTIONS (PATH '${path.toURI}')
        |CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS
        |AS SELECT 1 AS a, 2 AS b
      """.stripMargin
      )
      val table = catalog.getTableMetadata(TableIdentifier("t"))
      assert(table.bucketSpec == Option(BucketSpec(5, Seq("a"), Seq("b"))))
    }
  }
  test("create table using as select - with invalid number of buckets") {
    withTable("t") {
      // 0 and (default max + 1) are both outside the allowed bucket range.
      Seq(0, 100001).foreach(numBuckets => {
        val e = intercept[AnalysisException] {
          sql(
            s"""
            |CREATE TABLE t USING PARQUET
            |OPTIONS (PATH '${path.toURI}')
            |CLUSTERED BY (a) SORTED BY (b) INTO $numBuckets BUCKETS
            |AS SELECT 1 AS a, 2 AS b
          """.stripMargin
          )
        }.getMessage
        assert(e.contains("Number of buckets should be greater than 0 but less than"))
      })
    }
  }
  test("create table using as select - with overridden max number of buckets") {
    def createTableSql(numBuckets: Int): String =
      s"""
      |CREATE TABLE t USING PARQUET
      |OPTIONS (PATH '${path.toURI}')
      |CLUSTERED BY (a) SORTED BY (b) INTO $numBuckets BUCKETS
      |AS SELECT 1 AS a, 2 AS b
    """.stripMargin
    val maxNrBuckets: Int = 200000
    val catalog = spark.sessionState.catalog
    withSQLConf(BUCKETING_MAX_BUCKETS.key -> maxNrBuckets.toString) {
      // Within the new limit
      Seq(100001, maxNrBuckets).foreach(numBuckets => {
        withTable("t") {
          sql(createTableSql(numBuckets))
          val table = catalog.getTableMetadata(TableIdentifier("t"))
          assert(table.bucketSpec == Option(BucketSpec(numBuckets, Seq("a"), Seq("b"))))
        }
      })
      // Over the new limit
      withTable("t") {
        val e = intercept[AnalysisException](sql(createTableSql(maxNrBuckets + 1)))
        assert(
          e.getMessage.contains("Number of buckets should be greater than 0 but less than "))
      }
    }
  }
  test("SPARK-17409: CTAS of decimal calculation") {
    withTable("tab2") {
      withTempView("tab1") {
        spark.range(99, 101).createOrReplaceTempView("tab1")
        val sqlStmt =
          "SELECT id, cast(id as long) * cast('1.0' as decimal(38, 18)) as num FROM tab1"
        sql(s"CREATE TABLE tab2 USING PARQUET AS $sqlStmt")
        // The CTAS result must match running the SELECT directly.
        checkAnswer(spark.table("tab2"), sql(sqlStmt))
      }
    }
  }
  test("specifying the column list for CTAS") {
    withTable("t") {
      val e = intercept[ParseException] {
        sql("CREATE TABLE t (a int, b int) USING parquet AS SELECT 1, 2")
      }.getMessage
      assert(e.contains("Schema may not be specified in a Create Table As Select (CTAS)"))
    }
  }
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala | Scala | apache-2.0 | 8,938 |
package com.chriswk.bnet.wow.model
/** Immutable view of a World of Warcraft item as exposed by this wow model package.
  * NOTE(review): `descrription` is misspelled, but it is part of the public
  * constructor signature (and possibly of a field mapping), so renaming it would
  * break callers — left unchanged.
  */
case class Item(armor: Long, baseArmor: Long,
                bonusStats: List[StatHolder], buyPrice: Long,
                containerSlots: Long, descrription: String,
                disenchantingSkillRank: Long, displayInfoId: Long,
                equippable: Boolean, hasSockets: Boolean,
                heroicTooltip: Boolean, icon: String,
                id: Long, name: String, nameDescriptionColor: String)
| chriswk/sbnetapi | src/main/scala/com/chriswk/bnet/wow/model/Item.scala | Scala | mit | 453 |
object Macros {
  // Macro entry points covering the nullary / empty-parens matrix, with and
  // without an extra (Int) parameter list; implementations live in Impls.
  def foo1[T]: Unit = macro Impls.fooNullary[T]
  def foo2[T](): Unit = macro Impls.fooEmpty[T]
  def bar1[T](x: Int): Unit = macro Impls.barNullary[T]
  def bar2[T](x: Int)(): Unit = macro Impls.barEmpty[T]
}
object Test extends dotty.runtime.LegacyApp {
  // Expand each macro; foo2 is invoked both without and with explicit parens to
  // exercise empty-parens auto-application.
  Macros.foo1[Int]
  Macros.foo2[Int]
  Macros.foo2[Int]()
  Macros.bar1[Int](42)
  Macros.bar2[Int](42)()
  println("kkthxbai")
}
} | yusuke2255/dotty | tests/disabled/macro/run/macro-expand-nullary-generic/Macros_Test_2.scala | Scala | bsd-3-clause | 403 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.kryo
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.kryo.serialization.KryoGeometrySerialization
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.languageFeature.postfixOps
@RunWith(classOf[JUnitRunner])
class KryoGeometrySerializerTest extends Specification {

  "KryoGeometrySerializer" should {
    // Round-trip a representative set of WKT geometries through Kryo.
    "correctly serialize and deserialize different geometries" in {
      val geoms = Seq(
        "LINESTRING(0 2, 2 0, 8 6)",
        "POLYGON((20 10, 30 0, 40 10, 30 20, 20 10))",
        "MULTIPOINT(0 0, 2 2)",
        "MULTILINESTRING((0 2, 2 0, 8 6),(0 2, 2 0, 8 6))",
        "MULTIPOLYGON(((-1 0, 0 1, 1 0, 0 -1, -1 0)), ((-2 6, 1 6, 1 3, -2 3, -2 6)), ((-1 5, 2 5, 2 2, -1 2, -1 5)))",
        "MULTIPOINT(0 0, 2 2)",
        "POINT(55.0 49.0)"
      ).map(WKTUtils.read)

      // Serialization must work both with backing byte arrays and with streams.
      "using byte arrays" >> {
        geoms.foreach { geom =>
          val out = new Output(512)
          KryoGeometrySerialization.serialize(out, geom)
          val in = new Input(out.toBytes)
          val deserialized = KryoGeometrySerialization.deserialize(in)
          deserialized mustEqual geom
        }
        success
      }
      "using streams" >> {
        geoms.foreach { geom =>
          val out = new Output(new ByteArrayOutputStream(), 512)
          KryoGeometrySerialization.serialize(out, geom)
          val in = new Input(new ByteArrayInputStream(out.toBytes))
          val deserialized = KryoGeometrySerialization.deserialize(in)
          deserialized mustEqual geom
        }
        success
      }
    }
    // Bytes below were produced by GeoMesa 1.2.6: current code must still read them.
    "be backwards compatible with geometry collections" in {
      val geoms = Seq(
        "MULTIPOINT ((10 40), (40 30), (20 20), (30 10))",
        "MULTIPOINT (10 40, 40 30, 20 20, 30 10)",
        "MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))",
        "MULTIPOLYGON (((30 20, 45 40, 10 40, 30 20)),((15 5, 40 10, 10 20, 5 10, 15 5)))",
        "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))"
      ).map(WKTUtils.read)
      val serialized126 = Seq(
        "1,4,4,1,1,64,36,0,0,0,0,0,0,64,68,0,0,0,0,0,0,1,1,64,68,0,0,0,0,0,0,64,62,0,0,0,0,0,0,1,1,64,52,0,0,0,0,0,0,64,52,0,0,0,0,0,0,1,1,64,62,0,0,0,0,0,0,64,36,0,0,0,0,0,0",
        "1,4,4,1,1,64,36,0,0,0,0,0,0,64,68,0,0,0,0,0,0,1,1,64,68,0,0,0,0,0,0,64,62,0,0,0,0,0,0,1,1,64,52,0,0,0,0,0,0,64,52,0,0,0,0,0,0,1,1,64,62,0,0,0,0,0,0,64,36,0,0,0,0,0,0",
        "1,5,2,1,2,3,64,36,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,68,0,0,0,0,0,0,1,2,4,64,68,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,36,0,0,0,0,0,0",
        "1,6,2,1,3,4,64,62,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,70,-128,0,0,0,0,0,64,68,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,52,0,0,0,0,0,0,0,1,3,5,64,46,0,0,0,0,0,0,64,20,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,20,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,46,0,0,0,0,0,0,64,20,0,0,0,0,0,0,0",
        "1,6,2,1,3,4,64,68,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,70,-128,0,0,0,0,0,64,70,-128,0,0,0,0,0,64,62,0,0,0,0,0,0,64,68,0,0,0,0,0,0,64,68,0,0,0,0,0,0,0,1,3,6,64,52,0,0,0,0,0,0,64,65,-128,0,0,0,0,0,64,36,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,36,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,20,0,0,0,0,0,0,64,70,-128,0,0,0,0,0,64,52,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,65,-128,0,0,0,0,0,1,4,64,62,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,46,0,0,0,0,0,0,64,52,0,0,0,0,0,0,64,57,0,0,0,0,0,0,64,62,0,0,0,0,0,0,64,52,0,0,0,0,0,0"
      ).map(_.split(",").map(_.toByte))

      forall(geoms.zip(serialized126)) { case (geom, bytes) =>
        val in = new Input(new ByteArrayInputStream(bytes))
        val read = KryoGeometrySerialization.deserialize(in)
        read mustEqual geom
      }
    }
  }
}
| tkunicki/geomesa | geomesa-features/geomesa-feature-kryo/src/test/scala/org/locationtech/geomesa/features/kryo/KryoGeometrySerializerTest.scala | Scala | apache-2.0 | 4,635 |
package lila.security
import com.github.blemale.scaffeine.Cache
import org.joda.time.Instant
import scala.concurrent.duration.FiniteDuration
import lila.common.base.Levenshtein.isLevenshteinDistanceLessThan
import lila.user.User
final class Flood(duration: FiniteDuration) {
  import Flood._
  // Index of the message checked by quickPost: more than floodNumber + 1 messages
  // inside the 10-second window counts as flooding.
  private val floodNumber = 4
  // Recent messages per user id, evicted `duration` after last access.
  private val cache: Cache[User.ID, Messages] = lila.memo.CacheApi.scaffeineNoScheduler
    .expireAfterAccess(duration)
    .build[User.ID, Messages]()
  /** Returns true when `text` may be posted by `uid`; an accepted message is
    * prepended to the user's recent-message list.
    * NOTE(review): `~` and `??` are lila.common enrichments — assuming `b ~ f`
    * runs `f(b)` for its side effect and returns `b` unchanged; confirm.
    */
  def allowMessage(uid: User.ID, text: String): Boolean = {
    val msg = Message(text, Instant.now)
    val msgs = ~cache.getIfPresent(uid)
    !duplicateMessage(msg, msgs) && !quickPost(msg, msgs) ~ {
      _ ?? cache.put(uid, msg :: msgs)
    }
  }
  // True when the user's (floodNumber + 1)-th most recent message is less than 10s old.
  private def quickPost(msg: Message, msgs: Messages): Boolean =
    msgs.lift(floodNumber) ?? (_.date isAfter msg.date.minus(10000L))
}
private object Flood {
  // Preset chat phrases (kept in sync with ui/chat/src/preset.ts) that are
  // always allowed, even when repeated.
  private val passList = Set(
    "Hello",
    "Good luck",
    "Have fun!",
    "You too!",
    "Good game",
    "Well played",
    "Thank you",
    "I've got to go",
    "Bye!"
  )
  private[security] case class Message(text: String, date: Instant)
  private type Messages = List[Message]
  // True when the candidate closely matches either of the user's two most recent
  // messages, unless it is one of the preset phrases above.
  private[security] def duplicateMessage(msg: Message, msgs: Messages): Boolean =
    !passList.contains(msg.text) && msgs.headOption.?? { m =>
      similar(m.text, msg.text) || msgs.tail.headOption.?? { m2 =>
        similar(m2.text, msg.text)
      }
    }
  // Fuzzy equality: Levenshtein distance below min(len)/8, with a floor of 2.
  // NOTE(review): `atLeast` is a lila.common extension — assuming it clamps to a lower bound.
  private def similar(s1: String, s2: String): Boolean = {
    isLevenshteinDistanceLessThan(s1, s2, (s1.length.min(s2.length) >> 3) atLeast 2)
  }
}
| luanlv/lila | modules/security/src/main/Flood.scala | Scala | mit | 1,630 |
import i9916b.T
trait Test {
  // Implementors must expose a value of T (imported from i9916b above).
  def foo: T
}
| dotty-staging/dotty | sbt-test/scala2-compat/i9916b/main/i9916b-test.scala | Scala | apache-2.0 | 45 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.
package scala.runtime
/** Empty concrete base class for 7-ary functions, part of the generated
  * AbstractFunctionN family (see the "GENERATED CODE" header above).
  */
abstract class AbstractFunction7[-T1, -T2, -T3, -T4, -T5, -T6, -T7, +R] extends Function7[T1, T2, T3, T4, T5, T6, T7, R] {

}
| felixmulder/scala | src/library/scala/runtime/AbstractFunction7.scala | Scala | bsd-3-clause | 742 |
package org.apache.spark.ml.parity.feature
import org.apache.spark.ml.parity.SparkParityBase
import org.apache.spark.ml.feature.{PolynomialExpansion, VectorAssembler}
import org.apache.spark.ml.{Pipeline, Transformer}
import org.apache.spark.sql.DataFrame
/**
* Created by hollinwilkins on 10/30/16.
*/
class PolynomialExpansionParitySpec extends SparkParityBase {
  // Two numeric columns assembled into a "features" vector, then expanded into
  // degree-3 polynomial features; parity is checked against the MLeap runtime.
  override val dataset: DataFrame = baseDataset.select("dti", "loan_amount")
  override val sparkTransformer: Transformer = new Pipeline().setStages(Array(new VectorAssembler().
    setInputCols(Array("dti", "loan_amount")).
    setOutputCol("features"),
    new PolynomialExpansion().
      setInputCol("features").
      setOutputCol("poly").
      setDegree(3))).fit(dataset)
}
| combust/mleap | mleap-spark/src/test/scala/org/apache/spark/ml/parity/feature/PolynomialExpansionParitySpec.scala | Scala | apache-2.0 | 752 |
package test.dao
import com.typesafe.scalalogging.LazyLogging
import wow.dto.WowCharacter
import org.specs2.mutable.{BeforeAfter, Specification}
import org.specs2.specification.Scope
import reactivemongo.bson.{BSONDocument, BSONDocumentReader, Macros}
import scala.concurrent.ExecutionContext.Implicits.global
import org.specs2.concurrent.ExecutionEnv
import org.specs2.specification.mutable.ExecutionEnvironment
import wow.dao.{Collections, Services}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by Ross on 8/7/2016.
*/
class WowCharacterServiceSpecs extends Specification with ExecutionEnvironment with LazyLogging {
  def is(implicit ee: ExecutionEnv) = {
    // Unit tests normally run in parallel, so before/after hooks are ordered only
    // relative to their own example, not relative to other examples. Because every
    // example shares the same database, run them sequentially to avoid interference.
    sequential
    "Insert if not exists should succeed" in new WowCharacterServiceContext {
      val pg = WowCharacter("Test", 1, 1)
      // NOTE(review): the insert Future is not awaited here; the count below only
      // waits via awaitFor — confirm this ordering is guaranteed by the driver.
      service.insert(pg)
      val size = Collections.wowCharacterCollection flatMap { collection =>
        collection.find(BSONDocument())
          .cursor()
          .collect[List]() map (_.size)
      }
      size must be_==(1).awaitFor(timeOut)
    }
    // NOTE(review): "exsist" is a typo in the example name; fixing it would change test output.
    "Insert if exsist should update" in new WowCharacterServiceContext {
      implicit val reader: BSONDocumentReader[WowCharacter] = Macros.reader[WowCharacter]
      val pg = WowCharacter("Test2", 1, 1)
      val pg2 = WowCharacter("Test2", 0, 0)
      // A second insert with the same name must update the existing document,
      // not create a duplicate.
      Await.result(service.insert(pg), timeOut)
      Await.result(service.insert(pg2), timeOut)
      val result = Collections.wowCharacterCollection flatMap { collection =>
        collection.find(BSONDocument())
          .cursor[WowCharacter]()
          .collect[List]()
      }
      result onSuccess{
        case r => logger.debug(r.head.toString)
      }
      result map(_.size) must be_==(1).awaitFor(timeOut)
      result map(_.head) must be_==(pg2).awaitFor(timeOut)
    }
  }
}
//TODO use context here can be cumbersome
/**
 * Shared fixture for the service specs: exposes the service under test and
 * drops the backing Mongo collection before and after each example.
 */
trait WowCharacterServiceContext extends Scope with BeforeAfter with LazyLogging {
  /** Service under test, shared across all examples. */
  val service = Services.wowCharacterService
  /** Maximum time to wait for any asynchronous expectation. */
  val timeOut: FiniteDuration = 10.seconds
  /** Drops the collection so the example starts from an empty database. */
  def before = Collections.wowCharacterCollection.map(collection => collection.drop(true))
  /** Drops the collection again so no documents leak into later examples. */
  def after = Collections.wowCharacterCollection.map(collection => collection.drop(true))
}
| MeMpy/wow-characters | src/test/scala/test/dao/WowCharacterServiceSpecs.scala | Scala | gpl-3.0 | 2,452 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.test
import org.scalatest.{ FlatSpec, Matchers }
import org.schedoscope.DriverTests
import org.schedoscope.dsl.Field.v
import org.schedoscope.dsl.Parameter.p
import test.eci.datahub.{ Click, ClickOfEC0101 }
/**
 * Exercises the local Hive test harness: seeds click rows for two shops
 * (EC0101 and EC0106) and verifies that the `ClickOfEC0101` view transformation
 * keeps exactly the three EC0101 rows.
 */
class HiveTestFrameworkTest extends FlatSpec with Matchers {
  // Input fixture: three clicks for shop EC0101 on 2014-01-01,
  // built with the `rows` test DSL (each `set(...)` call appends one row).
  val ec0101Clicks = new Click(p("EC0101"), p("2014"), p("01"), p("01")) with rows {
    set(
      v(id, "event01"),
      v(url, "http://ec0101.com/url1"))
    set(
      v(id, "event02"),
      v(url, "http://ec0101.com/url2"))
    set(
      v(id, "event03"),
      v(url, "http://ec0101.com/url3"))
  }
  // Input fixture: three clicks for a different shop (EC0106) on the same day;
  // these must be filtered out by the view under test.
  val ec0106Clicks = new Click(p("EC0106"), p("2014"), p("01"), p("01")) with rows {
    set(
      v(id, "event04"),
      v(url, "http://ec0106.com/url1"))
    set(
      v(id, "event05"),
      v(url, "http://ec0106.com/url2"))
    set(
      v(id, "event06"),
      v(url, "http://ec0106.com/url3"))
  }
  "Hive test framework" should "execute hive transformations locally" taggedAs (DriverTests) in {
    // `basedOn` declares the input data, ``then`` runs the view's Hive
    // transformation locally, and each `row(...)` checks one output row in order.
    new ClickOfEC0101(p("2014"), p("01"), p("01")) with test {
      basedOn(ec0101Clicks, ec0106Clicks)
      `then`()
      numRows shouldBe 3
      row(v(id) shouldBe "event01",
        v(url) shouldBe "http://ec0101.com/url1")
      row(v(id) shouldBe "event02",
        v(url) shouldBe "http://ec0101.com/url2")
      row(v(id) shouldBe "event03",
        v(url) shouldBe "http://ec0101.com/url3")
    }
  }
}
package com.gmail.at.pukanito.model.attributes
/**
* Exception thrown when an attribute definition is added to a model with an already existing id.
*
* @constructor Create a duplicate attribute definition exception.
 * @param value The item that failed to be added.
*/
/** Raised when an attribute definition is added whose id already exists in the model. */
class DuplicateAttributeDefinitionException(value: AttributeDefinition)
  extends RuntimeException("Duplicate attribute definition: " + value.attributeId)
/**
* Trait for constructing an attribute definition graph with a special DSL.
* The attribute definition graph is the attribute model.
*
* To create a model:
* {{{
* object someModel extends AttributeModel {
* attribute("key1") { }
* attribute("key2") { }
* attribute("parent") { has keys("key1", "key2") }
* attribute("child1") { has parents "parent" }
* attribute("child2") { has parents "parent" }
* attribute("child3") { has parents "parent" }
* attribute("child1ofchild1") { has parents "child1" and "parent" }
* attribute("child2ofchild1") { has parents "child1" }
* attribute("child1ofchild3") { has parents "child3" }
* }
* }}}
*
* or
*
* {{{
* val someModel = new AttributeModel {
* attribute("key1") { }
* attribute("key2") { }
* attribute("parent") { has keys("key1", "key2") }
* attribute("child1") { has parents "parent" }
* attribute("child2") { has parents "parent" }
* attribute("child3") { has parents "parent" }
* attribute("child1ofchild1") { has parents "child1" and "parent" }
* attribute("child2ofchild1") { has parents "child1" }
* attribute("child1ofchild3") { has parents "child3" }
* }
* }}}
*
* To access attribute definitions inside the model use the 'attributes' map,
* it contains all AttributeDefinitionNodes by AttributeIdentifier:
* {{{
* val attributeDefinitionNode = someModel.attributes("id")
* }}}
*
* This will return the attribute definition node of the specified attribute and from there
* parents and children will also be accessible.
*/
trait AttributeModel {
  import com.gmail.at.pukanito.model.graph.Graph
  // Backing graph that stores the parent/child links between definition nodes.
  implicit val graph = new Graph[AttributeDefinitionNode]
  /**
   * Map of known attribute definition nodes in this model, keyed by attribute id.
   */
  private var definedAttributes: Map[AttributeIdentifier, AttributeDefinitionNode] = Map()
  // Marker for an attribute's value type; only "Integer" is defined so far.
  private class isOfType(val id: String) {}
  private val IntegerAttribute = new isOfType("Integer")
  /**
   * Returns a map of all defined attribute nodes, keyed by attribute id.
   */
  def attributes = definedAttributes
  /**
   * Get the definitions of the root attributes (i.e. attributes without parents).
   */
  def rootAttributes: Set[AttributeDefinitionNode] = {
    (for (
      (_, definition) <- definedAttributes
      if definition.parents.isEmpty
    ) yield definition) (collection.breakOut)
  }
  /** The DSL fields and methods **/
  /**
   * Class which can handle the 'and' word in 'has keys .. and .. and ..'
   *
   * @param h the hasWord instance which manages the keys.
   */
  private class hasKey(val h: hasWord) {
    // Adds one more key id; returns this so further 'and' calls can chain.
    def and(id: AttributeIdentifier) = { h.attributeValueKeyIds += id; this }
  }
  /**
   * Class which can handle the 'and' word in 'has children .. and .. and ..'
   *
   * @param h the hasWord instance which manages the children.
   */
  private class hasChildren(val h: hasWord) {
    // Adds one more child id; returns this so further 'and' calls can chain.
    def and(id: AttributeIdentifier) = { h.initialChildren += id; this }
  }
  /**
   * Class which can handle the 'and' word in 'has parents .. and .. and ..'
   *
   * @param h the hasWord instance which manages the parents.
   */
  private class hasParents(val h: hasWord) {
    // Adds one more parent id; returns this so further 'and' calls can chain.
    def and(id: AttributeIdentifier) = { h.initialParents += id; this }
  }
  /**
   * Builder class for an attribute definition node. A single shared instance
   * (the `has` value below) accumulates keys/parents/children while an
   * `attribute(...) { ... }` body runs, and is cleared before each body.
   */
  private class hasWord {
    /**
     * Attribute property: specified key ids.
     */
    private[AttributeModel] var attributeValueKeyIds: Set[AttributeIdentifier] = Set()
    /**
     * Attribute property: specified children ids.
     */
    private[AttributeModel] var initialChildren: Set[AttributeIdentifier] = Set()
    /**
     * Attribute property: specified parent ids.
     */
    private[AttributeModel] var initialParents: Set[AttributeIdentifier] = Set()
    /**
     * Clear the builder (called before each 'attribute' body is evaluated).
     */
    private[AttributeModel] def clear: Unit = { attributeValueKeyIds = Set(); initialChildren = Set(); initialParents = Set() }
    /**
     * Build the attribute definition node from the collected properties, or add
     * the collected parents/children to an already existing definition.
     *
     * Key ids may only be given on the first definition of an attribute; a
     * repeated definition with keys throws.
     *
     * @param id the id of the attribute to create.
     * @return the constructed (or existing) attribute definition node.
     */
    private[AttributeModel] def build(id: String): AttributeDefinitionNode = {
      // NOTE(review): `definedAttributes get id` looks up a String against an
      // AttributeIdentifier-keyed map — presumably via an implicit conversion
      // declared with AttributeIdentifier; confirm.
      val attr = definedAttributes get id match {
        case Some(attr) =>
          if (attributeValueKeyIds.size > 0) throw new RuntimeException(s"Cannot add attribute keys after first definition of attribute '${attr.attributeId}'")
          attr
        case None =>
          val attr = new AttributeDefinitionNode(id, attributeValueKeyIds.toList)
          definedAttributes += attr.attributeId -> attr
          // Each key attribute must already be defined; undefined keys throw here.
          attributeValueKeyIds foreach (attr += definedAttributes(_))
          attr
      }
      // Parents/children may be added on every (re-)definition of the attribute.
      initialChildren foreach (attr += definedAttributes(_))
      initialParents foreach (definedAttributes(_) += attr)
      attr
    }
    /**
     * Handler for the 'keys' word in 'has keys .. and .. and ..'
     *
     * @param ids the specified keys.
     * @return a hasKey instance.
     */
    def keys(ids: AttributeIdentifier*): hasKey = { attributeValueKeyIds ++= ids; new hasKey(this) }
    /**
     * Handler for the 'parents' word in 'has parents .. and .. and ..'
     *
     * @param ids the specified parent ids.
     * @return a hasParents instance.
     */
    def parents(ids: AttributeIdentifier*): hasParents = { initialParents ++= ids; new hasParents(this) }
    /**
     * Handler for the 'children' word in 'has children .. and .. and ..'
     *
     * @param ids the specified children ids.
     * @return a hasChildren instance.
     */
    def children(ids: AttributeIdentifier*): hasChildren = { initialChildren ++= ids; new hasChildren(this) }
  }
  /**
   * Definition of the 'has' word so that it can be used in attribute definition nodes.
   */
  protected[AttributeModel] val has = new hasWord
  /**
   * Class which implements the control structure for creating attribute definition nodes: the 'attribute' word.
   *
   * @param t the type of the attribute definition (defaults to Integer).
   */
  private class attributeWord(val t: isOfType = IntegerAttribute) {
    /**
     * Handler for the 'attribute' word: clears the shared builder, evaluates the
     * body (which feeds the builder via 'has ...'), then builds the node.
     *
     * @param id the id of the attribute to create.
     * @return the constructed attribute definition.
     */
    def apply(id: String)(body: => Unit): AttributeDefinitionNode = {
      has.clear
      body
      has.build(id)
    }
  }
  /**
   * Definition of the 'attribute' word so that it can be used in attribute models.
   */
  protected[AttributeModel] val attribute = new attributeWord
  /**
   * Class which implements including another attribute model using the 'include' word.
   *
   * The included model will be a copy of the original because AttributeDefinitionNode is not immutable.
   */
  private class includeWord {
    def apply(that: AttributeModel): Unit = {
      // Create copies of the attributes first (a clash with an attribute id
      // already present in this model is an error).
      that.definedAttributes foreach { case (thatAttrId, thatAttrDef) =>
        if (definedAttributes contains thatAttrId) throw new DuplicateAttributeDefinitionException(thatAttrDef)
        definedAttributes += thatAttrId -> thatAttrDef.copy
      }
      // Then re-create the parent-child relationships between the copied nodes.
      that.definedAttributes foreach { case (thatAttrId, thatAttrDef) =>
        thatAttrDef.children foreach { case (_, childAttrDef) => definedAttributes(thatAttrId) += definedAttributes(childAttrDef.attributeId)} }
    }
  }
  /**
   * Definition of the 'include' word so that it can be used in attribute models.
   */
  protected[AttributeModel] val include = new includeWord
}
| pukanito/bigdatanalysis | src/main/scala/com/gmail/at/pukanito/model/attributes/AttributeModel.scala | Scala | gpl-3.0 | 8,130 |
package edu.gemini.model.p1.visibility
import edu.gemini.model.p1.immutable._
import edu.gemini.model.p1.immutable.SemesterOption.{A, B}
import edu.gemini.model.p1.mutable.Band.BAND_1_2
import edu.gemini.model.p1.mutable.GnirsFilter.ORDER_3
import edu.gemini.spModel.core.{Declination, Angle, RightAscension, Coordinates}
import org.junit.Test
import org.junit.Assert._
import TargetVisibility.{Good, Limited, Bad}
import java.util.UUID
/**
 * Boundary tests for `TargetVisibilityCalc`: checks the Good/Limited/Bad
 * verdicts for GN/GS, NGS/LGS, semester A/B across the documented RA and
 * declination bands. RA values wrap at 24h (so "0:00:00" doubles as "24:00:00").
 */
class TestTargetVisibilityTest {
  // Blueprints selecting each site/guide-star combination.
  val gnLgs = GnirsBlueprintImaging(AltairLGS(pwfs1 = false), GnirsPixelScale.PS_005, ORDER_3)
  val gnNgs = gnLgs.copy(altair = AltairNone)
  val gsNgs = GmosSBlueprintImaging(Nil)
  val gsLgs = GsaoiBlueprint(Nil)
  // Base observation per combination; tests swap in targets via copy().
  val baseTarget = SiderealTarget(UUID.randomUUID(), "x", Coordinates.zero, CoordinatesEpoch.J_2000, None, Nil)
  val baseObsGNNgs = Observation(Some(gnNgs), None, Some(baseTarget), BAND_1_2, None)
  val baseObsGNLgs = Observation(Some(gnLgs), None, Some(baseTarget), BAND_1_2, None)
  val baseObsGSNgs = Observation(Some(gsNgs), None, Some(baseTarget), BAND_1_2, None)
  val baseObsGSLgs = Observation(Some(gsLgs), None, Some(baseTarget), BAND_1_2, None)
  val semA = Semester(2012, A)
  val semB = Semester(2012, B)
  /** Parses "hh:mm:ss"/"dd:mm:ss" strings into Coordinates (zero on parse failure). */
  def coordinates(raStr: String, decStr: String): Coordinates = Coordinates(RightAscension.fromAngle(Angle.parseHMS(raStr).getOrElse(Angle.zero)), Declination.fromAngle(Angle.parseDMS(decStr).getOrElse(Angle.zero)).getOrElse(Declination.zero))
  @Test def testMissingBlueprint() {
    val obs = baseObsGNNgs.copy(blueprint = None)
    assertEquals(None, TargetVisibilityCalc.get(semA, obs))
  }
  @Test def testMissingTarget() {
    val obs = baseObsGNNgs.copy(target = None)
    assertEquals(None, TargetVisibilityCalc.get(semA, obs))
  }
  // Helpers: assert `expected` visibility for every (ra, dec) pair on the
  // given site/guide-star/semester combination.
  private def gnNgsA(expected: TargetVisibility, coords: (String, String)*) {
    ngs(expected, baseObsGNNgs, semA, coords: _*)
  }
  private def gnNgsB(expected: TargetVisibility, coords: (String, String)*) {
    ngs(expected, baseObsGNNgs, semB, coords: _*)
  }
  private def gsNgsB(expected: TargetVisibility, coords: (String, String)*) {
    ngs(expected, baseObsGSNgs, semB, coords: _*)
  }
  private def gsNgsA(expected: TargetVisibility, coords: (String, String)*) {
    ngs(expected, baseObsGSNgs, semA, coords: _*)
  }
  private def ngs(expected: TargetVisibility, observation: Observation, semester: Semester, coords: (String, String)*) {
    coords.foreach { tup =>
      val (raStr, decStr) = tup
      val target = baseTarget.copy(coords = coordinates(raStr, decStr))
      val obs = observation.copy(target = Some(target))
      assertEquals(Some(expected), TargetVisibilityCalc.get(semester, obs))
    }
  }
  private def gsLgsA(expected: TargetVisibility, coords: (String, String)*) {
    lgs(expected, baseObsGSLgs, semA, coords: _*)
  }
  private def gsLgsB(expected: TargetVisibility, coords: (String, String)*) {
    lgs(expected, baseObsGSLgs, semB, coords: _*)
  }
  private def gnLgsA(expected: TargetVisibility, coords: (String, String)*) {
    lgs(expected, baseObsGNLgs, semA, coords: _*)
  }
  private def gnLgsB(expected: TargetVisibility, coords: (String, String)*) {
    lgs(expected, baseObsGNLgs, semB, coords: _*)
  }
  private def lgs(expected: TargetVisibility, observation: Observation, semester: Semester, coords: (String, String)*) {
    coords foreach { tup =>
      val (raStr, decStr) = tup
      val target = baseTarget.copy(coords = coordinates(raStr, decStr))
      val obs = observation.copy(target = Some(target))
      assertEquals(Some(expected), TargetVisibilityCalc.get(semester, obs))
    }
  }
  @Test def testRaGoodDecGood() {
    gnNgsA(Good, ("10:00:00", "20:00:00"))
  }
  @Test def testRaGoodDecBad() {
    gnNgsA(Bad, ("10:00:00", "-37:00:00.1"))
  }
  @Test def testRaGoodDecLimited() {
    gnNgsA(Limited, ("10:00:00", "79:00:00"))
  }
  @Test def testRaIffyDecGood() {
    gnNgsA(Limited, ("5:00:00", "20:00:00"))
  }
  @Test def testRaBadDecGood() {
    gnNgsA(Bad, ("1:00:00", "20:00:00"), ("3:59:59.9", "20:00:00"))
  }
  @Test def testRaBadDecIffy() {
    gnNgsA(Bad, ("1:00:00", "73:00:00"), ("3:59:59.9", "78:59:59.9"))
  }
  @Test def testDecForGSLgs() {
    gsLgsA(Limited, ("09:00:00", "-72:00:00"))
    gsLgsA(Good, ("09:00:00", "0:00:00"))
    gsLgsA(Limited, ("09:00:00", "12:00:00"))
    gsLgsA(Bad, ("09:00:00", "20:00:00"))
  }
  @Test def testDecForGSLgsSemester() {
    // between higher than -70 and less than 5 is Good
    gsLgsB(Good, ("0:00:00", "-69:59:59.999"))
    gsLgsB(Good, ("0:00:00", "4:59:59.999"))
    // Less or equal to -75 is Bad
    gsLgsB(Bad, ("0:00:00", "-75:00:00"))
    gsLgsB(Bad, ("0:00:00", "-90:00:00"))
    // More or equal to 5 and less than 15 is warning
    gsLgsB(Limited, ("0:00:00", "10:00:00"))
    gsLgsB(Limited, ("0:00:00", "14:59:59.999"))
    // More than 15 is bad
    gsLgsB(Bad, ("0:00:00", "15:00:00"))
    gsLgsB(Bad, ("0:00:00", "90:00:00"))
    // Up to 10 should be good
    gsLgsB(Good, ("0:00:00", "9:59:59"))
  }
  @Test def testRaGSLgsBSemester() {
    // More or equal to 8 and less than 11 is warning
    gsLgsB(Limited, ("8:00:00", "0:00:00"))
    gsLgsB(Limited, ("10:59:59.999", "0:00:00"))
    // More or equal to 11 and less and equal to 19 is bad
    gsLgsB(Bad, ("11:00:00", "0:00:00"))
    gsLgsB(Bad, ("19:00:00", "0:00:00"))
    // More than 19 and less or equal to 20 is warning
    gsLgsB(Limited, ("19:00:00.001", "0:00:00"))
    gsLgsB(Limited, ("20:00:00", "0:00:00"))
    // Other points are good
    gsLgsB(Good, ("0:00:00", "0:00:00"))
    gsLgsB(Good, ("21:00:00", "0:00:00"))
  }
  @Test def testRaGSNgsSemester() {
    // More or equal to 9 and less than 12 is warning
    gsNgsB(Limited, ("9:00:00", "0:00:00"))
    gsNgsB(Limited, ("11:59:59.999", "0:00:00"))
    // More or equal to 12 and less and equal to 16 is bad
    gsNgsB(Bad, ("12:00:00", "0:00:00"))
    gsNgsB(Bad, ("16:00:00", "0:00:00"))
    // More than 16 and less or equal to 19 is warning
    gsNgsB(Limited, ("16:00:00.001", "0:00:00"))
    gsNgsB(Limited, ("19:00:00", "0:00:00"))
    // Other points are good
    gsNgsB(Good, ("0:00:00", "0:00:00"))
    gsNgsB(Good, ("20:00:00", "0:00:00"))
  }
  @Test def testDecGSNgsBSemester() {
    // between more than -87 and less than 22 is Good
    gsNgsB(Good, ("22:00:00", "-86:59:59.999"))
    gsNgsB(Good, ("22:00:00", "21:59:59.999"))
    // Less or equal than -87 is warning
    gsNgsB(Limited, ("22:00:00", "-87:00:00"))
    gsNgsB(Limited, ("22:00:00", "-90:00:00"))
    // More or equal to 22 and less than 28 is warning
    gsNgsB(Limited, ("22:00:00", "22:00:00"))
    gsNgsB(Limited, ("22:00:00", "27:59:59.999"))
  }
  @Test def testRaGSNgsASemester() {
    // More or equal to 0 and less than 2 is warning
    gsNgsA(Limited, ("0:00:00", "0:00:00"))
    gsNgsA(Limited, ("1:59:59.999", "0:00:00"))
    // More than 5 and less or equal to 7 is warning
    gsNgsA(Limited, ("5:00:00.001", "0:00:00"))
    gsNgsA(Limited, ("7:00:00", "0:00:00"))
    // More than 23 and less or equal to 24 is warning (24h wraps to 0:00:00)
    gsNgsA(Limited, ("23:00:00.001", "0:00:00"))
    gsNgsA(Limited, ("0:00:00", "0:00:00"))
    // More or equal to 2 and less and equal to 5 is bad
    gsNgsA(Bad, ("2:00:00", "0:00:00"))
    gsNgsA(Bad, ("5:00:00", "0:00:00"))
    // Other points are good
    gsNgsA(Good, ("7:00:00.001", "0:00:00"))
    gsNgsA(Good, ("23:00:00", "0:00:00"))
  }
  @Test def testRaGSLgsASemester() {
    // More or equal to 0 and less than 1 is warning
    gsLgsA(Limited, ("0:00:00", "0:00:00"))
    gsLgsA(Limited, ("0:59:59.999", "0:00:00"))
    // More than 6 and less or equal to 8 is warning
    gsLgsA(Limited, ("6:00:00.001", "0:00:00"))
    gsLgsA(Limited, ("8:00:00", "0:00:00"))
    // More than 22 and less or equal to 24 is warning (24h wraps to 0:00:00)
    gsLgsA(Limited, ("22:00:00.001", "0:00:00"))
    gsLgsA(Limited, ("0:00:00", "0:00:00"))
    // More or equal to 1 and less and equal to 6 is bad
    gsLgsA(Bad, ("1:00:00", "0:00:00"))
    gsLgsA(Bad, ("6:00:00", "0:00:00"))
    // Other points are good
    gsLgsA(Good, ("8:00:00.001", "0:00:00"))
    gsLgsA(Good, ("22:00:00", "0:00:00"))
  }
  @Test def testDecForGNNgs() {
    // between higher than -37 and less or equal than -30 is warning
    gnNgsB(Limited, ("9:00:00", "-36:59:59.999"))
    gnNgsB(Limited, ("9:00:00", "-30:00:00"))
    // More or equal than 73 is Limited
    gnNgsB(Limited, ("9:00:00", "73:00:00"))
    gnNgsB(Limited, ("9:00:00", "90:00:00"))
    // Between 0 and up to -37 is Bad
    gnNgsB(Bad, ("9:00:00", "-90:00:00"))
    gnNgsB(Bad, ("9:00:00", "-37:00:00"))
    // Between less than -30 less than -73 is Good
    gnNgsB(Good, ("9:00:00", "-29:59:59.999"))
    gnNgsB(Good, ("9:00:00", "72:59:59.999"))
  }
  @Test def testDecForGNLgs() {
    // between higher than -30 and less or equal than -25 is warning
    gnLgsB(Limited, ("9:00:00", "-29:59:59.999"))
    gnLgsB(Limited, ("9:00:00", "-25:00:00"))
    // between higher or equal than 65 and less than 70 is warning
    gnLgsB(Limited, ("9:00:00", "65:00:00"))
    gnLgsB(Limited, ("9:00:00", "69:59:59.999"))
    // Less or equal to -30 is Bad
    gnLgsB(Bad, ("9:00:00", "-30:00:00"))
    gnLgsB(Bad, ("9:00:00", "-90:00:00"))
    // More or or equal to 70 is Bad
    gnLgsB(Bad, ("9:00:00", "70:00:00"))
    gnLgsB(Bad, ("9:00:00", "90:00:00"))
    // Between more than -25 and less than 65 is Good
    gnLgsB(Good, ("0:00:00", "-24:59:59.999"))
    gnLgsB(Good, ("0:00:00", "64:59:59.001"))
  }
  @Test def testRaGNNgsASemester() {
    // More or equal to 0 and less than 1 is warning
    gnNgsA(Limited, ("0:00:00", "0:00:00"))
    gnNgsA(Limited, ("0:59:59.999", "0:00:00"))
    // More than 4 and less or equal to 7 is warning
    gnNgsA(Limited, ("4:00:00.001", "0:00:00"))
    gnNgsA(Limited, ("7:00:00", "0:00:00"))
    // More than 22 and less or equal to 24 is warning (24h wraps to 0:00:00)
    gnNgsA(Limited, ("22:00:00.001", "0:00:00"))
    gnNgsA(Limited, ("0:00:00", "0:00:00"))
    // More or equal to 1 and less and equal to 4 is bad
    gnNgsA(Bad, ("1:00:00", "0:00:00"))
    gnNgsA(Bad, ("4:00:00", "0:00:00"))
    // Other points are good
    gnNgsA(Good, ("7:00:00.1", "0:00:00"))
    gnNgsA(Good, ("22:00:00", "0:00:00"))
  }
  @Test def testRaGNLgsASemester() {
    // More or equal to 21 and less than 24 is warning
    gnLgsA(Limited, ("21:00:00", "0:00:00"))
    gnLgsA(Limited, ("23:59:59.999", "0:00:00"))
    // More than 5 and less or equal to 8 is warning
    gnLgsA(Limited, ("5:00:00.001", "0:00:00"))
    gnLgsA(Limited, ("8:00:00", "0:00:00"))
    // More or equal to 0 and less and equal to 5 is bad
    gnLgsA(Bad, ("0:00:00", "0:00:00"))
    gnLgsA(Bad, ("5:00:00", "0:00:00"))
    // Other points are good
    gnLgsA(Good, ("8:00:00.001", "0:00:00"))
    gnLgsA(Good, ("20:59:59.999", "0:00:00"))
  }
  @Test def testRaGNNgsBSemester() {
    // More or equal to 11 and less than 13:30 is warning
    gnNgsB(Limited, ("11:00:00", "0:00:00"))
    gnNgsB(Limited, ("13:29:59.999", "0:00:00"))
    // More than 17 and less or equal to 19 is warning
    gnNgsB(Limited, ("17:00:00.001", "0:00:00"))
    gnNgsB(Limited, ("19:00:00", "0:00:00"))
    // More or equal to 13:30 and less and equal to 17 is bad
    gnNgsB(Bad, ("13:30:00", "0:00:00"))
    gnNgsB(Bad, ("17:00:00", "0:00:00"))
    // Other points are good
    gnNgsB(Good, ("0:00:00", "0:00:00"))
    gnNgsB(Good, ("10:59:59.999", "0:00:00"))
    gnNgsB(Good, ("19:00:00.001", "0:00:00"))
    gnNgsB(Good, ("23:59:59.999", "0:00:00"))
  }
  @Test def testRaGNLgsBSemester() {
    // More or equal to 10 and less than 12:30 is warning
    gnLgsB(Limited, ("10:00:00", "0:00:00"))
    gnLgsB(Limited, ("12:29:59.999", "0:00:00"))
    // More than 18 and less or equal to 20 is warning
    gnLgsB(Limited, ("18:00:00.001", "0:00:00"))
    gnLgsB(Limited, ("20:00:00", "0:00:00"))
    // More or equal to 12:30 and less and equal to 18 is bad
    gnLgsB(Bad, ("12:30:00", "0:00:00"))
    gnLgsB(Bad, ("18:00:00", "0:00:00"))
    // Other points are good.
    // (Fixed: these previously called gnNgsB — a copy-paste of the NGS test
    // above — so the GN LGS good bands were never actually exercised.)
    gnLgsB(Good, ("0:00:00", "0:00:00"))
    gnLgsB(Good, ("9:59:59.999", "0:00:00"))
    gnLgsB(Good, ("20:00:00.001", "0:00:00"))
    gnLgsB(Good, ("23:59:59.999", "0:00:00"))
  }
  @Test def testRaWrap() {
    val t0 = baseTarget.copy(coords = coordinates("23:00:00", "20:00:00"))
    val t1 = baseTarget.copy(coords = coordinates("00:00:00", "20:00:00"))
    val t2 = baseTarget.copy(coords = coordinates("00:30:00", "20:00:00"))
    List(t0, t1, t2) foreach { t =>
      assertEquals(Some(Limited), TargetVisibilityCalc.get(semA, baseObsGNNgs.copy(target = Some(t))))
      assertEquals(Some(Good), TargetVisibilityCalc.get(semB, baseObsGNNgs.copy(target = Some(t))))
    }
  }
  @Test def testRaForSpecialCases() {
    val t0 = baseTarget.copy(coords = coordinates("23:00:00", "20:00:00"))
    val t1 = baseTarget.copy(coords = coordinates("00:00:00", "20:00:00"))
    val t2 = baseTarget.copy(coords = coordinates("00:30:00", "20:00:00"))
    List(t0, t1, t2) foreach { t =>
      assertEquals(Some(Good), TargetVisibilityCalc.getOnDec(semA, baseObsGNNgs.copy(target = Some(t))))
      assertEquals(Some(Good), TargetVisibilityCalc.getOnDec(semB, baseObsGNNgs.copy(target = Some(t))))
    }
  }
  @Test def testDecWrap() {
    val t0 = baseTarget.copy(coords = coordinates("10:00:00", "-38:00:00"))
    val t1 = baseTarget.copy(coords = coordinates("10:00:00", "-90:00:00"))
    val t2 = baseTarget.copy(coords = coordinates("10:00:00", "-50:00:00"))
    List(t0, t1, t2) foreach { t =>
      assertEquals(Some(Bad), TargetVisibilityCalc.get(semA, baseObsGNNgs.copy(target = Some(t))))
      assertEquals(Some(Bad), TargetVisibilityCalc.get(semB, baseObsGNNgs.copy(target = Some(t))))
    }
  }
  @Test def testDecWrapForSpecialCases() {
    val t0 = baseTarget.copy(coords = coordinates("10:00:00", "-38:00:00"))
    val t1 = baseTarget.copy(coords = coordinates("10:00:00", "-90:00:00"))
    val t2 = baseTarget.copy(coords = coordinates("10:00:00", "-50:00:00"))
    // Fixed: this test previously duplicated testDecWrap verbatim (calling
    // `get`); like testRaForSpecialCases it is meant to exercise the
    // dec-only `getOnDec` path. These declinations are Bad on dec alone.
    List(t0, t1, t2) foreach { t =>
      assertEquals(Some(Bad), TargetVisibilityCalc.getOnDec(semA, baseObsGNNgs.copy(target = Some(t))))
      assertEquals(Some(Bad), TargetVisibilityCalc.getOnDec(semB, baseObsGNNgs.copy(target = Some(t))))
    }
  }
}
package demo
import Config._
import slick.sql.SqlProfile.ColumnOption
import scala.concurrent.duration._
import scala.concurrent.Await
/**
* This customizes the Slick code generator. We only do simple name mappings.
* For a more advanced example see https://github.com/cvogt/slick-presentation/tree/scala-exchange-2013
*/
/**
 * Entry point that (1) seeds a local PostgreSQL test database from the SQL
 * scripts in `src/sql`, then (2) runs a customized Slick source-code generator
 * over a subset of its tables and writes the result to `src_managed`.
 */
object CustomizedCodeGenerator {
  import scala.concurrent.ExecutionContext.Implicits.global
  // Project root; generated sources and SQL scripts are resolved against it.
  val projectDir = System.getProperty("user.dir")
  def main(args: Array[String]): Unit = {
    // prepare database by replaying each init script through psql
    for(script <- initScripts) {
      // FIXME don't forget to adjust it according to your environment
      val cmd = s"psql -U test -h $pgHost -p 5432 -d test -f $projectDir/src/sql/$script"
      val exec = Runtime.getRuntime.exec(cmd)
      // NOTE(review): a non-zero exit status is silently ignored here — the
      // generator will then run against a possibly half-initialized database.
      if (exec.waitFor() == 0) {
        println(s"$script finished.")
      }
    }
    // write the generated results to file (blocks until codegen completes)
    Await.result(
      codegen.map(_.writeToFile(
        "demo.MyPostgresDriver", // use our customized postgres driver
        s"$projectDir/target/scala-2.12/src_managed/slick",
        "demo",
        "Tables",
        "Tables.scala"
      )),
      20.seconds
    )
  }
  // Connection used by the model builder; url/jdbcDriver come from Config.
  val db = slickProfile.api.Database.forURL(url,driver=jdbcDriver)
  // filter out desired tables (matched case-insensitively by table name)
  val included = Seq("COFFEE","SUPPLIER","COFFEE_INVENTORY")
  // Builds the Slick model for the included tables, then wraps it in a
  // SourceCodeGenerator with custom type mappings and a custom package header.
  lazy val codegen = db.run {
    slickProfile.defaultTables.map(_.filter(t => included contains t.name.name.toUpperCase))
      .flatMap( slickProfile.createModelBuilder(_, ignoreInvalidDefaults = false).buildModel )
  }.map { model =>
    new slick.codegen.SourceCodeGenerator(model) {
      override def Table = new Table(_) { table =>
        override def Column = new Column(_) { column =>
          // customize db type -> scala type mapping, pls adjust it according to your environment
          // First try to map by the raw SQL type name (arrays, hstore, PostGIS
          // geometry), then by the JDBC-derived type (java.sql.* -> joda-time),
          // falling back to Slick's default mapping.
          override def rawType: String = {
            model.options.find(_.isInstanceOf[ColumnOption.SqlType]).flatMap {
              tpe =>
                tpe.asInstanceOf[ColumnOption.SqlType].typeName match {
                  case "hstore" => Option("Map[String, String]")
                  case "_text"|"text[]"|"_varchar"|"varchar[]" => Option("List[String]")
                  case "geometry" => Option("com.vividsolutions.jts.geom.Geometry")
                  case "_int8"|"int8[]" => Option("List[Long]")
                  case "_int4"|"int4[]" => Option("List[Int]")
                  case "_int2"|"int2[]" => Option("List[Short]")
                  case _ => None
                }
            }.getOrElse{
              model.tpe match {
                case "java.sql.Date" => "org.joda.time.LocalDate"
                case "java.sql.Time" => "org.joda.time.LocalTime"
                case "java.sql.Timestamp" => "org.joda.time.LocalDateTime"
                case _ =>
                  super.rawType
              }}
          }
        }
      }
      // ensure to use our customized postgres driver at `import profile.simple._`
      override def packageCode(profile: String, pkg: String, container: String, parentType: Option[String]) : String = {
        s"""
package ${pkg}
// AUTO-GENERATED Slick data model
/** Stand-alone Slick data model for immediate use */
object ${container} extends {
  val profile = ${profile}
} with ${container}

/** Slick data model trait for extension, choice of backend or usage in the cake pattern. (Make sure to initialize this late.) */
trait ${container}${parentType.map(t => s" extends $t").getOrElse("")} {
  val profile: $profile
  import profile.api._
  ${indent(code)}
}
      """.trim()
      }
    }
  }
}
package jigg.pipeline
/*
Copyright 2013-2016 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Properties
import scala.xml.{Node, Elem, Text, Atom}
import jigg.util.XMLUtil.RichNode
/** This simple annotator just segments a sentence by spaces, i.e.,
* assuming the input sentence is already correctly tokenized.
*/
class SpaceTokenizerAnnotator(override val name: String, override val props: Properties)
    extends SentencesAnnotator {

  /** Adds a <tokens> element to `sentence`, one <token> per space/tab separated
    * chunk of the sentence text.
    *
    * Each token carries its character offsets into the sentence text. Empty
    * text, or text that starts with whitespace, now yields no bogus empty
    * token (previously `0` was unconditionally used as the first token start,
    * producing an empty token at offset 0 in those cases).
    */
  override def newSentenceAnnotation(sentence: Node): Node = {
    val sindex = sentence \\@ "id"
    val text = sentence.text
    val range = (0 until text.size)

    def isSpace(c: Char) = c == ' ' || c == '\\t'

    // A token begins at every non-space character that is at the start of the
    // text or preceded by a space.
    val begins = range filter { i => !isSpace(text(i)) && (i == 0 || isSpace(text(i - 1))) }
    // A token ends at the next space at or after its begin, or at end of text.
    val ends = begins map {
      range indexWhere (i=>isSpace(text(i)), _) match {
        case -1 => text.size
        case e => e
      }
    }

    val tokenSeq = begins.zip(ends).zipWithIndex map { case ((b, e), i) =>
      <token
        id={ sindex + "_tok" + i }
        form={ text.substring(b, e) }
        characterOffsetBegin={ b+"" }
        characterOffsetEnd={ e+"" }/>
    }
    val tokens = <tokens annotators={ name }>{ tokenSeq }</tokens>
    sentence addChild tokens
  }

  override def requires = Set(Requirement.Ssplit)
  override def requirementsSatisfied = Set(Requirement.Tokenize)
}
| mynlp/jigg | src/main/scala/jigg/pipeline/SpaceTokenizerAnnotator.scala | Scala | apache-2.0 | 1,886 |
package io.github.shogowada.scalajs.history
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
@js.native
trait History extends js.Object {
  // Facade over the JavaScript `history` package's history object; member
  // signatures must match the JS API exactly.
  def push(url: String): Unit = js.native
  def replace(url: String): Unit = js.native
  def go(delta: Int): Unit = js.native
  def goBack(): Unit = js.native
  def goForward(): Unit = js.native
}
@js.native
@JSImport("history", JSImport.Namespace)
object History extends js.Object {
  // Factory functions exported by the npm "history" module.
  def createBrowserHistory(): History = js.native
  def createHashHistory(): History = js.native
  def createMemoryHistory(): History = js.native
}
| shogowada/scalajs-reactjs | history/src/main/scala/io/github/shogowada/scalajs/history/History.scala | Scala | mit | 593 |
package shardakka.keyvalue
import akka.actor._
import akka.contrib.pattern.{ ClusterSingletonProxy, ClusterSingletonManager }
import akka.pattern.ask
import akka.util.Timeout
import im.actor.server.commons.serialization.ActorSerializer
import shardakka.{ StringCodec, Codec, ShardakkaExtension }
import scala.collection.immutable
import scala.concurrent.{ ExecutionContext, Future }
// Termination message for the cluster singleton (used as `terminationMessage`
// when the ClusterSingletonManager is created, and sent on shutdown).
private case object End
/**
 * Client handle for one named key-value storage backed by a cluster singleton.
 * All operations are asked through the singleton proxy and encoded/decoded
 * with the supplied codec.
 */
case class SimpleKeyValue[A](
  name: String,
  private val root: ActorRef,
  private val proxy: ActorRef,
  private val codec: Codec[A]
) {
  /** Creates or overwrites `key` with the encoded `value`; completes on ack. */
  def upsert(key: String, value: A)(implicit ec: ExecutionContext, timeout: Timeout): Future[Unit] = {
    val ack = proxy ? ValueCommands.Upsert(key, codec.toBytes(value))
    ack.map(_ ⇒ ())
  }

  /** Removes `key`; completes on ack. */
  def delete(key: String)(implicit ec: ExecutionContext, timeout: Timeout): Future[Unit] = {
    val ack = proxy ? ValueCommands.Delete(key)
    ack.map(_ ⇒ ())
  }

  /** Fetches and decodes the value stored under `key`, if any. */
  def get(key: String)(implicit ec: ExecutionContext, timeout: Timeout): Future[Option[A]] =
    for {
      response ← (proxy ? ValueQueries.Get(key)).mapTo[ValueQueries.GetResponse]
    } yield response.value.map(codec.fromBytes)

  /** Lists every key currently present in this storage. */
  def getKeys()(implicit ec: ExecutionContext, timeout: Timeout): Future[Seq[String]] =
    (proxy ? RootQueries.GetKeys()).mapTo[RootQueries.GetKeysResponse].map(_.keys)

  /** Stops the underlying actors. Order matters: `End` is routed through the
    * proxy first (it is the singleton's configured termination message),
    * then both actors are poisoned. */
  private[keyvalue] def shutdown(): Unit = {
    proxy ! End
    root ! PoisonPill
    proxy ! PoisonPill
  }
}
/**
 * Extension mixin that creates and caches [[SimpleKeyValue]] instances, one
 * cluster singleton (manager + proxy) per key-value name.
 */
trait SimpleKeyValueExtension {
  this: ShardakkaExtension ⇒

  // Register protobuf serializers for the key-value commands/queries/events.
  // The numeric ids are persisted with events and must stay stable.
  ActorSerializer.register(5201, classOf[RootEvents.KeyCreated])
  ActorSerializer.register(5202, classOf[RootEvents.KeyDeleted])

  ActorSerializer.register(5301, classOf[ValueCommands.Upsert])
  ActorSerializer.register(5302, classOf[ValueCommands.Delete])
  ActorSerializer.register(5303, classOf[ValueCommands.Ack])

  ActorSerializer.register(5401, classOf[ValueQueries.Get])
  ActorSerializer.register(5402, classOf[ValueQueries.GetResponse])

  ActorSerializer.register(5501, classOf[ValueEvents.ValueUpdated])
  ActorSerializer.register(5502, classOf[ValueEvents.ValueDeleted])

  // Cache of created key-values. Mutated only under `synchronized` below:
  // @volatile alone cannot make the get/create/put sequence atomic, and two
  // concurrent callers would both try to create actors with the same name
  // (which fails with InvalidActorNameException).
  @volatile
  private var kvs = immutable.Map.empty[String, SimpleKeyValue[_]]

  /**
   * Returns the key-value storage registered under `name`, creating its
   * cluster singleton manager and proxy on first access.
   */
  def simpleKeyValue[A](name: String, codec: Codec[A])(implicit system: ActorSystem): SimpleKeyValue[A] =
    synchronized {
      kvs.get(name) match {
        case Some(kv) ⇒ kv.asInstanceOf[SimpleKeyValue[A]]
        case None ⇒
          val manager = system.actorOf(
            ClusterSingletonManager.props(
              singletonProps = SimpleKeyValueRoot.props(name),
              singletonName = name,
              terminationMessage = End,
              role = None
            ), name = s"SimpleKeyValueRoot-$name"
          )
          val proxy = system.actorOf(
            ClusterSingletonProxy.props(singletonPath = s"/user/SimpleKeyValueRoot-$name/$name", role = None),
            name = s"SimpleKeyValueRoot-$name-Proxy"
          )
          val kv = SimpleKeyValue(name, manager, proxy, codec)
          kvs += (name → kv)
          kv
      }
    }

  /** Convenience overload for plain string values. */
  def simpleKeyValue(name: String)(implicit system: ActorSystem): SimpleKeyValue[String] =
    simpleKeyValue(name, StringCodec)

  /** Shuts down and forgets the key-value registered under `name`, if any. */
  def shutdownKeyValue(name: String) = synchronized {
    kvs.get(name) foreach { kv ⇒
      kv.shutdown()
      kvs -= name
    }
  }
}
| winiceo/actor-platform | actor-server/shardakka/src/main/scala/shardakka/keyvalue/SimpleKeyValue.scala | Scala | mit | 3,173 |
/*
Copyright (c) 2009-2012, The Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.cdlib.was.weari;
import scala.collection.mutable.ArrayBuffer;
/**
 * An `Iterator` that pulls elements in batches: subclasses implement
 * [[fillCache]] to append the next batch of elements to `cache`; this class
 * manages the read position, refills on demand, and adds [[peek]].
 *
 * Leaving `cache` empty after a `fillCache` call signals end-of-stream.
 *
 * Changes from the previous revision: deprecated procedure syntax replaced by
 * explicit `: Unit` result types, redundant `return`s and semicolons removed.
 * Behavior and the public/protected interface are unchanged.
 */
abstract class CachingIterator[T] extends Iterator[T] {
  // Read position within `cache`; elements before this index are consumed.
  protected var cachePos = 0
  // Elements produced by `fillCache` but not yet handed out.
  protected var cache = new ArrayBuffer[T]()

  /** Append further elements to `cache`, or leave it empty at end-of-stream. */
  def fillCache: Unit

  /** Reset and refill the buffer, but only once it is fully consumed. */
  def _fillCache: Unit = {
    if (cache.length <= cachePos) {
      cache.clear()
      cachePos = 0
      fillCache
    }
  }

  /** The next element without consuming it, or `None` when exhausted. */
  def peek: Option[T] =
    if (hasNext) Some(cache(cachePos)) else None

  def hasNext: Boolean = {
    if (cache.length <= cachePos) _fillCache
    // After a refill cachePos is 0, so non-empty implies an unread element.
    cache.length != 0
  }

  /** @throws NoSuchElementException when the iterator is exhausted. */
  def next: T = {
    if (!hasNext) throw new NoSuchElementException()
    val result = cache(cachePos)
    cachePos += 1
    result
  }
}
| cdlib/weari | src/main/scala/org/cdlib/was/weari/CachingIterator.scala | Scala | bsd-3-clause | 2,246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Date, UUID}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileAlreadyExistsException, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.{ProjectExec, SortExec, SparkPlan, SQLExecution, UnsafeExternalRowSorter}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StringType
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.{SerializableConfiguration, Utils}
/** A helper object for writing FileFormat data out to a location. */
object FileFormatWriter extends Logging {
  /**
   * Describes how output files should be placed in the filesystem.
   *
   * @param outputPath base directory for the write job.
   * @param customPartitionLocations explicit locations for partitions whose
   *                                 files must not live under `outputPath`.
   * @param outputColumns all columns to be written (data and partition).
   */
  case class OutputSpec(
      outputPath: String,
      customPartitionLocations: Map[TablePartitionSpec, String],
      outputColumns: Seq[Attribute])
  /**
   * A function that converts the empty string to null for partition values.
   *
   * Applied in `write` to nullable string partition columns, presumably
   * because an empty string cannot serve as a partition directory name —
   * confirm against the dynamic-partition path handling.  Both the
   * interpreted path (`convert`) and the codegen path (`doGenCode`) implement
   * the same mapping: zero-byte string becomes null, anything else passes
   * through untouched.
   */
  case class Empty2Null(child: Expression) extends UnaryExpression with String2StringExpression {
    override def convert(v: UTF8String): UTF8String = if (v.numBytes() == 0) null else v
    // The output can be null even when the input is non-null, hence always nullable.
    override def nullable: Boolean = true
    override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
      nullSafeCodeGen(ctx, ev, c => {
        s"""if ($c.numBytes() == 0) {
           |  ${ev.isNull} = true;
           |  ${ev.value} = null;
           |} else {
           |  ${ev.value} = $c;
           |}""".stripMargin
      })
    }
    override protected def withNewChildInternal(newChild: Expression): Empty2Null =
      copy(child = newChild)
  }
  /**
   * Describes how concurrent output writers should be executed.
   *
   * @param maxWriters upper bound on concurrently open writers per task
   *                   (from `maxConcurrentOutputFileWriters` in `write`).
   * @param createSorter lazily builds a row sorter from the plan's required
   *                     ordering, for when sorted, one-at-a-time writing is
   *                     needed instead (see its construction in `write`).
   */
  case class ConcurrentOutputWriterSpec(
      maxWriters: Int,
      createSorter: () => UnsafeExternalRowSorter)
  /**
   * Basic work flow of this command is:
   * 1. Driver side setup, including output committer initialization and data source specific
   *    preparation work for the write job to be issued.
   * 2. Issues a write job consists of one or more executor side tasks, each of which writes all
   *    rows within an RDD partition.
   * 3. If no exception is thrown in a task, commits that task, otherwise aborts that task;  If any
   *    exception is thrown during task commitment, also aborts that task.
   * 4. If all tasks are committed, commit the job, otherwise aborts the job;  If any exception is
   *    thrown during job commitment, also aborts the job.
   * 5. If the job is successfully committed, perform post-commit operations such as
   *    processing statistics.
   *
   * @param committer commit protocol driving task/job commit and abort.
   * @param outputSpec where the files go and which columns are written.
   * @param bucketSpec optional bucketing; drives bucket id and sort columns.
   * @return The set of all partition paths that were updated during this write job.
   */
  def write(
      sparkSession: SparkSession,
      plan: SparkPlan,
      fileFormat: FileFormat,
      committer: FileCommitProtocol,
      outputSpec: OutputSpec,
      hadoopConf: Configuration,
      partitionColumns: Seq[Attribute],
      bucketSpec: Option[BucketSpec],
      statsTrackers: Seq[WriteJobStatsTracker],
      options: Map[String, String])
    : Set[String] = {
    val job = Job.getInstance(hadoopConf)
    job.setOutputKeyClass(classOf[Void])
    job.setOutputValueClass(classOf[InternalRow])
    FileOutputFormat.setOutputPath(job, new Path(outputSpec.outputPath))
    // Partition columns are carved out; only the remaining data columns form
    // the file schema (`dataSchema` below).
    val partitionSet = AttributeSet(partitionColumns)
    val dataColumns = outputSpec.outputColumns.filterNot(partitionSet.contains)
    // Normalize empty strings in nullable string partition columns to null
    // (see Empty2Null); only add the projection when at least one column needs it.
    var needConvert = false
    val projectList: Seq[NamedExpression] = plan.output.map {
      case p if partitionSet.contains(p) && p.dataType == StringType && p.nullable =>
        needConvert = true
        Alias(Empty2Null(p), p.name)()
      case attr => attr
    }
    val empty2NullPlan = if (needConvert) ProjectExec(projectList, plan) else plan
    val bucketIdExpression = bucketSpec.map { spec =>
      val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
      // Use `HashPartitioning.partitionIdExpression` as our bucket id expression, so that we can
      // guarantee the data distribution is same between shuffle and bucketed data source, which
      // enables us to only shuffle one side when join a bucketed table and a normal one.
      HashPartitioning(bucketColumns, spec.numBuckets).partitionIdExpression
    }
    val sortColumns = bucketSpec.toSeq.flatMap {
      spec => spec.sortColumnNames.map(c => dataColumns.find(_.name == c).get)
    }
    val caseInsensitiveOptions = CaseInsensitiveMap(options)
    val dataSchema = dataColumns.toStructType
    DataSourceUtils.verifySchema(fileFormat, dataSchema)
    // Note: prepareWrite has side effect. It sets "job".
    val outputWriterFactory =
      fileFormat.prepareWrite(sparkSession, job, caseInsensitiveOptions, dataSchema)
    // Serializable description shipped to every write task.
    val description = new WriteJobDescription(
      uuid = UUID.randomUUID.toString,
      serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
      outputWriterFactory = outputWriterFactory,
      allColumns = outputSpec.outputColumns,
      dataColumns = dataColumns,
      partitionColumns = partitionColumns,
      bucketIdExpression = bucketIdExpression,
      path = outputSpec.outputPath,
      customPartitionLocations = outputSpec.customPartitionLocations,
      maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
        .getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
      timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
        .getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone),
      statsTrackers = statsTrackers
    )
    // We should first sort by partition columns, then bucket id, and finally sorting columns.
    val requiredOrdering = partitionColumns ++ bucketIdExpression ++ sortColumns
    // the sort order doesn't matter
    val actualOrdering = empty2NullPlan.outputOrdering.map(_.child)
    // Prefix-compare the child's existing ordering against the required one;
    // a match lets us skip the explicit SortExec below.
    val orderingMatched = if (requiredOrdering.length > actualOrdering.length) {
      false
    } else {
      requiredOrdering.zip(actualOrdering).forall {
        case (requiredOrder, childOutputOrder) =>
          requiredOrder.semanticEquals(childOutputOrder)
      }
    }
    SQLExecution.checkSQLExecutionId(sparkSession)
    // propagate the description UUID into the jobs, so that committers
    // get an ID guaranteed to be unique.
    job.getConfiguration.set("spark.sql.sources.writeJobUUID", description.uuid)
    // This call shouldn't be put into the `try` block below because it only initializes and
    // prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
    committer.setupJob(job)
    try {
      val (rdd, concurrentOutputWriterSpec) = if (orderingMatched) {
        (empty2NullPlan.execute(), None)
      } else {
        // SPARK-21165: the `requiredOrdering` is based on the attributes from analyzed plan, and
        // the physical plan may have different attribute ids due to optimizer removing some
        // aliases. Here we bind the expression ahead to avoid potential attribute ids mismatch.
        val orderingExpr = bindReferences(
          requiredOrdering.map(SortOrder(_, Ascending)), outputSpec.outputColumns)
        val sortPlan = SortExec(
          orderingExpr,
          global = false,
          child = empty2NullPlan)
        val maxWriters = sparkSession.sessionState.conf.maxConcurrentOutputFileWriters
        val concurrentWritersEnabled = maxWriters > 0 && sortColumns.isEmpty
        if (concurrentWritersEnabled) {
          // Skip the sort up front; tasks fall back to the sorter only if they
          // exceed `maxWriters` concurrently open files.
          (empty2NullPlan.execute(),
            Some(ConcurrentOutputWriterSpec(maxWriters, () => sortPlan.createSorter())))
        } else {
          (sortPlan.execute(), None)
        }
      }
      // SPARK-23271 If we are attempting to write a zero partition rdd, create a dummy single
      // partition rdd to make sure we at least set up one write task to write the metadata.
      val rddWithNonEmptyPartitions = if (rdd.partitions.length == 0) {
        sparkSession.sparkContext.parallelize(Array.empty[InternalRow], 1)
      } else {
        rdd
      }
      val jobIdInstant = new Date().getTime
      val ret = new Array[WriteTaskResult](rddWithNonEmptyPartitions.partitions.length)
      sparkSession.sparkContext.runJob(
        rddWithNonEmptyPartitions,
        (taskContext: TaskContext, iter: Iterator[InternalRow]) => {
          executeTask(
            description = description,
            jobIdInstant = jobIdInstant,
            sparkStageId = taskContext.stageId(),
            sparkPartitionId = taskContext.partitionId(),
            // Keep the attempt number non-negative after the Long -> Int truncation.
            sparkAttemptNumber = taskContext.taskAttemptId().toInt & Integer.MAX_VALUE,
            committer,
            iterator = iter,
            concurrentOutputWriterSpec = concurrentOutputWriterSpec)
        },
        rddWithNonEmptyPartitions.partitions.indices,
        (index, res: WriteTaskResult) => {
          committer.onTaskCommit(res.commitMsg)
          ret(index) = res
        })
      val commitMsgs = ret.map(_.commitMsg)
      logInfo(s"Start to commit write Job ${description.uuid}.")
      val (_, duration) = Utils.timeTakenMs { committer.commitJob(job, commitMsgs) }
      logInfo(s"Write Job ${description.uuid} committed. Elapsed time: $duration ms.")
      processStats(description.statsTrackers, ret.map(_.summary.stats))
      logInfo(s"Finished processing stats for write job ${description.uuid}.")
      // return a set of all the partition paths that were updated during this job
      ret.map(_.summary.updatedPartitions).reduceOption(_ ++ _).getOrElse(Set.empty)
    } catch { case cause: Throwable =>
      logError(s"Aborting job ${description.uuid}.", cause)
      committer.abortJob(job)
      throw QueryExecutionErrors.jobAbortedError(cause)
    }
  }
  /**
   * Writes data out in a single Spark task.
   *
   * Builds a Hadoop task-attempt context, picks a data writer appropriate for
   * this partition (empty / single-directory / dynamic-partition), then writes
   * all rows and commits the task — or aborts it on failure.
   */
  private def executeTask(
      description: WriteJobDescription,
      jobIdInstant: Long,
      sparkStageId: Int,
      sparkPartitionId: Int,
      sparkAttemptNumber: Int,
      committer: FileCommitProtocol,
      iterator: Iterator[InternalRow],
      concurrentOutputWriterSpec: Option[ConcurrentOutputWriterSpec]): WriteTaskResult = {
    val jobId = SparkHadoopWriterUtils.createJobID(new Date(jobIdInstant), sparkStageId)
    val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
    val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)
    // Set up the attempt context required to use in the output committer.
    val taskAttemptContext: TaskAttemptContext = {
      // Set up the configuration object
      val hadoopConf = description.serializableHadoopConf.value
      hadoopConf.set("mapreduce.job.id", jobId.toString)
      hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
      hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
      hadoopConf.setBoolean("mapreduce.task.ismap", true)
      hadoopConf.setInt("mapreduce.task.partition", 0)
      new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
    }
    committer.setupTask(taskAttemptContext)
    val dataWriter =
      if (sparkPartitionId != 0 && !iterator.hasNext) {
        // In case of empty job, leave first partition to save meta for file format like parquet.
        new EmptyDirectoryDataWriter(description, taskAttemptContext, committer)
      } else if (description.partitionColumns.isEmpty && description.bucketIdExpression.isEmpty) {
        // No partitioning/bucketing: all rows go to a single directory.
        new SingleDirectoryDataWriter(description, taskAttemptContext, committer)
      } else {
        concurrentOutputWriterSpec match {
          case Some(spec) =>
            new DynamicPartitionDataConcurrentWriter(
              description, taskAttemptContext, committer, spec)
          case _ =>
            new DynamicPartitionDataSingleWriter(description, taskAttemptContext, committer)
        }
      }
    try {
      Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
        // Execute the task to write rows out and commit the task.
        dataWriter.writeWithIterator(iterator)
        dataWriter.commit()
      })(catchBlock = {
        // If there is an error, abort the task
        dataWriter.abort()
        logError(s"Job $jobId aborted.")
      }, finallyBlock = {
        dataWriter.close()
      })
    } catch {
      case e: FetchFailedException =>
        // Shuffle-fetch failures must propagate unchanged so the scheduler can
        // handle stage retry, rather than being wrapped as a write error.
        throw e
      case f: FileAlreadyExistsException if SQLConf.get.fastFailFileFormatOutput =>
        // If any output file to write already exists, it does not make sense to re-run this task.
        // We throw the exception and let Executor throw ExceptionFailure to abort the job.
        throw new TaskOutputFileAlreadyExistException(f)
      case t: Throwable =>
        throw QueryExecutionErrors.taskFailedWhileWritingRowsError(t)
    }
  }
  /**
   * For every registered [[WriteJobStatsTracker]], call `processStats()` on it, passing it
   * the corresponding [[WriteTaskStats]] from all executors.
   *
   * `statsPerTask` is row-per-task; each row must contain exactly one stats
   * entry per tracker, in tracker order.
   */
  private[datasources] def processStats(
      statsTrackers: Seq[WriteJobStatsTracker],
      statsPerTask: Seq[Seq[WriteTaskStats]])
    : Unit = {
    val numStatsTrackers = statsTrackers.length
    // The `.get` in the message is only evaluated when the assertion fails,
    // at which point a mismatching row is guaranteed to exist.
    assert(statsPerTask.forall(_.length == numStatsTrackers),
      s"""Every WriteTask should have produced one `WriteTaskStats` object for every tracker.
         |There are $numStatsTrackers statsTrackers, but some task returned
         |${statsPerTask.find(_.length != numStatsTrackers).get.length} results instead.
       """.stripMargin)
    // Re-group from per-task rows to per-tracker columns (transpose requires a
    // non-empty, rectangular input — guaranteed by the assert above).
    val statsPerTracker = if (statsPerTask.nonEmpty) {
      statsPerTask.transpose
    } else {
      statsTrackers.map(_ => Seq.empty)
    }
    statsTrackers.zip(statsPerTracker).foreach {
      case (statsTracker, stats) => statsTracker.processStats(stats)
    }
  }
}
| wangmiao1981/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | Scala | apache-2.0 | 15,496 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.web.snippet.node
import com.normation.inventory.ldap.core.InventoryDit
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.queries.Query
import com.normation.rudder.domain.policies.{RuleTarget, Rule}
import com.normation.rudder.domain.nodes.{NodeInfo, NodeGroup}
import com.normation.rudder.services.nodes._
import com.normation.rudder.web.services.ReportDisplayer
import com.normation.rudder.web.services.DisplayNode
import com.normation.rudder.web.model.JsTreeNode
import com.normation.rudder.web.components._
import com.normation.rudder.web.components.popup.CreateCategoryOrGroupPopup
import com.normation.rudder.web.components.ShowNodeDetailsFromNode
import com.normation.rudder.web.components.SearchNodeComponent
import com.normation.utils.Control.sequence
import bootstrap.liftweb.LiftSpringApplicationContext.inject
import scala.xml._
import net.liftweb.common._
import net.liftweb.http._
import net.liftweb.util._
import Helpers._
import net.liftweb.http.js._
import JsCmds._
import JE._
import net.liftmodules.widgets.autocomplete._
import com.normation.exceptions.TechnicalException
import com.normation.inventory.ldap.core.LDAPConstants.OC_NODE
import com.normation.rudder.domain.queries.Or
import com.normation.rudder.services.queries.CmdbQueryParser
import bootstrap.liftweb.RudderConfig
//////////////////////////////////////////////////////////////////////
// Actual snippet implementation
//////////////////////////////////////////////////////////////////////
class SearchNodes extends StatefulSnippet with Loggable {
  // NOTE(review): `lock` is not referenced anywhere in this class — candidate
  // for removal if no template/reflection use depends on it.
  val lock = new Object
  // Services resolved once from the application configuration registry.
  private[this] val quickSearchService = RudderConfig.quickSearchService
  private[this] val queryParser = RudderConfig.cmdbQueryParser
  private[this] val getFullGroupLibrary = RudderConfig.roNodeGroupRepository.getFullGroupLibrary _
  //the popup component to create the group
  private[this] val creationPopup = new LocalSnippet[CreateCategoryOrGroupPopup]
  // Loaded once per snippet instance; fail fast, since nothing below can work
  // without the group library.
  private[this] val groupLibrary = getFullGroupLibrary() match {
    case Full(x) => x
    case eb:EmptyBox =>
      val e = eb ?~! "Major error: can not get the node group library"
      logger.error(e.messageChain)
      throw new Exception(e.messageChain)
  }
  /**
   * Prepares the "create group from query" popup, seeded with the current
   * search query and the ids of the nodes it matched.  The NodeGroup passed
   * in is deliberately incomplete (null id/name/description) — per the inline
   * comment, only the query and node list are meaningful here; presumably the
   * popup fills in the rest (confirm against CreateCategoryOrGroupPopup).
   */
  private[this] def setCreationPopup(query : Option[Query], serverList : Box[Seq[NodeInfo]]) : Unit = {
    creationPopup.set(Full(new CreateCategoryOrGroupPopup(
      // create a totally invalid group
      Some(new NodeGroup(
          null,
          null,
          null,
          query,
          true,
          serverList.openOr(Seq[NodeInfo]()).map(_.id).toSet,
          true,
          false
        )
      )
      , rootCategory = groupLibrary
      , onSuccessCategory= { _ => Noop }
      // On group creation, jump to the groups page with the new group selected.
      , onSuccessGroup = { (node:NodeGroup, _) => RedirectTo("""/secure/nodeManager/groups#{"groupId":"%s"}""".format(node.id.value)) }
    )))
  }
  // Holds the currently active search form component; rebuilt on every query change.
  val searchNodeComponent = new LocalSnippet[SearchNodeComponent]
  /**
   * (Re)builds the node-search form for the given query, stores it in
   * `searchNodeComponent`, and returns it.
   */
  private[this] def setNodeGroupCategoryForm(query:Option[Query]) : SearchNodeComponent = {
    val sc = new SearchNodeComponent(
        "htmlIdCategory"
      , query
      , srvList
      , () => Noop
      , showNodeDetails
      , groupPage = false
    )
    searchNodeComponent.set(Full(sc))
    sc
  }
  // Last search result; shared with the search component above.
  var srvList : Box[Seq[NodeInfo]] = Empty
  // Install an empty search form on snippet construction.
  setNodeGroupCategoryForm(None)
  // The portlet for the server detail
  private def serverPortletPath = List("templates-hidden", "server", "server_details")
  // Resolves the template file; a missing template is a packaging error, so fail hard.
  private def serverPortletTemplateFile() = Templates(serverPortletPath) match {
    case Empty | Failure(_,_,_) =>
      throw new TechnicalException("Template for node details not found. I was looking for %s.html".format(serverPortletPath.mkString("/")))
    case Full(n) => n
  }
  private def serverPortletTemplate = chooseTemplate("server","portlet",serverPortletTemplateFile)
  private def serverDetailsTemplate = chooseTemplate("detail","server",serverPortletTemplateFile)
  private def searchNodes = chooseTemplate("query","SearchNodes",serverPortletTemplateFile)
  // the container for the server selected
  // var currentSelectedNode = Option.empty[NodeInfo]
  // Maps snippet names used in templates to their render functions.
  // NOTE(review): "serverPorlet" (sic) — templates must use the same
  // misspelled key, so do not rename one side without the other.
  var dispatch : DispatchIt = {
    case "showQuery" => searchNodeComponent.is match {
      case Full(component) => { _ => component.buildQuery }
      case _ => { _ => <div>The component is not set</div><div></div> }
    }
    case "head" => head _
    case "createGroup" => createGroup _
    case "serverPorlet" => serverPortlet _
  }
  var activateSubmitButton = true
  /**
   * Contributes page-head resources (datepicker scripts) and registers the
   * on-load handler that reads the URL hash (see `parseJsArg`), plus the
   * static initialization required by the node-details component.
   */
  def head(html:NodeSeq) : NodeSeq = {
    import net.liftweb.json._
    import net.liftweb.json.JsonDSL._
    { <head>
      <script type="text/javascript" src="/javascript/jquery/ui/jquery.ui.datepicker.js"></script>
      <script type="text/javascript" src="/javascript/jquery/ui/i18n/jquery-ui-i18n.js"></script>
      {Script(OnLoad(parseJsArg()))}
    </head> } ++ ShowNodeDetailsFromNode.staticInit
  }
  /**
   * Display the server portlet: a quick-search autocomplete box whose
   * suggestions have the form "hostname [uuid]"; selecting one renders that
   * node's details and records its id in the URL hash.
   * @param html the template markup to bind into
   * @return the bound markup
   */
  def serverPortlet(html:NodeSeq) : NodeSeq = {
    // Suggestion provider for the autocomplete widget; `limit` is ignored in
    // favor of a fixed cap of 20 results.
    def buildQuery(current: String, limit: Int): Seq[String] = {
      quickSearchService.lookup(current,20) match {
        case Full(seq) => seq.map(nodeInfo => "%s [%s]".format(nodeInfo.hostname, nodeInfo.id.value))
        case e:EmptyBox => {
          logger.error("Error in quicksearch",e)
          Seq()
        }
      }
    }
    /*
     * Parse the text chosen in the autocomplete, which was built in
     * buildQuery as "hostname [uuid]": extract the uuid and show that node.
     */
    def parse(s:String) : JsCmd = {
      val regex = """.+\\[(.+)\\]""".r
      s match {
        case regex(id) =>
          SetHtml("serverDetails", (new ShowNodeDetailsFromNode(NodeId(id), groupLibrary).display())) &
          updateLocationHash(id)
        case _ =>
          Alert("No server was selected")
      }
    }
    bind("server", serverPortletTemplate,
      "quicksearch" -> AutoCompleteAutoSubmit("", buildQuery _, { s:String => parse(s) },
        ("style" -> "width:300px"),
        ("placeholder" -> "Search")),
      /*"quicksearchSubmit" -> SHtml.ajaxSubmit("OK", { () =>
        nodeId match {
          case Some(id) => SetHtml("serverDetails", (new ShowNodeDetailsFromNode(id).display) )
          case None => Alert("No server was selected")
        }
      } ),*/
      "details" -> NodeSeq.Empty
    )
  }
  /** Renders the button that opens the "create group from this query" popup. */
  def createGroup(html:NodeSeq) : NodeSeq = {
    SHtml.ajaxButton("Create node group from this query", {
      () => showPopup() },
      ("class", "largeButton"))
  }
  /**
   * If a query is passed as argument in the URL hash, try to de-jsonify it on
   * a best-effort basis — errors are deliberately ignored.
   *
   * Two hash shapes are recognised: #{"nodeId":"..."} displays that node's
   * details, and #{"query":{...}} rebuilds and auto-submits the search form.
   */
  private[this] def parseJsArg(): JsCmd = {
    def displayDetails(nodeId:String) = {
      SetHtml("serverDetails", (new ShowNodeDetailsFromNode(new NodeId(nodeId), groupLibrary)).display())
    }
    // Parses the serialized query, installs it in the form, and (when valid)
    // re-renders the form and triggers the search button client-side.
    def executeQuery(query:String) : JsCmd = {
      val q = queryParser(query)
      val sc = setNodeGroupCategoryForm(q)
      q match {
        case f:Failure =>
          logger.debug(f.messageChain)
          Noop
        case e:EmptyBox => Noop
        case Full(q)   =>
          Replace("SearchNodes", sc.buildQuery()) &
          JsRaw("correctButtons(); $('#SubmitSearch').click();")
      }
    }
    JsRaw("""
        var hash = null;
        try {
          hash = JSON.parse(window.location.hash.substring(1));
        } catch(e) {
          hash = {}
        }
        if( hash.nodeId != null && hash.nodeId.length > 0) {
          %s;
        }
        if( hash.query != null && JSON.stringify(hash.query).length > 0) {
          %s;
        }
    """.format(
        SHtml.ajaxCall(JsVar("hash","nodeId"), displayDetails _ )._2.toJsCmd
      , SHtml.ajaxCall(JsRaw("JSON.stringify(hash.query)"), executeQuery _ )._2.toJsCmd
    ))
  }
  /**
   * Renders the popup content from the previously prepared component, or an
   * inline error/placeholder when it is missing.
   */
  private[this] def createPopup : NodeSeq = {
    creationPopup.is match {
      case Failure(m,_,_) => <span class="error">Error: {m}</span>
      case Empty => <div>The component is not set</div>
      case Full(popup) => popup.popupContent()
    }
  }
  /**
   * Seeds the creation popup from the current search form, injects its markup,
   * and opens it client-side.
   * NOTE(review): "resquest" typo lives in a user-visible runtime string;
   * fixing it is a behavior change and should be done deliberately.
   */
  private[this] def showPopup() : JsCmd = {
    searchNodeComponent.is match {
      case Full(r) => setCreationPopup(r.getQuery, r.getSrvList)
        //update UI
        SetHtml("createGroupContainer", createPopup) &
        JsRaw( """ createPopup("createGroupPopup") """)
      case eb:EmptyBox => Alert("Error when trying to retrieve the resquest, please try again")
    }
  }
  /**
   * Takes the values from the JS call (variable in the td) and displays the
   * node details from it.  The incoming string is pipe-separated; the node id
   * is expected in the second field — no bounds check is performed, so a
   * malformed value raises ArrayIndexOutOfBoundsException (TODO confirm the
   * caller always supplies at least two fields).
   * @param s pipe-separated values from the client
   * @return the JsCmd that renders the details, updates the hash and scrolls
   */
  private def showNodeDetails(s:String) : JsCmd = {
    val arr = s.split("\\\\|")
    val nodeId = arr(1)
    SetHtml("serverDetails", (new ShowNodeDetailsFromNode(new NodeId(nodeId), groupLibrary)).display()) &
    updateLocationHash(nodeId) &
    JsRaw("""scrollToElement("serverDetails");""".format(nodeId))
  }
  // Record the selected node in the URL hash so the selection survives reloads
  // (read back by parseJsArg on page load).
  private def updateLocationHash(nodeId:String) =
    JsRaw("""this.window.location.hash = "#" + JSON.stringify({'nodeId':'%s'})""".format(nodeId))
}
| jooooooon/rudder | rudder-web/src/main/scala/com/normation/rudder/web/snippet/node/SearchNodes.scala | Scala | agpl-3.0 | 10,937 |
package com.ebay.neutrino
import com.ebay.neutrino.config.{NeutrinoSettings, VirtualPool}
import org.scalatest.{FlatSpec, Matchers}
class NeutrinoPoolsTest extends FlatSpec with Matchers with NeutrinoTestSupport {

  // Shared core instance for all tests, built from empty settings.
  implicit val core = new NeutrinoCore(NeutrinoSettings.Empty)

  def startCore() = false

  it should "ensure apply() maps to underlying state" in {
    // TODO
  }

  it should "rudmintary test of neutrino-pools wrapper" in {
    val wrapper = neutrinoPools()
    wrapper() shouldBe empty

    // a single pool
    wrapper.update(VirtualPool())
    wrapper().size shouldBe 1

    // two pools replace the single one
    wrapper.update(VirtualPool(id = "1"), VirtualPool(id = "2"))
    wrapper().size shouldBe 2
    wrapper().map(_.settings.id.toInt) shouldBe Seq(1, 2)

    // a subsequent update replaces both
    wrapper.update(VirtualPool(id = "3"))
    wrapper().size shouldBe 1
    wrapper().map(_.settings.id.toInt) shouldBe Seq(3)

    // an empty update clears everything
    wrapper.update()
    wrapper().size shouldBe 0
    wrapper().map(_.settings.id.toInt) shouldBe Seq()
  }

  it should "test massive concurrency access for safety" in {
    // TODO ...
  }

  /*
  it should "resolve pool by name" in {
    val pools = new NeutrinoPools(null)
    val seq = Seq(VirtualPool("abc"), VirtualPool("123"), VirtualPool("zyx", protocol=Transport.HTTPS), VirtualPool("987"))

    // Setup the pools
    pools.update(seq:_*)
    pools().size should be (4)

    // Attempt to resolve by name
    pools.getNamed("abc").map(_.settings) should be (Some(VirtualPool("abc")))
    pools.getNamed("987").map(_.settings) should be (Some(VirtualPool("987")))
    pools.getNamed("ab7").map(_.settings) should be (None)

    // Should only match HTTP on default
    pools.getNamed("123", Transport.HTTP).map(_.settings) should be (Some(VirtualPool("123")))
    pools.getNamed("123", Transport.HTTPS).map(_.settings) should be (None)
    pools.getNamed("123").map(_.settings) should be (Some(VirtualPool("123", Transport.HTTP)))
    pools.getNamed("123").map(_.settings) should not be (Some(VirtualPool("123", Transport.HTTPS)))

    // Should only match HTTPS against specified
    pools.getNamed("zyx").map(_.settings) should be (None)
    pools.getNamed("zyx", Transport.HTTP).map(_.settings) should be (None)
    pools.getNamed("zyx", Transport.HTTPS).map(_.settings) should be (Some(VirtualPool("zyx", protocol=Transport.HTTPS)))
  }
  */
}
package org.eichelberger.sfc.utils
import org.eichelberger.sfc.SpaceFillingCurve._
/**
 * Result of a locality estimate for one space-filling curve.
 *
 * @param locality mean index-space distance between user-space neighbors
 * @param normalizedLocality `locality` divided by the curve's total size
 * @param localityInverse mean user-space distance between index-space neighbors
 * @param normalizedLocalityInverse `localityInverse` divided by the maximum user-space distance
 * @param sampleSize number of neighbor pairs evaluated
 * @param coverage evaluated fraction of all possible neighbor pairs
 */
case class LocalityResult(
  locality: Double,
  normalizedLocality: Double,
  localityInverse: Double,
  normalizedLocalityInverse: Double,
  sampleSize: Int,
  coverage: Double)
object LocalityEstimator {
  // Cap on the number of neighbor pairs evaluated per estimate (2^14);
  // above this, Monte Carlo sampling is used instead of full enumeration.
  val maxEvaluations = 1L << 14L
  // One evaluated pair: its distance in user space and in index space.
  case class SampleItem(dUser: Double, dIndex: Double)
  type Sample = Seq[SampleItem]
}
/**
 * Estimates the locality of a space-filling curve: how well adjacency in user
 * space is preserved in index space, and vice versa.
 *
 * When the full set of neighbor pairs exceeds
 * [[LocalityEstimator.maxEvaluations]], a Monte Carlo sample is used instead
 * of exhaustive enumeration.
 */
case class LocalityEstimator(curve: SpaceFillingCurve) {
  import LocalityEstimator._

  // indexes of the curve's dimensions
  lazy val deltas = (0 until curve.n).toList

  // Total number of user-space-adjacent cell pairs, matching what `fullSample`
  // enumerates: for each dimension d there are (volume / c_d) * (c_d - 1)
  // pairs.  (The previous formula, n * volume - sum(cardinalities), was only
  // correct for n <= 2.)
  lazy val fullSampleSize = {
    val volume = curve.cardinalities.product
    curve.cardinalities.map(c => volume - volume / c).sum
  }

  // length of the grid's main diagonal; used to normalize user-space distances
  lazy val maxUserDistance = Math.sqrt(curve.cardinalities.map(c => c * c).sum)

  // items are known to be 1 unit apart in user space
  def sampleItem(a: OrdinalVector, b: OrdinalVector): SampleItem = {
    val idxA: OrdinalNumber = curve.index(a)
    val idxB: OrdinalNumber = curve.index(b)
    SampleItem(1.0, Math.abs(idxA - idxB))
  }

  // items are known to be 1 unit apart in index space
  def sampleItem(a: OrdinalNumber, b: OrdinalNumber): SampleItem = {
    val ptA: OrdinalVector = curve.inverseIndex(a)
    val ptB: OrdinalVector = curve.inverseIndex(b)
    val distUser = Math.sqrt(ptA.zipWith(ptB).map { case (coordA, coordB) =>
      (coordA - coordB) * (coordA - coordB) }.sum)
    SampleItem(distUser, 1.0)
  }

  /** A uniformly random cell; each coordinate drawn from [0, cardinality). */
  def randomPoint: OrdinalVector =
    OrdinalVector(deltas.map(i =>
      // Math.random() < 1.0, so flooring random * c yields 0 .. c-1 inclusive.
      // (The previous `* (c - 1.0)` could never select the last coordinate.)
      Math.floor(Math.random() * curve.cardinalities(i).toDouble).toLong
    ):_*)

  /** A random in-bounds cell exactly one step from `a` along one dimension. */
  def randomPointAdjacent(a: OrdinalVector): OrdinalVector = {
    var result = a
    var found = false
    while (!found) {
      // Any dimension 0 .. n-1 is eligible (previously the last dimension was
      // unreachable because of `* (n - 1.0)`).
      val dim = Math.floor(Math.random() * curve.n.toDouble).toInt
      val dir = if (Math.random() < 0.5) 1L else -1L
      val newCoord = a(dim) + dir
      if (newCoord >= 0 && newCoord < curve.cardinalities(dim)) {
        result = a.set(dim, newCoord)
        found = true
      }
    }
    result
  }

  /** Monte Carlo sample of user-space-adjacent pairs. */
  def randomSample: Sample =
    // Seq.fill avoids the O(n^2) cost of repeatedly appending to an immutable Seq.
    Seq.fill(maxEvaluations.toInt) {
      val a = randomPoint
      sampleItem(a, randomPointAdjacent(a))
    }

  /** Monte Carlo sample of index-space-adjacent pairs. */
  def randomSampleInverse: Sample =
    Seq.fill(maxEvaluations.toInt) {
      // Draw idx from [0, size - 1) so that idx + 1 is still a valid index.
      // (Previously drawn from curve.n — the dimension count — which covered
      // only a tiny corner of the index space.)
      val idx = Math.floor(Math.random() * (curve.size.toDouble - 1.0)).toLong
      sampleItem(idx, idx + 1L)
    }

  /** Every user-space-adjacent pair in the grid. */
  def fullSample: Sample = {
    // iterate over all cells in the index space
    val cellIdxItr = combinationsIterator(OrdinalVector(curve.cardinalities:_*))
    (for (
      cellIdx <- cellIdxItr;
      deltaDim <- deltas if cellIdx(deltaDim) + 1L < curve.cardinalities(deltaDim);
      adjCellIdx = cellIdx.set(deltaDim, cellIdx(deltaDim) + 1L)
    ) yield sampleItem(cellIdx, adjCellIdx)).toSeq
  }

  /** Every index-space-adjacent pair along the curve. */
  def fullSampleInverse: Sample = {
    val sample = collection.mutable.ListBuffer[SampleItem]()
    var idx: OrdinalNumber = 0
    while ((idx + 1L) < curve.size) {
      sample += sampleItem(idx, idx + 1L)
      idx = idx + 1L
    }
    sample
  }

  /** Runs the (possibly sampled) locality estimate in both directions. */
  def locality: LocalityResult = {
    // pull a sample, constrained by a maximum size
    val sample: Sample =
      if (fullSampleSize > maxEvaluations) randomSample
      else fullSample
    val absLocality = sample.map(_.dIndex).sum / sample.size.toDouble
    val relLocality = absLocality / curve.size.toDouble

    // pull an inverse sample, constrained by a maximum size
    val sampleInverse: Sample =
      if (fullSampleSize > maxEvaluations) randomSampleInverse
      else fullSampleInverse
    // Average over the inverse sample's own size — it differs from the
    // forward sample's size when full enumeration is used.  (Previously this
    // divided by `sample.size`, skewing the inverse metric in that case.)
    val absLocalityInverse = sampleInverse.map(_.dUser).sum / sampleInverse.size.toDouble
    val relLocalityInverse = absLocalityInverse / maxUserDistance

    LocalityResult(
      absLocality,
      relLocality,
      absLocalityInverse,
      relLocalityInverse,
      sample.size,
      sample.size.toDouble / fullSampleSize
    )
  }
}
| cne1x/sfseize | src/main/scala/org/eichelberger/sfc/utils/LocalityEstimator.scala | Scala | apache-2.0 | 4,021 |
package com.googlecode.kanbanik.commands
import com.googlecode.kanbanik.model.Permission
import com.googlecode.kanbanik.dtos.PermissionType
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * Unit tests for [[EditUserCommand.merge]], which computes the permission set
 * that must be re-checked after an edit (old vs. new permissions).
 *
 * NOTE(review): the arg-list cases below behave like a symmetric difference of
 * the argument lists (e.g. {a,b,c,d} vs {b,d,x} yields {a,c,x}) — confirm that
 * this is the intended contract rather than a plain set difference.
 */
class EditUserCommandTest extends FlatSpec {
  val tested = new EditUserCommand()
  // No permissions on the edited side: everything from the original survives.
  "merge" should "return the same if the edited has no permissions" in {
    val res = tested.merge(
      List(Permission(PermissionType.ReadUser, List())),
      List()
    )
    assert(res == List(Permission(PermissionType.ReadUser, List())))
  }
  // Identical permission sets cancel out completely.
  "merge" should "return empty list if the edited has all permissions already" in {
    val res = tested.merge(
      List(Permission(PermissionType.ReadUser, List())),
      List(Permission(PermissionType.ReadUser, List()))
    )
    assert(res == List())
  }
  // Only the permissions missing on the edited side remain.
  "merge" should "return proper subset" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List()),
        Permission(PermissionType.CreateBoard, List()),
        Permission(PermissionType.CreateTask_p, List())
      ),
      List(Permission(PermissionType.ReadUser, List()))
    )
    assert(res == List(
      Permission(PermissionType.CreateBoard, List()),
      Permission(PermissionType.CreateTask_p, List())))
  }
  // Identical argument lists also cancel out.
  "merge" should "handle also same args" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a", "b", "c", "d"))
      ),
      List(Permission(PermissionType.ReadUser, List("a", "b", "c", "d")))
    )
    assert(res.size == 0)
  }
  // Overlapping args: shared entries (b, d) drop out, unique ones (a, c, x) remain.
  "merge" should "handle also complicated args" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a", "b", "c", "d"))
      ),
      List(Permission(PermissionType.ReadUser, List("b", "d", "x")))
    )
    assert(res.head.arg == List("a", "c", "x"))
  }
  "merge" should "handle also the args" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a", "b"))
      ),
      List(Permission(PermissionType.ReadUser, List("b")))
    )
    assert(res.head.arg == List("a"))
  }
  // The "*" wildcard arg is treated like any other entry in the difference.
  "merge" should "handle also the args with star on left" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a", "b", "*"))
      ),
      List(Permission(PermissionType.ReadUser, List("b")))
    )
    assert(res.head.arg == List("a", "*"))
  }
  "merge" should "handle also the args with star on right" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a", "b"))
      ),
      List(Permission(PermissionType.ReadUser, List("*", "b")))
    )
    assert(res.head.arg == List("a", "*"))
  }
  // Permissions present only on the edited side are also surfaced for checking.
  "merge" should "add to check permissions also the removed ones" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List())
      ),
      List(Permission(PermissionType.CreateBoard, List()))
    )
    assert(res == List(
      Permission(PermissionType.ReadUser, List()),
      Permission(PermissionType.CreateBoard, List())))
  }
  "merge" should "add to check permissions also the removed handling also args" in {
    val res = tested.merge(
      List(
        Permission(PermissionType.ReadUser, List("a"))
      ),
      List(Permission(PermissionType.ReadUser, List("a", "b")))
    )
    assert(res == List(
      Permission(PermissionType.ReadUser, List("b"))
    ))
  }
}
| kanbanik/kanbanik | kanbanik-server/src/test/scala/com/googlecode/kanbanik/commands/EditUserCommandTest.scala | Scala | apache-2.0 | 3,491 |
package io.github.daviddenton.finagle.aws
import java.nio.charset.StandardCharsets
import javax.crypto.Mac
import javax.crypto.spec.SecretKeySpec
import aws.Hex
/** SHA-256 and HMAC-SHA-256 helpers used when signing AWS requests. */
object AwsHmacSha256 {

  /** Hex-encoded SHA-256 digest of the UTF-8 bytes of `payload`. */
  def hash(payload: String): String =
    hash(payload.getBytes(StandardCharsets.UTF_8))

  /** Hex-encoded SHA-256 digest of the given bytes. */
  def hash(payload: Array[Byte]): String = Digest.sha256(payload).toHex

  /** Raw HMAC-SHA-256 of the UTF-8 bytes of `data`, keyed with `key`. */
  def hmacSHA256(key: Array[Byte], data: String): Array[Byte] = {
    val hmacAlgorithm = "HmacSHA256"
    val mac = Mac.getInstance(hmacAlgorithm)
    mac.init(new SecretKeySpec(key, hmacAlgorithm))
    mac.doFinal(data.getBytes(StandardCharsets.UTF_8))
  }

  /** Hex encoding of `data`. */
  def hex(data: Array[Byte]): String = Hex.encode(data)
}
| daviddenton/finagle-aws | src/main/scala/io/github/daviddenton/finagle/aws/AwsHmacSha256.scala | Scala | apache-2.0 | 660 |
/*
* Copyright (c) 2016-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
// Java
import java.net.URI
import org.apache.http.client.utils.URLEncodedUtils
// Scala
import scala.collection.JavaConversions._
import scala.util.control.NonFatal
import scala.util.{Try, Success => TS, Failure => TF}
// Scalaz
import scalaz._
import Scalaz._
// Jackson
import com.fasterxml.jackson.core.JsonParseException
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// Iglu
import iglu.client.{Resolver, SchemaKey}
// This project
import loaders.CollectorPayload
import utils.{JsonUtils => JU}
/**
* Transforms a collector payload which conforms to
* a known version of the StatusGator Tracking webhook
* into raw events.
*/
object StatusGatorAdapter extends Adapter {

  // Vendor name for Failure Message
  private val VendorName = "StatusGator"

  // Tracker version for an StatusGator Tracking webhook
  private val TrackerVersion = "com.statusgator-v1"

  // Expected content type for a request body
  private val ContentType = "application/x-www-form-urlencoded"

  // Schemas for reverse-engineering a Snowplow unstructured event
  private val EventSchema = SchemaKey("com.statusgator", "status_change", "jsonschema", "1-0-0").toSchemaUri

  /**
   * Converts a CollectorPayload instance into raw events.
   *
   * A StatusGator Tracking payload contains one single event
   * in the body of the payload, stored within a HTTP encoded
   * string.
   *
   * @param payload The CollectorPayload containing one or more
   *        raw events as collected by a Snowplow collector
   * @param resolver (implicit) The Iglu resolver used for
   *        schema lookup and validation. Not used
   * @return a Validation boxing either a NEL of RawEvents on
   *         Success, or a NEL of Failure Strings
   */
  def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
    // Match-arm order matters: missing body / missing or wrong content type /
    // empty body are rejected before any parsing is attempted.
    (payload.body, payload.contentType) match {
      case (None, _) => s"Request body is empty: no ${VendorName} events to process".failureNel
      case (_, None) =>
        s"Request body provided but content type empty, expected ${ContentType} for ${VendorName}".failureNel
      case (_, Some(ct)) if ct != ContentType =>
        s"Content type of ${ct} provided, expected ${ContentType} for ${VendorName}".failureNel
      case (Some(body), _) if (body.isEmpty) => s"${VendorName} event body is empty: nothing to process".failureNel
      case (Some(body), _) => {
        val qsParams = toMap(payload.querystring)
        // Parse the form-encoded body by treating it as the query string of a
        // dummy URL, then turn the key/value pairs into a Map.
        Try {
          toMap(URLEncodedUtils.parse(URI.create("http://localhost/?" + body), "UTF-8").toList)
        } match {
          case TF(e) =>
            s"${VendorName} incorrect event string : [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
          case TS(bodyMap) =>
            try {
              // The type ascription guides json4s' JsonDSL implicit conversion
              // so the Map can be rendered as a JSON object.
              val a: Map[String, String] = bodyMap
              val event = parse(compact(render(a)))
              // Wrap the (camelCased) JSON as a single Snowplow unstructured event.
              NonEmptyList(
                RawEvent(
                  api = payload.api,
                  parameters = toUnstructEventParams(TrackerVersion, qsParams, EventSchema, camelize(event), "srv"),
                  contentType = payload.contentType,
                  source = payload.source,
                  context = payload.context
                )).success
            } catch {
              case e: JsonParseException =>
                s"${VendorName} event string failed to parse into JSON: [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
            }
        }
      }
    }
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/StatusGatorAdapter.scala | Scala | apache-2.0 | 4,393 |
package pme.connect4.gui
import scalafx.scene.Node
/** Marker trait for any ScalaFX scene-graph [[Node]] that renders a Connect Four chip. */
trait ChipView extends Node {
}
| pme123/scala-connect4 | src/main/scala/pme/connect4/gui/ChipView.scala | Scala | mit | 87 |
/** Minimal fixture class: captures `foo` and prints it on demand. */
class Foo(foo: String) {
  /** Prints the constructor-supplied string to stdout (side effect only). */
  def bar(): Unit = println(foo)
}
| AtomLinter/linter-scalac | spec/fixtures/project2/src/main/scala/Foo.scala | Scala | mit | 54 |
package com.goticks
import akka.actor._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.ExecutionContext
/**
 * Concrete wiring of the REST routes: supplies the ask-timeout and dispatcher
 * required by [[BoxOfficeApi]] and creates the box-office actor in `system`.
 */
class RestApi(system: ActorSystem, timeout: Timeout)
  extends RestRoutes {
  implicit val requestTimeout = timeout
  implicit def executionContext = system.dispatcher

  // Overrides the abstract `createBoxOffice()` from BoxOfficeApi.
  def createBoxOffice = system.actorOf(BoxOffice.props, BoxOffice.name)
}
/**
 * akka-http route tree for the ticket service: event listing, per-event CRUD,
 * and ticket purchase. Responses are (un)marshalled via [[EventMarshalling]].
 */
trait RestRoutes extends BoxOfficeApi
    with EventMarshalling {
  import StatusCodes._

  // Combined route tree; each sub-route matches its own path shape.
  def routes: Route = eventsRoute ~ eventRoute ~ ticketsRoute

  def eventsRoute =
    pathPrefix("events") {
      pathEndOrSingleSlash {
        get {
          // GET /events
          onSuccess(getEvents()) { events =>
            complete(OK, events)
          }
        }
      }
    }

  def eventRoute =
    pathPrefix("events" / Segment) { event =>
      pathEndOrSingleSlash {
        post {
          // POST /events/:event
          entity(as[EventDescription]) { ed =>
            onSuccess(createEvent(event, ed.tickets)) {
              case BoxOffice.EventCreated(event) => complete(Created, event)
              case BoxOffice.EventExists =>
                val err = Error(s"$event event exists already.")
                complete(BadRequest, err)
            }
          }
        } ~
        get {
          // GET /events/:event
          onSuccess(getEvent(event)) {
            // None -> 404, Some(event) -> 200 with the event body
            _.fold(complete(NotFound))(e => complete(OK, e))
          }
        } ~
        delete {
          // DELETE /events/:event
          onSuccess(cancelEvent(event)) {
            _.fold(complete(NotFound))(e => complete(OK, e))
          }
        }
      }
    }

  def ticketsRoute =
    pathPrefix("events" / Segment / "tickets") { event =>
      post {
        pathEndOrSingleSlash {
          // POST /events/:event/tickets
          entity(as[TicketRequest]) { request =>
            onSuccess(requestTickets(event, request.tickets)) { tickets =>
              // An empty ticket list means the event was not found (or sold out
              // per BoxOffice semantics) -> 404; otherwise 201 with the tickets.
              if(tickets.entries.isEmpty) complete(NotFound)
              else complete(Created, tickets)
            }
          }
        }
      }
    }
}
/**
 * Thin asynchronous facade over the [[BoxOffice]] actor, built on the ask
 * pattern. Every call returns a Future resolved from the actor's reply.
 */
trait BoxOfficeApi {
  import BoxOffice._

  /** Creates the underlying box-office actor; provided by the concrete API. */
  def createBoxOffice(): ActorRef

  implicit def executionContext: ExecutionContext
  implicit def requestTimeout: Timeout

  // Created on first use so implementors can finish construction first.
  lazy val boxOffice = createBoxOffice()

  def createEvent(event: String, nrOfTickets: Int) =
    boxOffice.ask(CreateEvent(event, nrOfTickets)).mapTo[EventResponse]

  def getEvents() =
    boxOffice.ask(GetEvents).mapTo[Events]

  def getEvent(event: String) =
    boxOffice.ask(GetEvent(event)).mapTo[Option[Event]]

  def cancelEvent(event: String) =
    boxOffice.ask(CancelEvent(event)).mapTo[Option[Event]]

  def requestTickets(event: String, tickets: Int) =
    boxOffice.ask(GetTickets(event, tickets)).mapTo[TicketSeller.Tickets]
}
//
| RayRoestenburg/akka-in-action | chapter-up-and-running/src/main/scala/com/goticks/RestApi.scala | Scala | mit | 3,021 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.data.rdd
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import scala.collection.mutable
/**
 * Reference-counting helper that persists an RDD once it is used more than
 * once and unpersists it when the last reference is released.
 *
 * NOTE(review): the backing map is not synchronized; callers appear to use it
 * from the driver only — confirm before calling from multiple threads.
 */
object AutoPersister extends Logging {
  // RDD id -> outstanding reference count; entries are dropped on unpersist.
  val references = mutable.Map.empty[Int, Int]

  val defaultStorageLevel = StorageLevel.MEMORY_AND_DISK_SER

  /** Current reference count for the RDD (0 when untracked). */
  def getRef(rdd: RDD[_]): Int = {
    references.getOrElse(rdd.id, 0)
  }

  // force a persist (and record one reference)
  def persist(rdd: RDD[_], storageLevel: StorageLevel = defaultStorageLevel) = {
    rdd.persist(storageLevel)
    incrementRef(rdd)
  }

  // force an unpersist (and drop all tracking for the RDD)
  def unpersist(rdd: RDD[_]) = {
    rdd.unpersist()
    references.remove(rdd.id)
  }

  /**
   * Decrements the ref count, unpersisting when it drops to zero or below.
   * Returns the new count (0 if the RDD was not tracked).
   */
  def decrementRef(rdd: RDD[_]): Int =
    references.get(rdd.id) match {
      case None =>
        // Not tracked: nothing to do. (Replaces the original non-local
        // `return 0` hidden inside getOrElse's by-name default argument,
        // which was implemented via a thrown NonLocalReturnControl.)
        0
      case Some(previous) =>
        val cnt = previous - 1
        logInfo("decrement ref: " + rdd.id + " from: " + (cnt + 1) + " to: " + cnt + (if (cnt <= 0) " unpersisting" else ""))
        references.put(rdd.id, cnt)
        if (cnt <= 0 && rdd.getStorageLevel != StorageLevel.NONE) {
          rdd.unpersist()
          references.remove(rdd.id)
        }
        cnt
    }

  /**
   * Increments the ref count and persists the RDD once the count reaches 2
   * (a single consumer gains nothing from caching). Returns the new count.
   */
  def incrementRef(rdd: RDD[_], storageLevel: StorageLevel = defaultStorageLevel): Int = {
    val cnt = references.getOrElse(rdd.id, 0) + 1
    logInfo("increment ref: " + rdd.id + " from: " + (cnt - 1) + " to: " + cnt + (if (cnt == 2) " persisting" else ""))
    references.put(rdd.id, cnt)
    if (cnt == 2 && rdd.getStorageLevel == StorageLevel.NONE) {
      rdd.persist(storageLevel)
    }
    cnt
  }
}
| akarmas/mrgeo | mrgeo-core/src/main/scala/org/mrgeo/data/rdd/AutoPersister.scala | Scala | apache-2.0 | 2,216 |
package io.iohk.ethereum.metrics
import java.util.concurrent.atomic.AtomicReference
import io.micrometer.core.instrument._
import io.micrometer.core.instrument.simple.SimpleMeterRegistry
import io.prometheus.client.exporter.HTTPServer
import io.prometheus.client.hotspot.DefaultExports
import scala.util.Try
/**
 * Metrics "service": wraps a Micrometer registry plus the Prometheus HTTP
 * exporter and offers factory methods for gauges, counters, timers, etc.
 */
case class Metrics(metricsPrefix: String, registry: MeterRegistry, serverPort: Int = 0) {
  private[this] def mkName: String => String = MetricsUtils.mkNameWithPrefix(metricsPrefix)

  // Set by start(); lets close() know whether the lazy server ever existed.
  @volatile private[this] var serverStarted = false

  // Lazy so the port is only bound when start() is actually invoked.
  private lazy val server: HTTPServer = new HTTPServer(serverPort)

  def start(): Unit = {
    server // We need this to evaluate the lazy val!
    serverStarted = true
    DefaultExports.initialize()
  }

  def close(): Unit = {
    registry.close()
    // Bug fix: only stop the exporter if start() created it. Referencing the
    // lazy `server` unconditionally here would *instantiate* the HTTPServer
    // (binding the port) during shutdown when start() was never called.
    if (serverStarted) server.stop()
  }

  /** Gauge that reports transient spikes; see [[DeltaSpikeGauge]]. */
  def deltaSpike(name: String): DeltaSpikeGauge =
    new DeltaSpikeGauge(mkName(name), this)

  /**
   * Returns a [[io.micrometer.core.instrument.Gauge Gauge]].
   * @param computeValue A function that computes the current gauge value.
   */
  def gauge(name: String, computeValue: () => Double): Gauge =
    Gauge
      // Note Never use `null` as the value for the second parameter.
      // If you do, you risk getting no metrics out of the gauge.
      // So we just use a vanilla `this` but any other non-`null`
      // value would also do.
      .builder(mkName(name), this, (_: Any) => computeValue())
      .register(registry)

  /**
   * Returns a [[io.micrometer.core.instrument.Counter Counter]].
   */
  def counter(name: String): Counter =
    Counter
      .builder(mkName(name))
      .register(registry)

  /**
   * Returns a [[io.micrometer.core.instrument.Timer Timer]].
   */
  def timer(name: String): Timer =
    Timer
      .builder(mkName(name))
      .register(registry)

  /**
   * Returns a [[io.micrometer.core.instrument.DistributionSummary DistributionSummary]].
   */
  def distribution(name: String): DistributionSummary =
    DistributionSummary
      .builder(mkName(name))
      .register(registry)
}
object Metrics {
  final val MetricsPrefix = "app"

  //+ Metrics singleton support
  // Placeholder instance (no-op SimpleMeterRegistry) held until configure()
  // installs the real one, so metrics calls before configuration are safe.
  private[this] final val metricsSentinel = Metrics(MetricsPrefix, new SimpleMeterRegistry())
  private[this] final val metricsRef = new AtomicReference[Metrics](metricsSentinel)
  // Atomically installs `metrics`; succeeds only while the sentinel is still in place.
  private[this] def setOnce(metrics: Metrics): Boolean = metricsRef.compareAndSet(metricsSentinel, metrics)
  def get(): Metrics = metricsRef.get()
  //- Metrics singleton support

  /**
   * Instantiates and configures the metrics "service". This should happen once in the lifetime of the application.
   * After this call completes successfully, you can obtain the metrics service by using `Metrics.get()`.
   */
  def configure(config: MetricsConfig): Try[Unit] = {
    Try {
      if (config.enabled) {
        val registry = MeterRegistryBuilder.build(MetricsPrefix)
        val metrics = new Metrics(MetricsPrefix, registry, config.port)
        if (setOnce(metrics))
          metrics.start()
        else {
          // Lost the race: someone else configured first; dispose what we built.
          metrics.close()
          // NOTE(review): `previous = metrics` is the *newly built* instance and
          // `current = get()` the already-installed one — the parameter labels
          // look swapped; confirm against MetricsAlreadyConfiguredError.
          throw new MetricsAlreadyConfiguredError(previous = metrics, current = get())
        }
      }
    }
  }
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/metrics/Metrics.scala | Scala | mit | 3,135 |
// IntelliJ Scala type-inference test fixture (SCL-3354): the /*start*/../*end*/
// markers delimit the expression whose inferred type is checked against the
// expected-type comment on the file's last line. Keep markers and code intact.
object A {
  def foo[T <: List[String]](x : T) : T = x
  def bar(f: { def foo[T <: List[S]](x : T) : T } forSome { type S }) = 1
  def bar(s: String) = "text"
  /*start*/bar(A)/*end*/
}
//Int | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL3354.scala | Scala | apache-2.0 | 192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import scala.collection.JavaConverters._
import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument}
import org.apache.spark.sql.{AnalysisException, Column, DataFrame}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.datasources.v2.orc.OrcTable
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
/**
* A test suite that tests Apache ORC filter API based filter pushdown optimization.
* OrcFilterSuite and HiveOrcFilterSuite is logically duplicated to provide the same test coverage.
* The difference are the packages containing 'Predicate' and 'SearchArgument' classes.
* - OrcFilterSuite uses 'org.apache.orc.storage.ql.io.sarg' package.
* - HiveOrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package.
*/
class OrcFilterSuite extends OrcTest with SharedSQLContext {
  /**
   * Runs `predicate` against `df` through the DataSource V2 ORC path, asserts
   * that Catalyst pushed the filter down to the ORC scan and that it converts
   * into an ORC `SearchArgument`, then hands that SearchArgument to `checker`.
   */
  protected def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      checker: (SearchArgument) => Unit): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct
    val query = df
      .select(output.map(e => Column(e)): _*)
      .where(Column(predicate))

    query.queryExecution.optimizedPlan match {
      case PhysicalOperation(_, filters,
          DataSourceV2Relation(orcTable: OrcTable, _, options)) =>
        assert(filters.nonEmpty, "No filter is analyzed from the given query")
        // Replay the push-down that the planner would perform.
        val scanBuilder = orcTable.newScanBuilder(options)
        scanBuilder.pushFilters(filters.flatMap(DataSourceStrategy.translateFilter).toArray)
        val pushedFilters = scanBuilder.pushedFilters()
        assert(pushedFilters.nonEmpty, "No filter is pushed down")
        val maybeFilter = OrcFilters.createFilter(query.schema, pushedFilters)
        assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pushedFilters")
        checker(maybeFilter.get)

      case _ =>
        throw new AnalysisException("Can not match OrcTable in the query.")
    }
  }
  /**
   * Asserts that the SearchArgument generated for `predicate` contains a leaf
   * with the expected ORC operator.
   */
  protected def checkFilterPredicate
      (predicate: Predicate, filterOperator: PredicateLeaf.Operator)
      (implicit df: DataFrame): Unit = {
    def checkComparisonOperator(filter: SearchArgument) = {
      val operator = filter.getLeaves.asScala
      assert(operator.map(_.getOperator).contains(filterOperator))
    }
    checkFilterPredicate(df, predicate, checkComparisonOperator)
  }
  /**
   * Asserts the exact string form (leaves + expression tree) of the generated
   * SearchArgument.
   */
  protected def checkFilterPredicate
      (predicate: Predicate, stringExpr: String)
      (implicit df: DataFrame): Unit = {
    def checkLogicalOperator(filter: SearchArgument) = {
      assert(filter.toString == stringExpr)
    }
    checkFilterPredicate(df, predicate, checkLogicalOperator)
  }
  /**
   * Asserts that `predicate` does NOT yield an ORC SearchArgument.
   *
   * @param noneSupported when true, expects that no filter at all is pushed to
   *        the scan; when false, filters are pushed but none is convertible.
   */
  protected def checkNoFilterPredicate
      (predicate: Predicate, noneSupported: Boolean = false)
      (implicit df: DataFrame): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct
    val query = df
      .select(output.map(e => Column(e)): _*)
      .where(Column(predicate))

    query.queryExecution.optimizedPlan match {
      case PhysicalOperation(_, filters,
          DataSourceV2Relation(orcTable: OrcTable, _, options)) =>
        assert(filters.nonEmpty, "No filter is analyzed from the given query")
        val scanBuilder = orcTable.newScanBuilder(options)
        scanBuilder.pushFilters(filters.flatMap(DataSourceStrategy.translateFilter).toArray)
        val pushedFilters = scanBuilder.pushedFilters()
        if (noneSupported) {
          assert(pushedFilters.isEmpty, "Unsupported filters should not show in pushed filters")
        } else {
          assert(pushedFilters.nonEmpty, "No filter is pushed down")
          val maybeFilter = OrcFilters.createFilter(query.schema, pushedFilters)
          assert(maybeFilter.isEmpty, s"Couldn't generate filter predicate for $pushedFilters")
        }

      case _ =>
        throw new AnalysisException("Can not match OrcTable in the query.")
    }
  }
  // Numeric type tests below share one pattern: each comparison is checked for
  // the expected ORC PredicateLeaf operator. '>' and '>=' are checked against
  // LESS_THAN_EQUALS / LESS_THAN leaves — the "combinations" test shows they
  // are expressed as negations of those leaves.
  test("filter pushdown - integer") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)

      // Literal-on-the-left forms must push down the mirrored comparison.
      checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  test("filter pushdown - long") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  test("filter pushdown - float") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  test("filter pushdown - double") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }
  // Same operator checks as the numeric tests, for string and boolean columns.
  test("filter pushdown - string") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === "1", PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> "1", PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < "2", PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > "3", PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= "1", PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= "4", PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal("1") === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal("1") <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal("2") > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal("3") < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal("1") >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal("4") <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  test("filter pushdown - boolean") {
    withOrcDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === true, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> true, PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < true, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > false, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= false, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= false, PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(false) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(false) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(false) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(true) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(true) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(true) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }
  test("filter pushdown - decimal") {
    withOrcDataFrame((1 to 4).map(i => Tuple1.apply(BigDecimal.valueOf(i)))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === BigDecimal.valueOf(1), PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> BigDecimal.valueOf(1), PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < BigDecimal.valueOf(2), PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > BigDecimal.valueOf(3), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= BigDecimal.valueOf(1), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= BigDecimal.valueOf(4), PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(
        Literal(BigDecimal.valueOf(1)) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(
        Literal(BigDecimal.valueOf(1)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(
        Literal(BigDecimal.valueOf(2)) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(
        Literal(BigDecimal.valueOf(3)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(
        Literal(BigDecimal.valueOf(1)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(
        Literal(BigDecimal.valueOf(4)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  test("filter pushdown - timestamp") {
    val timeString = "2015-08-20 14:57:00"
    // NOTE(review): the offset is `i * 3600` *milliseconds* (3.6s apart), not
    // hours — fine for distinctness, but the unit may not be what was intended.
    val timestamps = (1 to 4).map { i =>
      val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
      new Timestamp(milliseconds)
    }
    withOrcDataFrame(timestamps.map(Tuple1(_))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === timestamps(0), PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> timestamps(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < timestamps(1), PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > timestamps(2), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= timestamps(0), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= timestamps(3), PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(timestamps(0)) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(timestamps(0)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(timestamps(1)) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(timestamps(2)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(timestamps(0)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(timestamps(3)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }
  // Checks the exact SearchArgument trees for NOT/AND/OR combinations; note how
  // '>' / '!=' / negations are expressed via (not leaf-N) over the base leaves.
  test("filter pushdown - combinations with logical operators") {
    withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
      checkFilterPredicate(
        '_1.isNotNull,
        "leaf-0 = (IS_NULL _1), expr = (not leaf-0)"
      )
      checkFilterPredicate(
        '_1 =!= 1,
        "leaf-0 = (IS_NULL _1), leaf-1 = (EQUALS _1 1), expr = (and (not leaf-0) (not leaf-1))"
      )
      checkFilterPredicate(
        !('_1 < 4),
        "leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 4), expr = (and (not leaf-0) (not leaf-1))"
      )
      checkFilterPredicate(
        '_1 < 2 || '_1 > 3,
        "leaf-0 = (LESS_THAN _1 2), leaf-1 = (LESS_THAN_EQUALS _1 3), " +
          "expr = (or leaf-0 (not leaf-1))"
      )
      checkFilterPredicate(
        '_1 < 2 && '_1 > 3,
        "leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 2), leaf-2 = (LESS_THAN_EQUALS _1 3), " +
          "expr = (and (not leaf-0) leaf-1 (not leaf-2))"
      )
    }
  }

  test("filter pushdown - date") {
    val dates = Seq("2017-08-18", "2017-08-19", "2017-08-20", "2017-08-21").map { day =>
      Date.valueOf(day)
    }
    withOrcDataFrame(dates.map(Tuple1(_))) { implicit df =>
      checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)

      checkFilterPredicate('_1 === dates(0), PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate('_1 <=> dates(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS)

      checkFilterPredicate('_1 < dates(1), PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate('_1 > dates(2), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 <= dates(0), PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate('_1 >= dates(3), PredicateLeaf.Operator.LESS_THAN)

      checkFilterPredicate(Literal(dates(0)) === '_1, PredicateLeaf.Operator.EQUALS)
      checkFilterPredicate(Literal(dates(0)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
      checkFilterPredicate(Literal(dates(1)) > '_1, PredicateLeaf.Operator.LESS_THAN)
      checkFilterPredicate(Literal(dates(2)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(dates(0)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
      checkFilterPredicate(Literal(dates(3)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
    }
  }

  // Array, binary and map columns cannot be expressed as ORC predicate leaves,
  // so nothing at all should be pushed down for them.
  test("no filter pushdown - non-supported types") {
    implicit class IntToBinary(int: Int) {
      def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8)
    }
    // ArrayType
    withOrcDataFrame((1 to 4).map(i => Tuple1(Array(i)))) { implicit df =>
      checkNoFilterPredicate('_1.isNull, noneSupported = true)
    }
    // BinaryType
    withOrcDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
      checkNoFilterPredicate('_1 <=> 1.b, noneSupported = true)
    }
    // MapType
    withOrcDataFrame((1 to 4).map(i => Tuple1(Map(i -> i)))) { implicit df =>
      checkNoFilterPredicate('_1.isNotNull, noneSupported = true)
    }
  }
  test("SPARK-12218 and SPARK-25699 Converting conjunctions into ORC SearchArguments") {
    import org.apache.spark.sql.sources._
    // Schema with one column ("a": Int) used by convertible predicates and one
    // ("b": String) used only by StringContains, which ORC cannot push down.
    // The `LessThan` should be converted while the `StringContains` shouldn't
    val schema = new StructType(
      Array(
        StructField("a", IntegerType, nullable = true),
        StructField("b", StringType, nullable = true)))
    assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
      OrcFilters.createFilter(schema, Array(
        LessThan("a", 10),
        StringContains("b", "prefix")
      )).get.toString
    }
    // The `LessThan` should be converted while the whole inner `And` shouldn't
    assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
      OrcFilters.createFilter(schema, Array(
        LessThan("a", 10),
        Not(And(
          GreaterThan("a", 1),
          StringContains("b", "prefix")
        ))
      )).get.toString
    }
    // Can not remove unsupported `StringContains` predicate since it is under `Or` operator.
    // Dropping one branch of an Or would widen the filter incorrectly, so no
    // SearchArgument can be built at all and createFilter returns None.
    assert(OrcFilters.createFilter(schema, Array(
      Or(
        LessThan("a", 10),
        And(
          StringContains("b", "prefix"),
          GreaterThan("a", 1)
        )
      )
    )).isEmpty)
    // Safely remove unsupported `StringContains` predicate and push down `LessThan`
    assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
      OrcFilters.createFilter(schema, Array(
        And(
          LessThan("a", 10),
          StringContains("b", "prefix")
        )
      )).get.toString
    }
    // Safely remove unsupported `StringContains` predicate, push down `LessThan` and `GreaterThan`.
    assertResult("leaf-0 = (LESS_THAN a 10), leaf-1 = (LESS_THAN_EQUALS a 1)," +
      " expr = (and leaf-0 (not leaf-1))") {
      OrcFilters.createFilter(schema, Array(
        And(
          And(
            LessThan("a", 10),
            StringContains("b", "prefix")
          ),
          GreaterThan("a", 1)
        )
      )).get.toString
    }
  }
}
| Aegeaner/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala | Scala | apache-2.0 | 20,052 |
package org.infinispan.spark.suites
import org.infinispan.spark.domain.User
import org.infinispan.spark.test._
import org.scalatest.{DoNotDiscover, FunSuite, Matchers}
@DoNotDiscover
class DatasetWithScalaEntitySuite extends FunSuite with UsersCache with Spark with MultipleServers with Matchers
        with DatasetAssertions[User] {

   // Number of users the UsersCache fixture loads into the server.
   override protected def getNumEntries: Int = 100

   // Registers the protobuf-annotated Scala entity so the connector can map
   // cache entries to User instances and evaluate filters server-side.
   override def getConfiguration =
      super.getConfiguration
              .addProtoAnnotatedClass(classOf[User])
              .setAutoRegisterProto()
              .setTargetEntity(classOf[User])

   test("read data using the DataFrame API") {
      val users = getSparkSession.read.format("infinispan").options(getConfiguration.toStringsMap).load()
      // Keep only users strictly between 30 and 40 years old.
      val inRange = users.filter(users("age") > 30).filter(users("age") < 40)
      assertDataset(inRange, r => r.getAge > 30 && r.getAge < 40)
   }

   override def row2String(e: User): String = e.name
}
| infinispan/infinispan-spark | src/test/scala/org/infinispan/spark/suites/DatasetWithScalaEntitySuite.scala | Scala | apache-2.0 | 925 |
// -*- mode: Scala;-*-
// Filename: search.scala
// Authors: luciusmeredith
// Creation: Wed Feb 1 09:12:45 2017
// Copyright: See site license
// Description:
// ------------------------------------------------------------------------
package coop.rchain.lib.zipper
import scala.collection.mutable.Map
/** A mutable map from zipper [[Location]]s to the [[Tree]]s found there.
  * Currently only a marker alias; no members are added beyond
  * `scala.collection.mutable.Map`.
  * NOTE(review): `Location` and `Tree` are defined elsewhere in this package.
  */
trait ZipperMap[A]
     extends Map[Location[A],Tree[A]] {
}
| rchain/Rholang | src/main/scala/lib/zipper/search.scala | Scala | mit | 432 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import play.api.libs.json.Json
import play.api.libs.json._
import Play.current
// you need this import to have combinators
import play.api.libs.functional.syntax._
import play.api.templates._
import models._
import views._
object APIv1 extends Controller {
  //http://www.playframework.com/documentation/2.1.x/ScalaJson
  //http://www.playframework.com/documentation/2.1.x/ScalaJsonRequests
  //http://www.playframework.com/documentation/2.1.x/ScalaJsonInception
  // Authenticates a user from a JSON body {"email":..., "password":...};
  // on success returns the user's info and stores the email in the Play
  // session cookie under the "user" key.
  // NOTE(review): the password is compared in plain text against the stored
  // value -- confirm whether hashing happens elsewhere.
  // NOTE(review): `case (Some(user: User), user.password)` matches the second
  // element against the just-bound user's password field; verify this pattern
  // behaves as intended (a guard `if user.password == ps` is the usual form).
  def login = Action(parse.json) { request =>
    val json = request.body;
    val email = (json \\ "email").validate[String];
    val password = (json \\ "password").validate[String];
    (email, password) match {
      case (JsSuccess(em, _), JsSuccess(ps, _)) => {
        (User.findByEmail(em), ps) match {
          case (Some(user: User), user.password) => Ok(Json.obj("user" -> Json.toJson(User.userInfo(user)))).withSession(
            "user" -> em)
          case (Some(user: User), _) => InternalServerError(Json.obj("error" -> JsString("User password not match")))
          case _ => InternalServerError(Json.obj("error" -> JsString("User not found")))
        }
      }
      case _ => InternalServerError(Json.obj("error" -> JsString("Bad login request")))
    }
  }
  // Clears the session cookie.
  // NOTE(review): a 204 "No Content" response is sent with a JSON body here;
  // HTTP forbids a body on 204 -- confirm clients rely on this.
  def logout = Action { request =>
    Status(204)(Json.obj("result" -> JsString("Bye"))).withNewSession
  }
  // Returns the groups the session user belongs to for the project whose
  // folder code is given in the JSON body under "project".
  def userGroups = Action(parse.json) { request =>
    //grab project code from json request
    val json = request.body;
    val project = (json \\ "project").validate[String];
    //validate
    (project) match {
      case JsSuccess(proj, _) => {
        //find project by code and get current user email from session
        (Project.findByFolder(proj), request.session.get("user")) match {
          case (Some(project:Project), Some(email:String)) => {
            val groups = Project.findUserGroups(project, email);
            Ok(Json.obj("groups" -> Json.toJson(groups)))
          }
          case _ => InternalServerError(Json.obj("error" -> JsString("Project or user not found")))
        }
      }
      case _ => InternalServerError(Json.obj("error" -> JsString("Project not found in request")))
    }
  }
  // Returns info for the user stored in the session, or {"user": null} when
  // no session key is present.
  def currentUser = Action { request =>
    request.session.get("user").map { sessKey =>
      User.findByEmail(sessKey) match {
        case Some(u:User) =>
          Ok(Json.obj("user" -> Json.toJson(User.userInfo(u))))
        case _ =>
          InternalServerError(Json.obj("error" -> JsString("User not found")))
      }
    }.getOrElse {
      Ok(Json.obj("user" -> JsNull))
    }
  }
  /*
  def index = Action { request =>
    request.session.get("connected").map { user =>
      Ok("Hello " + user)
    }.getOrElse {
      Unauthorized("Oops, you are not connected")
    }
  }
  */
  // Generic dispatcher: routes a JSON request by its "action"/"model" fields
  // to the matching handler; "projects" additionally requires a session user.
  // NOTE(review): `email` is never reassigned -- could be a val.
  def exec = Action(parse.json) { request =>
    val json = request.body;
    val action = (json \\ "action").validate[String];
    val model = (json \\ "model").validate[String];
    var email = request.session.get("user");
    (action, model, email) match {
      case (JsSuccess("get", _), JsSuccess("projects", _), None ) => Unauthorized("Unexpected Json data")
      case (JsSuccess("get", _), JsSuccess("projects", _), Some(user: String)) => getProjects(json)
      case (JsSuccess("get", _), JsSuccess("project.contracts", _), _ ) => getContracts(json)
      case (JsSuccess("get", _), JsSuccess("project.contracts.tasks", _), _ ) => getContractTasks(json)
      case _ => BadRequest("Unexpected Json data")
    }
  }
  // Lists all projects as {id, name}; the project folder code doubles as id.
  def getProjects(json: JsValue) = {
    val projects = Project.findAll.map { project =>
      JsObject(
        "id" -> JsString(project.folder) ::
        "name" -> JsString(project.name) :: Nil)
    }
    Ok(Json.obj("projects" -> JsArray(projects)))
  }
  // Lists the contracts of the project named in the "project" field.
  def getContracts(json: JsValue) = {
    val project = (json \\ "project").validate[String];
    project match {
      case JsSuccess(project, _) => {
        val contracts = Contract.findByProjectFolder(project).map { contract =>
          JsObject(
            "id" -> JsString(contract.id.get.toString) ::
            "title" -> JsString(contract.title) :: Nil)
        }
        Ok(Json.obj("contracts" -> JsArray(contracts)))
      }
      case _ => BadRequest("Not found project name")
    }
  }
  // Lists the tasks of the contract given by the "contract" id; the "project"
  // field is validated but only the contract id is used for the lookup.
  def getContractTasks(json: JsValue) = {
    val project = (json \\ "project").validate[String];
    val contract = (json \\ "contract").validate[Long];
    (project, contract) match {
      case (JsSuccess(project, _), JsSuccess(contract, _) ) => {
        val tasks = Task.findByContract(contract).map { task =>
          JsObject(
            "id" -> JsString(task.id.get.toString) ::
            "title" -> JsString(task.title) :: Nil)
        }
        Ok(Json.obj("tasks" -> JsArray(tasks)))
      }
      case _ => BadRequest("Not found project name or contract number")
    }
  }
}
| olegsmetanin/playapingmod | app/controllers/APIv1.scala | Scala | mit | 4,990 |
package com.lookout.borderpatrol
/** Placeholder package object for router components; intentionally empty. */
package object routers {
}
| maheshkelkar/borderpatrol | server/src/main/scala/com/lookout/borderpatrol/routers/package.scala | Scala | mit | 62 |
package com.twitter.concurrent
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import com.twitter.util.Return
@RunWith(classOf[JUnitRunner])
class TxTest extends WordSpec {
  "Tx.twoParty" should {
    "commit when everything goes dandy" in {
      // Neither side resolves until both parties have acked.
      val (senderTx, receiverTx) = Tx.twoParty(123)
      val senderAck = senderTx.ack()
      assert(senderAck.poll == None)
      val receiverAck = receiverTx.ack()
      // Both resolve to Commit; only the receiver observes the value.
      assert(senderAck.poll == Some(Return(Tx.Commit(()))))
      assert(receiverAck.poll == Some(Return(Tx.Commit(123))))
    }

    "abort when receiver nacks" in {
      val (senderTx, receiverTx) = Tx.twoParty(123)
      val senderAck = senderTx.ack()
      assert(senderAck.poll == None)
      receiverTx.nack()
      assert(senderAck.poll == Some(Return(Tx.Abort)))
    }

    "abort when sender nacks" in {
      val (senderTx, receiverTx) = Tx.twoParty(123)
      val receiverAck = receiverTx.ack()
      assert(receiverAck.poll == None)
      senderTx.nack()
      assert(receiverAck.poll == Some(Return(Tx.Abort)))
    }

    "complain on ack ack" in {
      // A party may resolve its side of the transaction only once.
      val (_, receiverTx) = Tx.twoParty(123)
      receiverTx.ack()
      val thrown = intercept[Exception] { receiverTx.ack() }
      assert(thrown == Tx.AlreadyAckd)
    }

    "complain on ack nack" in {
      val (_, receiverTx) = Tx.twoParty(123)
      receiverTx.ack()
      val thrown = intercept[Exception] { receiverTx.nack() }
      assert(thrown == Tx.AlreadyAckd)
    }

    "complain on nack ack" in {
      val (_, receiverTx) = Tx.twoParty(123)
      receiverTx.nack()
      val thrown = intercept[Exception] { receiverTx.ack() }
      assert(thrown == Tx.AlreadyNackd)
    }

    "complain on nack nack" in {
      val (_, receiverTx) = Tx.twoParty(123)
      receiverTx.nack()
      val thrown = intercept[Exception] { receiverTx.nack() }
      assert(thrown == Tx.AlreadyNackd)
    }

    "complain when already done" in {
      // Once both sides have acked, the transaction is sealed.
      val (senderTx, receiverTx) = Tx.twoParty(123)
      senderTx.ack()
      receiverTx.ack()
      val thrown = intercept[Exception] { senderTx.ack() }
      assert(thrown == Tx.AlreadyDone)
    }
  }
}
| BuoyantIO/twitter-util | util-core/src/test/scala/com/twitter/concurrent/TxTest.scala | Scala | apache-2.0 | 1,858 |
package ar.com.crypticmind.swagger.modelgen
import com.wordnik.swagger.model.ModelProperty
import reflect.macros.whitebox.Context
/** Macro-bundle helper that maps Scala types to swagger-core
  * [[com.wordnik.swagger.model.ModelProperty]] construction trees.
  * Each generator emits, via quasiquotes, the code that builds the
  * ModelProperty at the macro expansion site.
  */
class WordnikModelPropertyMapping[C <: Context](val c: C) {
  import c.universe._
  // Common shape of all generators: the tree producing the ModelProperty and
  // the set of types whose models must also be generated (for object refs).
  abstract class ModelPropertyGenerator {
    def toModelProperty: c.Expr[ModelProperty]
    def dependentTypes: Set[c.Type]
  }
  // Plain String field: required, no dependent models.
  object StringModelPropertyGenerator extends ModelPropertyGenerator {
    override val toString = "StringModelPropertyGenerator"
    val toModelProperty =
      c.Expr[ModelProperty] {
        q"""
          com.wordnik.swagger.model.ModelProperty(
            `type` = "string",
            qualifiedType = "java.lang.String",
            required = true)
        """
      }
    val dependentTypes = Set.empty[c.Type]
  }
  // Plain Int field: required, no dependent models.
  object IntModelPropertyGenerator extends ModelPropertyGenerator {
    override val toString = "IntModelPropertyGenerator"
    val toModelProperty =
      c.Expr[ModelProperty] {
        q"""
          com.wordnik.swagger.model.ModelProperty(
            `type` = "int",
            qualifiedType = "scala.Int",
            required = true)
        """
      }
    val dependentTypes = Set.empty[c.Type]
  }
  // Fallback for any other type: emits a reference to the type's own model,
  // which therefore becomes a dependent type to generate.
  class ObjectModelPropertyGenerator(t: c.Type) extends ModelPropertyGenerator {
    val objectName = t.typeSymbol.name.toString
    val qualifiedType = t.typeSymbol.asClass.fullName
    override val toString = "ObjectModelPropertyGenerator"
    def toModelProperty =
      c.Expr[ModelProperty] {
        q"""
          com.wordnik.swagger.model.ModelProperty(
            `type` = $objectName,
            qualifiedType = $qualifiedType,
            required = true,
            items = Some(com.wordnik.swagger.model.ModelRef(`type` = $objectName)))
        """
      }
    val dependentTypes = Set(t)
  }
  // Option[T]: delegates to T's generator and flips `required` to false.
  // NOTE(review): `objectType` appears unused here -- candidate for removal.
  class OptionModelPropertyGenerator(t: c.Type) extends ModelPropertyGenerator {
    val objectType = t.dealias.typeArgs.head.typeSymbol.asClass.fullName
    val mapperForType = selectFor(t.dealias.typeArgs.head)
    override val toString = s"OptionModelPropertyGenerator(${mapperForType.toString})"
    def toModelProperty =
      c.Expr[ModelProperty] {
        q" ${mapperForType.toModelProperty}.copy(required = false) "
      }
    val dependentTypes = mapperForType.dependentTypes
  }
  // Enumeration#Value: the emitted tree uses *runtime* reflection at the call
  // site to enumerate the enum's values into an AllowableListValues constraint.
  class EnumModelPropertyGenerator(t: c.Type) extends ModelPropertyGenerator {
    override val toString = s"EnumModelPropertyGenerator(${t.typeSymbol.owner})"
    def toModelProperty =
      c.Expr[ModelProperty] {
        q""" {
            import reflect.runtime.universe._
            import reflect.runtime._
            val tpe = typeOf[$t].asInstanceOf[TypeRef]
            val pre = tpe.pre
            val mod = pre.termSymbol.asModule
            val modMirror = currentMirror.reflectModule(mod)
            val modInst = modMirror.instance
            val values = modInst.asInstanceOf[Enumeration].values.toList.map(_.toString)
            com.wordnik.swagger.model.ModelProperty(
              `type` = "string",
              qualifiedType = "java.lang.String",
              required = true,
              allowableValues = com.wordnik.swagger.model.AllowableListValues(values))
          }
        """
      }
    val dependentTypes = Set.empty[c.Type]
  }
  // Iterable[T]: emitted as an "array" whose item type comes from T's own
  // generated ModelProperty, evaluated at the expansion site.
  // NOTE(review): `objectType` appears unused here -- candidate for removal.
  class IterModelPropertyGenerator(t: c.Type) extends ModelPropertyGenerator {
    val objectType = t.dealias.typeArgs.head.typeSymbol.asClass.fullName
    val mapperForType = selectFor(t.dealias.typeArgs.head)
    override val toString = s"IterModelPropertyGenerator(${mapperForType.toString})"
    def toModelProperty = {
      c.Expr[ModelProperty] {
        q""" {
            val refType = ${mapperForType.toModelProperty}.`type`
            com.wordnik.swagger.model.ModelProperty(
              `type` = "array",
              qualifiedType = "scala.collection.Iterable",
              required = true,
              items = Some(com.wordnik.swagger.model.ModelRef(`type` = refType)))
          }
        """
      }
    }
    val dependentTypes = mapperForType.dependentTypes
  }
  // Dispatch: order matters -- String/Int before the generic object fallback;
  // Option, Enumeration#Value and Iterable get specialized generators.
  def selectFor(t: c.Type): ModelPropertyGenerator = t match {
    case str if t <:< c.typeOf[String] => StringModelPropertyGenerator
    case int if t <:< c.typeOf[Int] => IntModelPropertyGenerator
    case opt if t <:< c.typeOf[Option[_]] => new OptionModelPropertyGenerator(opt)
    case enm if t <:< c.typeOf[Enumeration#Value] => new EnumModelPropertyGenerator(enm)
    case itr if t <:< c.typeOf[Iterable[_]] => new IterModelPropertyGenerator(itr)
    case other => new ObjectModelPropertyGenerator(other)
  }
}
| swagger-api/scala-swagger-modelgen | macros/src/main/scala_2.11/ar/com/crypticmind/swagger/modelgen/WordnikModelPropertyMapping.scala | Scala | mit | 4,524 |
package org.denigma.drugage
package object domain {
  /** Identifier of a drug record; currently a plain String alias. */
  type DrugId = String
}
| denigma/drugage | app/jvm/src/main/scala/org.denigma.drugage/domain/package.scala | Scala | mpl-2.0 | 80 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and returns a small sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.