code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.onion.mongo
import com.onion.model.{User, Meeting}
import reactivemongo.bson.BSONDocument
import spray.json.{RootJsonReader, RootJsonFormat}
import sprest.models.{UniqueSelector, Model}
import sprest.reactivemongo.ReactiveMongoPersistence
import sprest.reactivemongo.typemappers.{NormalizedIdTransformer, SprayJsonTypeMapper}
import scala.concurrent.{Future, ExecutionContext}
import scala.concurrent.ExecutionContext.Implicits.global
import com.onion.util.OptionUtil._
import sprest.Formats._
/**
* Created by famo on 3/7/15.
*/
/**
 * Central ReactiveMongo persistence layer: one shared driver/connection to the
 * local "onion" database plus generic DAO plumbing built on the sprest CRUD stack.
 */
object DB extends ReactiveMongoPersistence {
import reactivemongo.api._
// Single driver instance; the connection and database handle are created lazily on first use.
val driver = new MongoDriver
lazy val connection = driver.connection(List("localhost"))
lazy val db = connection("onion")
// Maps spray-json values to/from BSON and normalizes the id field name for Mongo.
implicit object JsonTypeMapper extends SprayJsonTypeMapper with NormalizedIdTransformer
// Generic DAO over a collection with String ids. `withNewId` assigns a fresh id to a model being added.
abstract class UnsecuredDAO[M <: Model[String]](collName: String)(withNewId: M => M)(implicit jsformat: RootJsonFormat[M]) extends CollectionDAO[M, String](db(collName)) {
// Default page size for `find`. NOTE(review): `find` also declares an implicit
// `numberPerPage` parameter that this value will satisfy at in-class call sites —
// confirm the shadowing is intentional.
implicit val numberPerPage: Int = 10
case class Selector(id: String) extends UniqueSelector[M, String]
override def generateSelector(id: String) = Selector(id)
// Adds assign a fresh id before insertion; updates persist the model as-is.
override protected def addImpl(m: M)(implicit ec: ExecutionContext) = doAdd(withNewId(m))
override protected def updateImpl(m: M)(implicit ec: ExecutionContext) = doUpdate(m)
// Unchecked removal: does not verify that a matching document existed.
override def remove(selector: Selector)(implicit ec: ExecutionContext) = uncheckedRemoveById(selector.id)
// Pages through documents matching `selector`; `pageNumber` is 1-based and each
// page holds `numberPerPage` documents (skip = (pageNumber - 1) * numberPerPage).
def find[P](selector: BSONDocument = BSONDocument(), pageNumber: Int)(implicit numberPerPage: Int, reads: RootJsonReader[P], ec: ExecutionContext, jsformat: RootJsonFormat[P]) = {
implicit val bsonFormat: BSONFormat[P] = generateBSONFormat[P]
collection.find(selector).options(QueryOpts((pageNumber - 1) * numberPerPage, numberPerPage)).cursor[P].collect[List](numberPerPage)
}
}
// Time-prefixed GUID: millisecond timestamp + random UUID, so ids sort roughly by creation time.
def newGuid = System.currentTimeMillis + "-" + java.util.UUID.randomUUID.toString
// Assigns fresh ids to a meeting and each nested selection/comment, and stamps create/update times.
def generateMeetingIds(meeting: Meeting): Meeting = {
val _selection = meeting.selection.getOrElse(List()).map(_.copy(id = newGuid))
val _comments = meeting.comments.getOrElse(List()).map(_.copy(id = newGuid))
val _createTime = meeting.createTime.getOrElse(System.currentTimeMillis())
// NOTE(review): raw lists/longs are passed where the model fields appear optional —
// presumably OptionUtil._ supplies implicit lifting into Option; confirm.
meeting.copy(id = newGuid, selection = _selection, comments = _comments, createTime = _createTime, updateTime = System.currentTimeMillis())
}
object MeetingDao extends UnsecuredDAO[Meeting]("meeting")(generateMeetingIds) {
// Meetings for a city, excluding soft-deleted ones; `pageNum` is 1-based.
def findByCityId(cityId: String, pageNum: Int = 1): Future[List[Meeting]] = find[Meeting](BSONDocument("cityId" -> cityId, "isDeleted" -> false), pageNum)
}
object UserDao extends UnsecuredDAO[User]("user")(_.copy(id = newGuid))
} | jasoncao/onion-ring | src/main/scala/com/onion/mongo/DB.scala | Scala | apache-2.0 | 2,747
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.cluster.sdv.generated
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util._
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
import org.apache.carbondata.core.datastore.impl.FileFactory
/**
* Test Class for SetParameterTestcase to verify all scenarios
*/
/**
 * Test class verifying that session-level SET parameters (carbon.options.*)
 * take effect during data load: bad-record logger enable/disable, bad-record
 * action (FORCE/REDIRECT/IGNORE/FAIL), empty-data-as-bad-record handling,
 * single pass load, sort scope and unsafe sort, plus SET vs. per-LOAD option
 * precedence.
 */
class SetParameterTestCase extends QueryTest with BeforeAndAfterAll {

  override def beforeAll(): Unit = {
    cleanAllTable()
  }

  /**
   * Drops every table this suite creates, clears the catalog cache and RESETs
   * all session SET options so each run starts from a clean state.
   */
  private def cleanAllTable(): Unit = {
    sql("drop table if exists carbon_table")
    sql("drop table if exists emptyColumnValues")
    sql("drop table if exists carbon_table_bad_record_logger")
    sql("drop table if exists carbon_table_single_pass")
    sql("drop table if exists carbon_table_disable_bad_record_logger")
    sql("drop table if exists carbon_table_load")
    sqlContext.sparkSession.catalog.clearCache()
    sql("RESET")
  }

  override def afterAll(): Unit = {
    cleanAllTable()
  }

  test("TC_001-test SET property for Bad Record Logger Enable=FALSE") {
    sql("drop table if exists carbon_table_disable_bad_record_logger")
    sql("SET carbon.options.bad.records.logger.enable=false")
    sql(
      "create table carbon_table_disable_bad_record_logger(empno int, empname String, designation String, " +
      "doj Timestamp," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table_disable_bad_record_logger options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary',
         |'BAD_RECORDS_ACTION'='REDIRECT','BAD_RECORD_PATH'='$resourcesPath')"""
        .stripMargin)
    // Logger disabled via SET => no .log files even though bad records are redirected.
    assert(getLogFileCount("default", "carbon_table_disable_bad_record_logger", "0") == 0)
  }

  test("TC_002-test SET property for Bad Record Logger Enable=TRUE") {
    sql("drop table if exists carbon_table_bad_record_logger")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql(
      "create table carbon_table_bad_record_logger(empno int, empname String, designation String," +
      " doj Timestamp," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table_bad_record_logger options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary',
         |'BAD_RECORDS_ACTION'='REDIRECT','BAD_RECORD_PATH'='$resourcesPath')"""
        .stripMargin)
    // Logger enabled => at least one bad-record log file must be written.
    assert(getLogFileCount("default", "carbon_table_bad_record_logger", "0") >= 1)
  }

  test("TC_003-test SET property for Bad Record Action=FORCE") {
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.action=force")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj Timestamp," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary')"""
        .stripMargin)
    // FORCE converts bad values to null instead of rejecting rows => all 11 rows load.
    checkAnswer(
      s"""select count(*) from carbon_table""",
      Seq(Row(11)), "SetParameterTestCase-TC_003-test SET property for Bad Record Action=FORCE")
  }

  test("TC_004-test SET property for Bad Record Action=REDIRECT") {
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.bad.records.action=redirect")
    sql(s"SET carbon.options.bad.record.path=$resourcesPath")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj date," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary')"""
        .stripMargin)
    // doj declared as `date` makes every record bad; REDIRECT keeps them out of the table.
    checkAnswer(
      s"""select count(*) from carbon_table""",
      Seq(Row(0)), "SetParameterTestCase-TC_004-test SET property for Bad Record Action=REDIRECT")
  }

  test("TC_005-test SET property for Bad Record Action=IGNORE") {
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.bad.records.action=ignore")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj date," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary')"""
        .stripMargin)
    // IGNORE silently drops all bad records => table stays empty.
    checkAnswer(
      s"""select count(*) from carbon_table""",
      Seq(Row(0)), "SetParameterTestCase-TC_005-test SET property for Bad Record Action=IGNORE")
  }

  test("TC_006-test SET property for Bad Record Action=FAIL") {
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.bad.records.action=fail")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj date," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    // FAIL must abort the load with a bad-record error.
    val exMessage = intercept[Exception] {
      sql(
        s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
           |carbon_table options('FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary')"""
          .stripMargin)
    }
    assert(exMessage.getMessage.contains("Data load failed due to bad record"))
  }

  test("TC_007-test SET property IS__EMPTY_DATA_BAD_RECORD=FALSE") {
    sql("drop table if exists emptyColumnValues")
    sqlContext.sparkSession.catalog.clearCache()
    sql("RESET")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.is.empty.data.badrecord=false")
    sql(
      """CREATE TABLE IF NOT EXISTS emptyColumnValues(ID int,CUST_ID int,cust_name string) STORED
        BY 'org.apache.carbondata.format'
      """)
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/badrecord/doubleqoute.csv' into table
         |emptyColumnValues options('SINGLE_PASS'='true')"""
        .stripMargin)
    // Empty columns are NOT treated as bad records => the single row loads.
    checkAnswer(
      s"""select count(*) from emptyColumnValues""",
      Seq(Row(1)), "SetParameterTestCase-TC_007-test SET property IS__EMPTY_DATA_BAD_RECORD=FALSE")
  }

  test("TC_008-test SET property IS__EMPTY_DATA_BAD_RECORD=TRUE") {
    sql("drop table if exists emptyColumnValues")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.is.empty.data.badrecord=true")
    sql("SET carbon.options.bad.records.action=redirect")
    sql(s"SET carbon.options.bad.record.path=$resourcesPath")
    sql(
      """CREATE TABLE IF NOT EXISTS emptyColumnValues(ID int,CUST_ID int,cust_name string) STORED
        BY 'org.apache.carbondata.format'
      """)
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/badrecord/doubleqoute.csv' into table
         |emptyColumnValues options('SINGLE_PASS'='true')"""
        .stripMargin)
    checkAnswer(
      s"""select count(*) from emptyColumnValues""",
      Seq(Row(1)), "SetParameterTestCase-TC_008-test SET property IS__EMPTY_DATA_BAD_RECORD=TRUE")
  }

  test("TC_009-test SET property for Single Pass") {
    // Only verifies that table creation succeeds with single pass enabled.
    sql("drop table if exists carbon_table_single_pass")
    sql("SET carbon.options.single.pass=true")
    sql(
      "create table carbon_table_single_pass(empno int, empname String, designation String, doj " +
      "Timestamp,workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
      "String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
  }

  test("TC_010-test SET property for Sort Scope-Local_Sort") {
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET carbon.options.sort.scope=local_sort")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj Timestamp," +
      "workgroupcategory int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('SORT_COLUMNS'='empno,empname')")
    // The configured sort scope must surface exactly once in DESC FORMATTED output.
    checkExistence(sql("DESC FORMATTED carbon_table"), true, "local_sort")
    val sortscope = sql("DESC FORMATTED carbon_table").collect().filter(_.getString(1).trim.equals("local_sort"))
    assertResult(1)(sortscope.length)
    assertResult("local_sort")(sortscope(0).getString(1).trim)
  }

  test("TC_011-test SET property to Enable Unsafe Sort") {
    // Only verifies that table creation succeeds with unsafe sort enabled.
    sql("drop table if exists carbon_table")
    sql("SET carbon.options.bad.records.logger.enable=true")
    sql("SET enable.unsafe.sort=true")
    sql(
      "create table carbon_table(empno int, empname String, designation String, doj Timestamp," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
  }

  test("TC_012-test same property with SET and LOAD") {
    // The per-LOAD option (logger TRUE) must win over the session SET (logger false).
    sql("drop table if exists carbon_table_load")
    sql("SET carbon.options.bad.records.logger.enable=false")
    sql(
      "create table carbon_table_load(empno int, empname String, designation String, doj Timestamp," +
      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String," +
      "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/sortcolumns/data.csv' into table
         |carbon_table_load options('BAD_RECORDS_LOGGER_ENABLE'='TRUE','FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary',
         |'BAD_RECORDS_ACTION'='REDIRECT','BAD_RECORD_PATH'='$resourcesPath')"""
        .stripMargin)
    assert(getLogFileCount("default", "carbon_table_load", "0") >= 1)
  }

  /**
   * Counts the bad-record .log files written for the given table segment under
   * the configured redirect path. Returns 0 when the directory yields no files.
   */
  private def getLogFileCount(dbName: String, tableName: String, segment: String): Int = {
    // `val` instead of `var`: the path is built once and never reassigned.
    val path = resourcesPath + "/" + dbName + "/" + tableName + "/" + segment + "/" + segment
    val carbonFiles = FileFactory.getCarbonFile(path).listFiles(new CarbonFileFilter {
      override def accept(file: CarbonFile): Boolean = {
        file.getName.endsWith(".log")
      }
    })
    if (carbonFiles != null) {
      carbonFiles.length
    } else {
      0
    }
  }
}
| manishgupta88/carbondata | integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala | Scala | apache-2.0 | 13,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.api.{DataTypes, TableConfig}
import org.apache.flink.table.functions.FunctionIdentifier
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.functions.utils.ScalarSqlFunction
import org.apache.calcite.rex.RexUtil
import org.apache.calcite.sql.`type`.SqlTypeName
import org.apache.calcite.sql.`type`.SqlTypeName.DATE
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.{DateString, TimeString, TimestampString}
import org.junit.Assert.assertEquals
import org.junit.Test
import java.math.BigDecimal
import java.time.{ZoneId, ZoneOffset}
import java.util.{List => JList, Map => JMap}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* Test for [[PartitionPruner]].
*/
class PartitionPrunerTest extends RexNodeTestBase {
// Prunes on `amount > 100 AND name LIKE 'test%'`: only the ("150", "test2")
// partition satisfies both predicates (the LIKE is case-sensitive, so "Test3" fails).
@Test
def testPrunePartitions(): Unit = {
// amount
val t0 = rexBuilder.makeInputRef(allFieldTypes.get(2), 0)
// 100
val t1 = rexBuilder.makeExactLiteral(BigDecimal.valueOf(100L))
val c1 = rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN, t0, t1)
// name
val t2 = rexBuilder.makeInputRef(allFieldTypes.get(0), 1)
// 'test%'
val t3 = rexBuilder.makeLiteral("test%")
val c2 = rexBuilder.makeCall(SqlStdOperatorTable.LIKE, t2, t3)
// amount > 100 and name like 'test%'
val c3 = rexBuilder.makeCall(SqlStdOperatorTable.AND, c1, c2)
val partitionFieldNames = Array("amount", "name", "flag")
val partitionFieldTypes = Array(DataTypes.INT().getLogicalType,
DataTypes.VARCHAR(100).getLogicalType, DataTypes.BOOLEAN().getLogicalType)
val allPartitions: JList[JMap[String, String]] = List(
Map("amount" -> "20", "name" -> "test1", "flag" -> "true").asJava,
Map("amount" -> "150", "name" -> "test2", "flag" -> "false").asJava,
Map("amount" -> "200", "name" -> "Test3", "flag" -> "false").asJava
).asJava
val config = new TableConfig
val prunedPartitions = PartitionPruner.prunePartitions(
config,
partitionFieldNames,
partitionFieldTypes,
allPartitions,
c3
)
assertEquals(1, prunedPartitions.size())
assertEquals("150", prunedPartitions.get(0).get("amount"))
}
// Prunes with a user-defined scalar function in the predicate: MyUdf(amount) > 100
// keeps the partitions with amount 150 and 200.
@Test
def testPrunePartitionsWithUdf(): Unit = {
// amount
// NOTE(review): the input ref uses allFieldTypes.get(2) at index 1 — presumably
// the type of the "amount" field; confirm against RexNodeTestBase's field layout.
val t0 = rexBuilder.makeInputRef(allFieldTypes.get(2), 1)
// MyUdf(amount)
val t1 = rexBuilder.makeCall(new ScalarSqlFunction(
FunctionIdentifier.of("MyUdf"),
"MyUdf",
Func1,
typeFactory),
t0)
// 100
val t2 = rexBuilder.makeExactLiteral(BigDecimal.valueOf(100L))
// MyUdf(amount) > 100
val c = rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN, t1, t2)
val partitionFieldNames = Array("name", "amount")
val partitionFieldTypes = Array(
DataTypes.VARCHAR(100).getLogicalType, DataTypes.INT().getLogicalType)
val allPartitions: JList[JMap[String, String]] = List(
Map("amount" -> "20", "name" -> "test1").asJava,
Map("amount" -> "150", "name" -> "test2").asJava,
Map("amount" -> "200", "name" -> "Test3").asJava
).asJava
val config = new TableConfig
val prunedPartitions = PartitionPruner.prunePartitions(
config,
partitionFieldNames,
partitionFieldTypes,
allPartitions,
c
)
assertEquals(2, prunedPartitions.size())
assertEquals("150", prunedPartitions.get(0).get("amount"))
assertEquals("200", prunedPartitions.get(1).get("amount"))
}
// Prunes on all four temporal types (DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL
// TIME ZONE) with strict greater-than bounds; only the last partition exceeds
// every bound. The session time zone is pinned to UTC so the local-time-zone
// timestamp comparison is deterministic.
@Test
def testTimePrunePartitions(): Unit = {
val f0 = rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.DATE), 0)
val f1 = rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.TIME, 0), 1)
val f2 = rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.TIMESTAMP, 3), 2)
val f3 = rexBuilder.makeInputRef(
typeFactory.createSqlType(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE, 3), 3)
val c0 = rexBuilder.makeCall(
SqlStdOperatorTable.GREATER_THAN,
f0,
rexBuilder.makeDateLiteral(new DateString("2018-08-06")))
val c1 = rexBuilder.makeCall(
SqlStdOperatorTable.GREATER_THAN,
f1,
rexBuilder.makeTimeLiteral(new TimeString("12:08:06"), 0))
val c2 = rexBuilder.makeCall(
SqlStdOperatorTable.GREATER_THAN,
f2,
rexBuilder.makeTimestampLiteral(new TimestampString("2018-08-06 12:08:06.123"), 3))
val c3 = rexBuilder.makeCall(
SqlStdOperatorTable.GREATER_THAN,
f3,
rexBuilder.makeTimestampWithLocalTimeZoneLiteral(
new TimestampString("2018-08-06 12:08:06.123"), 3))
// f0 > DATE AND f1 > TIME AND f2 > TIMESTAMP AND f3 > TIMESTAMP_LTZ
val condition = RexUtil.composeConjunction(rexBuilder, Seq(c0, c1, c2, c3))
val partitionFieldNames = Array("f0", "f1", "f2", "f3")
val partitionFieldTypes = Array(
DataTypes.DATE().getLogicalType,
DataTypes.TIME(0).getLogicalType,
DataTypes.TIMESTAMP(3).getLogicalType,
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).getLogicalType)
val allPartitions: JList[JMap[String, String]] = List(
Map(
"f0" -> "2018-08-05",
"f1" -> "12:08:07",
"f2" -> "2018-08-06 12:08:06.124",
"f3" -> "2018-08-06 12:08:06.124").asJava,
Map(
"f0" -> "2018-08-07",
"f1" -> "12:08:05",
"f2" -> "2018-08-06 12:08:06.124",
"f3" -> "2018-08-06 12:08:06.124").asJava,
Map(
"f0" -> "2018-08-07",
"f1" -> "12:08:07",
"f2" -> "2018-08-06 12:08:06.124",
"f3" -> "2018-08-06 12:08:06.124").asJava
).asJava
val config = new TableConfig
config.setLocalTimeZone(ZoneOffset.ofHours(0))
val prunedPartitions = PartitionPruner.prunePartitions(
config,
partitionFieldNames,
partitionFieldTypes,
allPartitions,
condition
)
assertEquals(1, prunedPartitions.size())
assertEquals(allPartitions(2), prunedPartitions(0))
}
}
| wwjiang007/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/utils/PartitionPrunerTest.scala | Scala | apache-2.0 | 6,804 |
package epic.features
import epic.framework.Feature
/**
 * Surface featurizer emitting a single span-shape signature feature per span,
 * rendered with one word of context on each side.
 */
@SerialVersionUID(1L)
class SpanShapeFeaturizer extends SurfaceFeaturizer[String] with Serializable {
  def anchor(words: IndexedSeq[String]): SurfaceFeatureAnchoring[String] =
    new SurfaceFeatureAnchoring[String] {
      def featuresForSpan(begin: Int, end: Int): Array[Feature] = {
        // Shape of the span plus its immediate left/right context characters.
        val shape = SpanShapeGenerator.signatureFor(words, begin, end, includeContext = true)
        Array[Feature](SpanShapeFeature(shape))
      }
    }
}
/**
 * Span-shape featurizer with configurable context width and, optionally, full
 * word shapes (rather than single characters) for the context words.
 */
class SpanShapeFeaturizerBetter(numContextWords: Int, useRichContext: Boolean) extends SurfaceFeaturizer[String] with Serializable {
  def anchor(words: IndexedSeq[String]): SurfaceFeatureAnchoring[String] =
    new SurfaceFeatureAnchoring[String] {
      def featuresForSpan(begin: Int, end: Int): Array[Feature] = {
        val shape =
          SpanShapeGenerator.signatureAndContextFor(words, begin, end, numContextWords, useRichContext)
        Array[Feature](SpanShapeFeature(shape))
      }
    }
}
/**
 * Span-shape featurizer that renders frequent context words verbatim (from
 * `commonWords`) and shapes the rest.
 */
class FullWordSpanShapeFeaturizer(commonWords: Set[String], numContextWords: Int, useRichContext: Boolean) extends SurfaceFeaturizer[String] with Serializable {
  def anchor(words: IndexedSeq[String]): SurfaceFeatureAnchoring[String] =
    new SurfaceFeatureAnchoring[String] {
      def featuresForSpan(begin: Int, end: Int): Array[Feature] = {
        // NOTE(review): rich context is hard-coded to true here; the
        // `useRichContext` constructor flag is not forwarded — confirm intended.
        val shape =
          SpanShapeGenerator.signatureAndContextFor(words, begin, end, numContextWords, true, commonWords)
        Array[Feature](SpanShapeFeature(shape))
      }
    }
}
/**
 * Renders a coarse "shape" signature for a span of words: each word's leading
 * character is binned (uppercase -> 'X', lowercase -> 'x', digit -> 'd',
 * punctuation kept as-is), repeated runs within a word are collapsed to an 'e'
 * marker, long spans elide their middle with '~', and the span may be flanked
 * by shapes of surrounding context ('#' marks the sequence boundary).
 *
 * @author dlwh
 */
object SpanShapeGenerator extends Serializable {
// At most ~MAX_LEN word shapes are rendered per span: the first and last
// MAX_LEN/2 words, with '~' standing in for the elided middle.
val MAX_LEN = 6
def apply(v1: IndexedSeq[String], begin: Int, end: Int): String = signatureFor(v1,begin, end)
// Shape of words(begin until end); when includeContext is set, the result is
// wrapped as <leftChar>[ ... ]<rightChar> using the binned first characters of
// the adjacent words ('#' at sequence boundaries).
def signatureFor(words: IndexedSeq[String], begin: Int, end: Int, includeContext: Boolean = true) = {
val result = new StringBuilder(end-begin)
if (includeContext) {
if (begin - 1 < 0) {
result += '#'
} else {
result += binCharacter(words(begin - 1).head)
}
result += '['
}
var i = begin;
// Leading portion of the span (up to MAX_LEN/2 + 1 words).
while (i < math.min(begin + MAX_LEN/2 + 1, end)) {
appendWordShape(i, words, result)
i += 1
}
// Span too long to render fully: mark the elided middle and jump to the tail.
if(i < end) {
//val remainingLength = distanceBinner.binnedDistance(begin, end - MAX_LEN)
//result ++= "~" * remainingLength
result += '~'
i = end - MAX_LEN/2
}
while (i < end) {
appendWordShape(i, words, result)
i += 1
}
if (includeContext) {
result += ']';
if (end >= words.length) {
result += '#';
} else {
result += binCharacter(words(end).head)
}
}
result.toString
}
// Similar, but has the capability to use more and richer context:
// numContextWords words on each side are rendered either verbatim (if listed in
// commonWords), as full word shapes (richContext), or as a single binned char.
def signatureAndContextFor(words: IndexedSeq[String], begin: Int, end: Int, numContextWords: Int = 1, richContext: Boolean = false, commonWords: Set[String] = Set.empty) = {
val result = new StringBuilder(end-begin)
var i = begin - numContextWords;
// Left context.
while (i < begin) {
if (i < 0) {
result += '#'
} else {
if(commonWords(words(i))) {
result ++= words(i)
} else if (richContext) {
appendWordShape(i, words, result)
} else {
result += binCharacter(words(i).head)
}
}
i += 1;
}
result += '['
// Span body: leading words, optional '~' elision, trailing words (same scheme
// as signatureFor).
while (i < math.min(begin + MAX_LEN/2 + 1, end)) {
appendWordShape(i, words, result)
i += 1
}
if(i < end) {
//val remainingLength = distanceBinner.binnedDistance(begin, end - MAX_LEN)
//result ++= "~" * remainingLength
result += '~'
i = end - MAX_LEN/2
}
while (i < end) {
appendWordShape(i, words, result)
i += 1
}
result += ']';
// Right context.
while (i < end + numContextWords) {
if (i >= words.length) {
result += '#';
} else {
if(commonWords(words(i))) {
result ++= words(i)
} else if (richContext) {
appendWordShape(i, words, result)
} else {
result += binCharacter(words(i).head)
}
}
i += 1;
}
result.toString
}
// Appends the shape of words(i) to `result` ('#' shape for out-of-range i).
// PTB bracket tokens (-LRB- etc.) are first mapped back to their literal
// bracket characters.
def appendWordShape(i: Int, words: IndexedSeq[String], result: StringBuilder) {
val w = if (i < 0 || i >= words.length) "#" else words(i)
if (w.isEmpty) {
// probably won't happen.
result += 'ε'
} else {
var c = w(0)
if(c == '-') {
c = w match {
case "-LRB-" => '('
case "-RRB-" => ')'
case "-LSB-" => '['
case "-RSB-" => ']'
case "-LCB-" => '{'
case "-RCB-" => '}'
case _ => c
}
}
val x = binCharacter(c)
// Run-collapsing: a repeat of the previous shape char becomes a single 'e';
// further repeats are dropped. NOTE(review): the `> 1` / `> 2` length guards
// mean collapsing never applies to the very first emitted character —
// presumably to protect the context prefix; confirm intended.
if (result.length > 1 && (result.last == x)) {
result += 'e'
()
} else if (result.length > 2 && result.last == 'e' && result(result.length - 2) == x) {
() // nothing, already have our e
} else {
result += x
}
}
}
// Bins a character into its shape class: 'X' uppercase letter, 'x' lowercase
// letter, 'd' digit; anything else (punctuation etc.) is kept verbatim.
def binCharacter(c: Char): Char = {
if (c.isLetter && c.isUpper) 'X' else if (c.isLetter) 'x' else if (c.isDigit) 'd' else c
}
// Kept for the commented-out binned-elision variant above.
val distanceBinner = DistanceBinner()
// Similar, but has the capability to use more and richer context; additionally
// always renders the two words at the split point (split, split + 1) even when
// the span middle is elided.
def splitShapeFor(words: IndexedSeq[String], begin: Int, split : Int , end: Int, numContextWords: Int = 1, richContext: Boolean = false, commonWords: Set[String] = Set.empty) = {
val result = new StringBuilder(end-begin)
var i = begin - numContextWords;
// Left context (same scheme as signatureAndContextFor).
while (i < begin) {
if (i < 0) {
result += '#'
} else {
if(commonWords(words(i))) {
result ++= words(i)
} else if (richContext) {
appendWordShape(i, words, result)
} else {
result += binCharacter(words(i).head)
}
}
i += 1;
}
result += '['
while (i < math.min(begin + MAX_LEN/2 + 1, end)) {
appendWordShape(i, words, result)
i += 1
}
// Force the split-point words into the signature.
if(i <= split) {
if(i < split) {
result += '~'
}
appendWordShape(split, words, result)
appendWordShape(split + 1, words, result)
i = split + 2
}
if(i < end) {
//val remainingLength = distanceBinner.binnedDistance(begin, end - MAX_LEN)
//result ++= "~" * remainingLength
result += '~'
// NOTE(review): min(split, ...) can move `i` back before words already
// rendered around the split, emitting them twice — confirm intended.
i = math.min(split, end - MAX_LEN/2)
}
while (i < end) {
appendWordShape(i, words, result)
i += 1
}
result += ']';
// Right context.
while (i < end + numContextWords) {
if (i >= words.length) {
result += '#';
} else {
if(commonWords(words(i))) {
result ++= words(i)
} else if (richContext) {
appendWordShape(i, words, result)
} else {
result += binCharacter(words(i).head)
}
}
i += 1;
}
result.toString
}
}
/** Feature wrapping a rendered span-shape signature string. */
case class SpanShapeFeature(shape: String) extends SpanFeature
| maxim-rabinovich/epic | src/main/scala/epic/features/SpanShapeGenerator.scala | Scala | apache-2.0 | 7,581 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{File, FileOutputStream, IOException, OutputStreamWriter}
import java.net.{InetAddress, UnknownHostException, URI}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{Locale, Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.util.control.NonFatal
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.DataOutputBuffer
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.deploy.yarn.security.ConfigurableCredentialManager
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.util.{CallerContext, Utils}
private[spark] class Client(
val args: ClientArguments,
val hadoopConf: Configuration,
val sparkConf: SparkConf)
extends Logging {
import Client._
import YarnSparkHadoopUtil._
def this(clientArgs: ClientArguments, spConf: SparkConf) =
this(clientArgs, SparkHadoopUtil.get.newConfiguration(spConf), spConf)
// Client used to talk to the YARN ResourceManager; initialized/started in submitApplication().
private val yarnClient = YarnClient.createYarnClient
private val yarnConf = new YarnConfiguration(hadoopConf)
// True when the driver runs inside the YARN cluster ("cluster" deploy mode).
private val isClusterMode = sparkConf.get("spark.submit.deployMode", "client") == "cluster"
// AM related configurations
// In cluster mode the AM hosts the driver, so the driver settings apply to the AM.
private val amMemory = if (isClusterMode) {
sparkConf.get(DRIVER_MEMORY).toInt
} else {
sparkConf.get(AM_MEMORY).toInt
}
private val amMemoryOverhead = {
val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
// When not explicitly configured, default to max(factor * amMemory, minimum overhead).
sparkConf.get(amMemoryOverheadEntry).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
}
private val amCores = if (isClusterMode) {
sparkConf.get(DRIVER_CORES)
} else {
sparkConf.get(AM_CORES)
}
// Executor related configurations
private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
private val distCacheMgr = new ClientDistributedCacheManager()
// Kerberos login state; populated by setupCredentials().
private var loginFromKeytab = false
private var principal: String = null
private var keytab: String = null
private var credentials: Credentials = null
// Unique, conflict-free name under which the keytab is staged for the AM.
private var amKeytabFileName: String = null
private val launcherBackend = new LauncherBackend() {
override def onStopRequest(): Unit = {
// In cluster mode, once we know the app id, we can ask YARN to kill the app;
// otherwise just report the KILLED state and shut this client down.
if (isClusterMode && appId != null) {
yarnClient.killApplication(appId)
} else {
setState(SparkAppHandle.State.KILLED)
stop()
}
}
}
// Whether to return immediately after submission instead of monitoring (cluster mode only).
private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)
private var appId: ApplicationId = null
// The app staging dir based on the STAGING_DIR configuration if configured
// otherwise based on the users home directory.
private val appStagingBaseDir = sparkConf.get(STAGING_DIR).map { new Path(_) }
.getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
private val credentialManager = new ConfigurableCredentialManager(sparkConf, hadoopConf)
/**
 * Forward an application state change to the launcher backend, so an external
 * launcher (e.g. SparkLauncher) can observe the application's lifecycle.
 */
def reportLauncherState(state: SparkAppHandle.State): Unit = {
launcherBackend.setState(state)
}
/**
 * Release client-side resources: closes the launcher backend connection and
 * stops the YARN client.
 */
def stop(): Unit = {
launcherBackend.close()
yarnClient.stop()
// Unset YARN mode system env variable, to allow switching between cluster types.
System.clearProperty("SPARK_YARN_MODE")
}
/**
 * Submit an application running our ApplicationMaster to the ResourceManager.
 *
 * The stable Yarn API provides a convenience method (YarnClient#createApplication) for
 * creating applications and setting up the application submission context. This was not
 * available in the alpha API.
 *
 * @return the id of the submitted application.
 */
def submitApplication(): ApplicationId = {
  try {
    launcherBackend.connect()
    // Set up the credentials before doing anything else,
    // so we don't have issues at any point.
    setupCredentials()
    yarnClient.init(yarnConf)
    yarnClient.start()
    logInfo("Requesting a new application from cluster with %d NodeManagers"
      .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))
    // Get a new application from our RM. Assign the *member* `appId` (the original code
    // shadowed it with a local var, so the member was never set and the launcher
    // backend's onStopRequest() could never kill the submitted application).
    val newApp = yarnClient.createApplication()
    val newAppResponse = newApp.getNewApplicationResponse()
    appId = newAppResponse.getApplicationId()
    new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
      Option(appId.toString)).setCurrentContext()
    // Verify whether the cluster has enough resources for our AM
    verifyClusterResources(newAppResponse)
    // Set up the appropriate contexts to launch our AM
    val containerContext = createContainerLaunchContext(newAppResponse)
    val appContext = createApplicationSubmissionContext(newApp, containerContext)
    // Finally, submit and monitor the application
    logInfo(s"Submitting application $appId to ResourceManager")
    yarnClient.submitApplication(appContext)
    launcherBackend.setAppId(appId.toString)
    reportLauncherState(SparkAppHandle.State.SUBMITTED)
    appId
  } catch {
    case e: Throwable =>
      // Best-effort cleanup of the staging dir before rethrowing (deliberately
      // broad catch: the exception is always propagated to the caller).
      if (appId != null) {
        cleanupStagingDir(appId)
      }
      throw e
  }
}
/**
 * Delete the application's staging directory, unless the user has asked for
 * staging files to be preserved. Failures are logged, never propagated.
 */
private def cleanupStagingDir(appId: ApplicationId): Unit = {
  val stagingDir = new Path(appStagingBaseDir, getAppStagingDir(appId))
  try {
    val keepFiles = sparkConf.get(PRESERVE_STAGING_FILES)
    val fs = stagingDir.getFileSystem(hadoopConf)
    // Only log when a recursive delete actually happened.
    if (!keepFiles && fs.delete(stagingDir, true)) {
      logInfo(s"Deleted staging directory $stagingDir")
    }
  } catch {
    case e: IOException =>
      logWarning("Failed to cleanup staging dir " + stagingDir, e)
  }
}
/**
 * Set up the context for submitting our ApplicationMaster.
 * This uses the YarnClientApplication not available in the Yarn alpha API.
 *
 * @param newApp the application handle returned by the ResourceManager.
 * @param containerContext the launch context for the AM container.
 * @return the populated submission context to hand back to the RM.
 */
def createApplicationSubmissionContext(
newApp: YarnClientApplication,
containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
val appContext = newApp.getApplicationSubmissionContext
appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
appContext.setQueue(sparkConf.get(QUEUE_NAME))
appContext.setAMContainerSpec(containerContext)
appContext.setApplicationType("SPARK")
// Propagate user-provided tags so external tooling can locate the app in the RM.
sparkConf.get(APPLICATION_TAGS).foreach { tags =>
appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
}
sparkConf.get(MAX_APP_ATTEMPTS) match {
case Some(v) => appContext.setMaxAppAttempts(v)
case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
"Cluster's default value will be used.")
}
sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
appContext.setAttemptFailuresValidityInterval(interval)
}
// Resource ask for the AM container: requested memory includes the overhead.
val capability = Records.newRecord(classOf[Resource])
capability.setMemory(amMemory + amMemoryOverhead)
capability.setVirtualCores(amCores)
sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
case Some(expr) =>
// A node label expression requires a full ResourceRequest rather than a plain Resource.
val amRequest = Records.newRecord(classOf[ResourceRequest])
amRequest.setResourceName(ResourceRequest.ANY)
amRequest.setPriority(Priority.newInstance(0))
amRequest.setCapability(capability)
amRequest.setNumContainers(1)
amRequest.setNodeLabelExpression(expr)
appContext.setAMContainerResourceRequest(amRequest)
case None =>
appContext.setResource(capability)
}
sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
try {
val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])
// These two methods were added in Hadoop 2.6.4, so we still need to use reflection to
// avoid compile error when building against Hadoop 2.6.0 ~ 2.6.3.
val setRolledLogsIncludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsIncludePattern", classOf[String])
setRolledLogsIncludePatternMethod.invoke(logAggregationContext, includePattern)
sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
val setRolledLogsExcludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsExcludePattern", classOf[String])
setRolledLogsExcludePatternMethod.invoke(logAggregationContext, excludePattern)
}
appContext.setLogAggregationContext(logAggregationContext)
} catch {
// NonFatal so that errors like OOM still propagate; reflection failures on old
// YARN versions only disable the rolled-log patterns.
case NonFatal(e) =>
logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
"does not support it", e)
}
}
appContext
}
/**
 * Set up security tokens for launching our ApplicationMaster container.
 * Serializes the client's current credentials into the container launch context
 * so the AM process starts with them.
 */
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
val dob = new DataOutputBuffer
credentials.writeTokenStorageToStream(dob)
amContainer.setTokens(ByteBuffer.wrap(dob.getData))
}
/**
 * Get the application report from the ResourceManager for an application we have submitted.
 * Thin pass-through to YarnClient; NOTE(review): the underlying call can presumably throw
 * ApplicationNotFoundException for unknown ids — callers should be prepared for that.
 */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
yarnClient.getApplicationReport(appId)
/**
 * Return the security token used by this client to communicate with the ApplicationMaster.
 * If no security is enabled, the token in the report is null and an empty string is returned.
 */
private def getClientToken(report: ApplicationReport): String =
  Option(report.getClientToAMToken).fold("")(_.toString)
/**
 * Fail fast if we have requested more resources per container than is available in the cluster.
 *
 * @param newAppResponse RM response carrying the cluster's maximum per-container capability.
 * @throws IllegalArgumentException if the executor or AM memory (including overhead)
 *                                  exceeds the cluster's per-container maximum.
 */
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
logInfo("Verifying our application has not requested more than the maximum " +
s"memory capability of the cluster ($maxMem MB per container)")
// Executor check: heap plus overhead must fit in one container.
val executorMem = executorMemory + executorMemoryOverhead
if (executorMem > maxMem) {
throw new IllegalArgumentException(s"Required executor memory ($executorMemory" +
s"+$executorMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
"'yarn.nodemanager.resource.memory-mb'.")
}
// Same check for the AM container.
val amMem = amMemory + amMemoryOverhead
if (amMem > maxMem) {
throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
}
logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
amMem,
amMemoryOverhead))
// We could add checks to make sure the entire cluster has enough resources but that involves
// getting all the node reports and computing ourselves.
}
/**
 * Copy the given file to a remote file system (e.g. HDFS) if needed.
 * The file is only copied if the source and destination file systems are different. This is used
 * for preparing resources for launching the ApplicationMaster container. Exposed for testing.
 *
 * @param destDir destination directory on the (possibly remote) file system.
 * @param srcPath path of the file to copy.
 * @param replication replication factor to set on the copied file.
 * @param symlinkCache cache of already-resolved directory URIs, updated in place.
 * @param force copy even when source and destination file systems are the same.
 * @param destName optional name for the file at the destination (defaults to the source name).
 * @return the fully-qualified, symlink-resolved destination path.
 */
private[yarn] def copyFileToRemote(
destDir: Path,
srcPath: Path,
replication: Short,
symlinkCache: Map[URI, Path],
force: Boolean = false,
destName: Option[String] = None): Path = {
val destFs = destDir.getFileSystem(hadoopConf)
val srcFs = srcPath.getFileSystem(hadoopConf)
var destPath = srcPath
if (force || !compareFs(srcFs, destFs)) {
destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
logInfo(s"Uploading resource $srcPath -> $destPath")
FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
destFs.setReplication(destPath, replication)
destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
} else {
logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
}
// Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
// version shows the specific version in the distributed cache configuration
val qualifiedDestPath = destFs.makeQualified(destPath)
val qualifiedDestDir = qualifiedDestPath.getParent
val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
fc.resolvePath(qualifiedDestDir)
})
new Path(resolvedDestDir, qualifiedDestPath.getName())
}
/**
 * Upload any resources to the distributed cache if needed. If a resource is intended to be
 * consumed locally, set up the appropriate config for downstream code to handle it properly.
 * This is used for setting up a container launch context for our ApplicationMaster.
 * Exposed for testing.
 *
 * @param destDir the application's staging directory on the remote file system.
 * @param pySparkArchives archives needed to run PySpark, distributed alongside the app.
 * @return map of dist-cache resource name to its LocalResource descriptor, to be set on
 *         the AM container launch context.
 */
def prepareLocalResources(
destDir: Path,
pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
logInfo("Preparing resources for our AM container")
// Upload Spark and the application JAR to the remote file system if necessary,
// and add them as local resources to the application master.
val fs = destDir.getFileSystem(hadoopConf)
// Merge credentials obtained from registered providers
val nearestTimeOfNextRenewal = credentialManager.obtainCredentials(hadoopConf, credentials)
if (credentials != null) {
// Add credentials to current user's UGI, so that following operations don't need to use the
// Kerberos tgt to get delegations again in the client side.
UserGroupInformation.getCurrentUser.addCredentials(credentials)
// NOTE(review): "\\n" is a two-character separator (backslash + n), not a newline;
// presumably "\n" was intended — verify before changing.
logDebug(YarnSparkHadoopUtil.get.dumpTokens(credentials).mkString("\\n"))
}
// If we use principal and keytab to login, also credentials can be renewed some time
// after current time, we should pass the next renewal and updating time to credential
// renewer and updater.
if (loginFromKeytab && nearestTimeOfNextRenewal > System.currentTimeMillis() &&
nearestTimeOfNextRenewal != Long.MaxValue) {
// Valid renewal time is 75% of next renewal time, and the valid update time will be
// slightly later then renewal time (80% of next renewal time). This is to make sure
// credentials are renewed and updated before expired.
val currTime = System.currentTimeMillis()
val renewalTime = (nearestTimeOfNextRenewal - currTime) * 0.75 + currTime
val updateTime = (nearestTimeOfNextRenewal - currTime) * 0.8 + currTime
sparkConf.set(CREDENTIALS_RENEWAL_TIME, renewalTime.toLong)
sparkConf.set(CREDENTIALS_UPDATE_TIME, updateTime.toLong)
}
// Used to keep track of URIs added to the distributed cache. If the same URI is added
// multiple times, YARN will fail to launch containers for the app with an internal
// error.
val distributedUris = new HashSet[String]
// Used to keep track of URIs(files) added to the distribute cache have the same name. If
// same name but different path files are added multiple time, YARN will fail to launch
// containers for the app with an internal error.
val distributedNames = new HashSet[String]
val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
.getOrElse(fs.getDefaultReplication(destDir))
val localResources = HashMap[String, LocalResource]()
FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))
val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()
// Register a URI for distribution; returns false (and warns) on duplicate URI or
// duplicate file name, both of which YARN rejects at container launch.
def addDistributedUri(uri: URI): Boolean = {
val uriStr = uri.toString()
val fileName = new File(uri.getPath).getName
if (distributedUris.contains(uriStr)) {
logWarning(s"Same path resource $uri added multiple times to distributed cache.")
false
} else if (distributedNames.contains(fileName)) {
logWarning(s"Same name resource $uri added multiple times to distributed cache")
false
} else {
distributedUris += uriStr
distributedNames += fileName
true
}
}
/**
 * Distribute a file to the cluster.
 *
 * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
 * to HDFS (if not already there) and added to the application's distributed cache.
 *
 * @param path URI of the file to distribute.
 * @param resType Type of resource being distributed.
 * @param destName Name of the file in the distributed cache.
 * @param targetDir Subdirectory where to place the file.
 * @param appMasterOnly Whether to distribute only to the AM.
 * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
 *         localized path for non-local paths, or the input `path` for local paths.
 *         The localized path will be null if the URI has already been added to the cache.
 */
def distribute(
path: String,
resType: LocalResourceType = LocalResourceType.FILE,
destName: Option[String] = None,
targetDir: Option[String] = None,
appMasterOnly: Boolean = false): (Boolean, String) = {
val trimmedPath = path.trim()
val localURI = Utils.resolveURI(trimmedPath)
if (localURI.getScheme != LOCAL_SCHEME) {
if (addDistributedUri(localURI)) {
val localPath = getQualifiedLocalPath(localURI, hadoopConf)
// Link name precedence: explicit destName, then the URI fragment, then the file name.
val linkname = targetDir.map(_ + "/").getOrElse("") +
destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
distCacheMgr.addResource(
destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
appMasterOnly = appMasterOnly)
(false, linkname)
} else {
(false, null)
}
} else {
(true, trimmedPath)
}
}
// If we passed in a keytab, make sure we copy the keytab to the staging directory on
// HDFS, and setup the relevant environment vars, so the AM can login again.
if (loginFromKeytab) {
logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
" via the YARN Secure Distributed Cache.")
val (_, localizedPath) = distribute(keytab,
destName = Some(amKeytabFileName),
appMasterOnly = true)
require(localizedPath != null, "Keytab file already distributed.")
}
/**
 * Add Spark to the cache. There are two settings that control what files to add to the cache:
 * - if a Spark archive is defined, use the archive. The archive is expected to contain
 *   jar files at its root directory.
 * - if a list of jars is provided, filter the non-local ones, resolve globs, and
 *   add the found files to the cache.
 *
 * Note that the archive cannot be a "local" URI. If none of the above settings are found,
 * then upload all files found in $SPARK_HOME/jars.
 */
val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
if (sparkArchive.isDefined) {
val archive = sparkArchive.get
require(!isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
distribute(Utils.resolveURI(archive).toString,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
} else {
sparkConf.get(SPARK_JARS) match {
case Some(jars) =>
// Break the list of jars to upload, and resolve globs.
val localJars = new ArrayBuffer[String]()
jars.foreach { jar =>
if (!isLocalUri(jar)) {
val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
val pathFs = FileSystem.get(path.toUri(), hadoopConf)
pathFs.globStatus(path).filter(_.isFile()).foreach { entry =>
val uri = entry.getPath().toUri()
statCache.update(uri, entry)
distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
}
} else {
localJars += jar
}
}
// Propagate the local URIs to the containers using the configuration.
sparkConf.set(SPARK_JARS, localJars)
case None =>
// No configuration, so fall back to uploading local jar files.
logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
"to uploading libraries under SPARK_HOME.")
val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
sparkConf.getenv("SPARK_HOME")))
val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))
try {
// Level 0 = store without compression; the archive is temporary and local.
jarsStream.setLevel(0)
jarsDir.listFiles().foreach { f =>
if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
jarsStream.putNextEntry(new ZipEntry(f.getName))
Files.copy(f, jarsStream)
jarsStream.closeEntry()
}
}
} finally {
jarsStream.close()
}
distribute(jarsArchive.toURI.getPath,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
}
}
/**
 * Copy user jar to the distributed cache if their scheme is not "local".
 * Otherwise, set the corresponding key in our SparkConf to handle it downstream.
 */
Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
if (isLocal) {
require(localizedPath != null, s"Path $jar already distributed")
// If the resource is intended for local use only, handle this downstream
// by setting the appropriate property
sparkConf.set(APP_JAR, localizedPath)
}
}
/**
 * Do the same for any additional resources passed in through ClientArguments.
 * Each resource category is represented by a 3-tuple of:
 * (1) comma separated list of resources in this category,
 * (2) resource type, and
 * (3) whether to add these resources to the classpath
 */
val cachedSecondaryJarLinks = ListBuffer.empty[String]
List(
(sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
(sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
(sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
).foreach { case (flist, resType, addToClasspath) =>
flist.foreach { file =>
val (_, localizedPath) = distribute(file, resType = resType)
// If addToClassPath, we ignore adding jar multiple times to distributed cache.
if (addToClasspath) {
if (localizedPath != null) {
cachedSecondaryJarLinks += localizedPath
}
} else {
if (localizedPath == null) {
throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
" to the distributed cache.")
}
}
}
}
if (cachedSecondaryJarLinks.nonEmpty) {
sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
}
if (isClusterMode && args.primaryPyFile != null) {
distribute(args.primaryPyFile, appMasterOnly = true)
}
pySparkArchives.foreach { f => distribute(f) }
// The python files list needs to be treated especially. All files that are not an
// archive need to be placed in a subdirectory that will be added to PYTHONPATH.
sparkConf.get(PY_FILES).foreach { f =>
val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
distribute(f, targetDir = targetDir)
}
// Update the configuration with all the distributed files, minus the conf archive. The
// conf archive will be handled by the AM differently so that we avoid having to send
// this configuration by other means. See SPARK-14602 for one reason of why this is needed.
distCacheMgr.updateConfiguration(sparkConf)
// Upload the conf archive to HDFS manually, and record its location in the configuration.
// This will allow the AM to know where the conf archive is in HDFS, so that it can be
// distributed to the containers.
//
// This code forces the archive to be copied, so that unit tests pass (since in that case both
// file systems are the same and the archive wouldn't normally be copied). In most (all?)
// deployments, the archive would be copied anyway, since it's a temp file in the local file
// system.
val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
sparkConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())
val localConfArchive = new Path(createConfArchive().toURI())
copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
destName = Some(LOCALIZED_CONF_ARCHIVE))
// Manually add the config archive to the cache manager so that the AM is launched with
// the proper files set up.
distCacheMgr.addResource(
remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)
// Clear the cache-related entries from the configuration to avoid them polluting the
// UI's environment page. This works for client mode; for cluster mode, this is handled
// by the AM.
CACHE_CONFIGS.foreach(sparkConf.remove)
localResources
}
/**
 * Create an archive with the config files for distribution.
 *
 * These will be used by AM and executors. The files are zipped and added to the job as an
 * archive, so that YARN will explode it when distributing to AM and executors. This directory
 * is then added to the classpath of AM and executor process, just to make sure that everybody
 * is using the same default config.
 *
 * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
 * shows up in the classpath before YARN_CONF_DIR.
 *
 * Currently this makes a shallow copy of the conf directory. If there are cases where a
 * Hadoop config directory contains subdirectories, this code will have to be fixed.
 *
 * The archive also contains some Spark configuration. Namely, it saves the contents of
 * SparkConf in a file to be loaded by the AM process.
 *
 * @return the local temp zip file containing the config files.
 */
private def createConfArchive(): File = {
val hadoopConfFiles = new HashMap[String, File]()
// Uploading $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that
// the executors will use the latest configurations instead of the default values. This is
// required when user changes log4j.properties directly to set the log configurations. If
// configuration file is provided through --files then executors will be taking configurations
// from --files instead of $SPARK_CONF_DIR/log4j.properties.
// Also uploading metrics.properties to distributed cache if exists in classpath.
// If user specify this file using --files then executors will use the one
// from --files instead.
for { prop <- Seq("log4j.properties", "metrics.properties")
url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
if url.getProtocol == "file" } {
hadoopConfFiles(prop) = new File(url.getPath)
}
// HADOOP_CONF_DIR is scanned first, so its files win over YARN_CONF_DIR entries
// with the same name (first insertion wins via the contains() check below).
Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR").foreach { envKey =>
sys.env.get(envKey).foreach { path =>
val dir = new File(path)
if (dir.isDirectory()) {
val files = dir.listFiles()
if (files == null) {
logWarning("Failed to list files under directory " + dir)
} else {
files.foreach { file =>
if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
hadoopConfFiles(file.getName()) = file
}
}
}
}
}
}
val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
try {
// Level 0 = store without compression; these are small text files.
confStream.setLevel(0)
hadoopConfFiles.foreach { case (name, file) =>
if (file.canRead()) {
confStream.putNextEntry(new ZipEntry(name))
Files.copy(file, confStream)
confStream.closeEntry()
}
}
// Save Spark configuration to a file in the archive.
val props = new Properties()
sparkConf.getAll.foreach { case (k, v) => props.setProperty(k, v) }
// Override spark.yarn.key to point to the location in distributed cache which will be used
// by AM.
Option(amKeytabFileName).foreach { k => props.setProperty(KEYTAB.key, k) }
confStream.putNextEntry(new ZipEntry(SPARK_CONF_FILE))
val writer = new OutputStreamWriter(confStream, StandardCharsets.UTF_8)
props.store(writer, "Spark configuration.")
writer.flush()
confStream.closeEntry()
} finally {
confStream.close()
}
confArchive
}
/**
 * Set up the environment for launching our ApplicationMaster container.
 *
 * @param stagingDirPath the application's staging directory.
 * @param pySparkArchives archives needed to run PySpark (added to PYTHONPATH).
 * @return the environment variable map for the AM container.
 */
private def setupLaunchEnv(
stagingDirPath: Path,
pySparkArchives: Seq[String]): HashMap[String, String] = {
logInfo("Setting up the launch environment for our AM container")
val env = new HashMap[String, String]()
populateClasspath(args, yarnConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH))
env("SPARK_YARN_MODE") = "true"
env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString
env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
if (loginFromKeytab) {
// Random suffix avoids collisions between apps sharing a staging base dir.
val credentialsFile = "credentials-" + UUID.randomUUID().toString
sparkConf.set(CREDENTIALS_FILE_PATH, new Path(stagingDirPath, credentialsFile).toString)
logInfo(s"Credentials file set to: $credentialsFile")
}
// Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
val amEnvPrefix = "spark.yarn.appMasterEnv."
sparkConf.getAll
.filter { case (k, v) => k.startsWith(amEnvPrefix) }
.map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
.foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }
// If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
// of the container processes too. Add all non-.py files directly to PYTHONPATH.
//
// NOTE: the code currently does not handle .py files defined with a "local:" scheme.
val pythonPath = new ListBuffer[String]()
val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py"))
if (pyFiles.nonEmpty) {
pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR)
}
(pySparkArchives ++ pyArchives).foreach { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme != LOCAL_SCHEME) {
pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName())
} else {
pythonPath += uri.getPath()
}
}
// Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
if (pythonPath.nonEmpty) {
val pythonPathStr = (sys.env.get("PYTHONPATH") ++ pythonPath)
.mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
env("PYTHONPATH") = pythonPathStr
sparkConf.setExecutorEnv("PYTHONPATH", pythonPathStr)
}
if (isClusterMode) {
// propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode
Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname =>
if (!env.contains(envname)) {
sys.env.get(envname).foreach(env(envname) = _)
}
}
sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
}
// Allow extra classpath entries set in the submitter's environment to reach the AM.
sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
env(ENV_DIST_CLASSPATH) = dcp
}
env
}
/**
 * Set up a ContainerLaunchContext to launch our ApplicationMaster container.
 * This sets up the launch environment, java options, and the command for launching the AM.
 *
 * @param newAppResponse RM response for the new application; provides the application id.
 * @return the populated launch context for the AM container.
 */
private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
: ContainerLaunchContext = {
logInfo("Setting up container launch context for our AM")
val appId = newAppResponse.getApplicationId
val appStagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
val pySparkArchives =
if (sparkConf.get(IS_PYTHON_APP)) {
findPySparkArchives()
} else {
Nil
}
val launchEnv = setupLaunchEnv(appStagingDirPath, pySparkArchives)
val localResources = prepareLocalResources(appStagingDirPath, pySparkArchives)
val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
amContainer.setLocalResources(localResources.asJava)
amContainer.setEnvironment(launchEnv.asJava)
val javaOpts = ListBuffer[String]()
// Set the environment variable through a command prefix
// to append to the existing value of the variable
var prefixEnv: Option[String] = None
// Add Xmx for AM memory
javaOpts += "-Xmx" + amMemory + "m"
val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
javaOpts += "-Djava.io.tmpdir=" + tmpDir
// TODO: Remove once cpuset version is pushed out.
// The context is, default gc for server class machines ends up using all cores to do gc -
// hence if there are multiple containers in same node, Spark GC affects all other containers'
// performance (which can be that of other Spark containers)
// Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
// multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
// of cores on a node.
val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
if (useConcurrentAndIncrementalGC) {
// In our expts, using (default) throughput collector has severe perf ramifications in
// multi-tenant machines
javaOpts += "-XX:+UseConcMarkSweepGC"
javaOpts += "-XX:MaxTenuringThreshold=31"
javaOpts += "-XX:SurvivorRatio=8"
javaOpts += "-XX:+CMSIncrementalMode"
javaOpts += "-XX:+CMSIncrementalPacing"
javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
javaOpts += "-XX:CMSIncrementalDutyCycle=10"
}
// Include driver-specific java options if we are launching a driver
if (isClusterMode) {
sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
}
val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
sys.props.get("spark.driver.libraryPath")).flatten
if (libraryPaths.nonEmpty) {
prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(libraryPaths)))
}
if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
}
} else {
// Validate and include yarn am specific java options in yarn-client mode.
sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
// Spark options and heap size must be set through their dedicated configs,
// not smuggled in via AM java options.
if (opts.contains("-Dspark")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
throw new SparkException(msg)
}
if (opts.contains("-Xmx")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
s"(was '$opts'). Use spark.yarn.am.memory instead."
throw new SparkException(msg)
}
javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
}
sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(Seq(paths))))
}
}
// For log4j configuration to reference
javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
// Build up the AM's command-line arguments piece by piece.
val userClass =
if (isClusterMode) {
Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
} else {
Nil
}
val userJar =
if (args.userJar != null) {
Seq("--jar", args.userJar)
} else {
Nil
}
val primaryPyFile =
if (isClusterMode && args.primaryPyFile != null) {
Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
} else {
Nil
}
val primaryRFile =
if (args.primaryRFile != null) {
Seq("--primary-r-file", args.primaryRFile)
} else {
Nil
}
// Cluster mode launches the full AM; client mode only needs the ExecutorLauncher.
val amClass =
if (isClusterMode) {
Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
} else {
Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
}
if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
}
val userArgs = args.userArgs.flatMap { arg =>
Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
}
val amArgs =
Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
Seq("--properties-file", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE))
// Command for the ApplicationMaster
val commands = prefixEnv ++
Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
javaOpts ++ amArgs ++
Seq(
"1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
"2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
// TODO: it would be nicer to just make sure there are no null commands here
val printableCommands = commands.map(s => if (s == null) "null" else s).toList
amContainer.setCommands(printableCommands.asJava)
logDebug("===============================================================================")
logDebug("YARN AM launch context:")
logDebug(s"    user class: ${Option(args.userClass).getOrElse("N/A")}")
logDebug("    env:")
launchEnv.foreach { case (k, v) => logDebug(s"        $k -> $v") }
logDebug("    resources:")
localResources.foreach { case (k, v) => logDebug(s"        $k -> $v")}
logDebug("    command:")
logDebug(s"        ${printableCommands.mkString(" ")}")
logDebug("===============================================================================")
// send the acl settings into YARN to control who has access via YARN interfaces
val securityManager = new SecurityManager(sparkConf)
amContainer.setApplicationACLs(
YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
setupSecurityToken(amContainer)
amContainer
}
/**
 * Reads Kerberos login settings from the Spark conf and snapshots the current user's
 * Hadoop credentials. Mutates the enclosing client's `loginFromKeytab`, `principal`,
 * `keytab`, `amKeytabFileName` and `credentials` fields; must run before submission.
 */
def setupCredentials(): Unit = {
  loginFromKeytab = sparkConf.contains(PRINCIPAL.key)
  if (loginFromKeytab) {
    principal = sparkConf.get(PRINCIPAL).get
    // A keytab is mandatory once a principal has been configured.
    keytab = sparkConf.get(KEYTAB).orNull
    require(keytab != null, "Keytab must be specified when principal is specified.")
    logInfo("Attempting to login to the Kerberos" +
      s" using principal: $principal and keytab: $keytab")
    val f = new File(keytab)
    // Generate a file name that can be used for the keytab file, that does not conflict
    // with any user file.
    amKeytabFileName = f.getName + "-" + UUID.randomUUID().toString
    sparkConf.set(PRINCIPAL.key, principal)
  }
  // Defensive copy of the credentials
  credentials = new Credentials(UserGroupInformation.getCurrentUser.getCredentials)
}
/**
 * Report the state of an application until it has exited, either successfully or
 * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
 * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
 * or KILLED).
 *
 * @param appId ID of the application to monitor.
 * @param returnOnRunning Whether to also return the application state when it is RUNNING.
 * @param logApplicationReport Whether to log details of the application report every iteration.
 * @return A pair of the yarn application state and the final application state.
 */
def monitorApplication(
    appId: ApplicationId,
    returnOnRunning: Boolean = false,
    logApplicationReport: Boolean = true): (YarnApplicationState, FinalApplicationStatus) = {
  val interval = sparkConf.get(REPORT_INTERVAL)
  var lastState: YarnApplicationState = null
  // Poll the ResourceManager at the configured interval; the `return`s below are the
  // only exits from this loop.
  while (true) {
    Thread.sleep(interval)
    val report: ApplicationReport =
      try {
        getApplicationReport(appId)
      } catch {
        case e: ApplicationNotFoundException =>
          // The RM no longer knows about the app (e.g. killed externally): treat as KILLED
          // and clean up our staging files.
          logError(s"Application $appId not found.")
          cleanupStagingDir(appId)
          return (YarnApplicationState.KILLED, FinalApplicationStatus.KILLED)
        case NonFatal(e) =>
          logError(s"Failed to contact YARN for application $appId.", e)
          // Don't necessarily clean up staging dir because status is unknown
          return (YarnApplicationState.FAILED, FinalApplicationStatus.FAILED)
      }
    val state = report.getYarnApplicationState
    if (logApplicationReport) {
      logInfo(s"Application report for $appId (state: $state)")
      // If DEBUG is enabled, log report details every iteration
      // Otherwise, log them every time the application changes state
      if (log.isDebugEnabled) {
        logDebug(formatReportDetails(report))
      } else if (lastState != state) {
        logInfo(formatReportDetails(report))
      }
    }
    // Forward each state transition to the launcher backend exactly once.
    if (lastState != state) {
      state match {
        case YarnApplicationState.RUNNING =>
          reportLauncherState(SparkAppHandle.State.RUNNING)
        case YarnApplicationState.FINISHED =>
          // FINISHED in YARN terms can still mean the user code failed or was killed;
          // disambiguate via the final application status.
          report.getFinalApplicationStatus match {
            case FinalApplicationStatus.FAILED =>
              reportLauncherState(SparkAppHandle.State.FAILED)
            case FinalApplicationStatus.KILLED =>
              reportLauncherState(SparkAppHandle.State.KILLED)
            case _ =>
              reportLauncherState(SparkAppHandle.State.FINISHED)
          }
        case YarnApplicationState.FAILED =>
          reportLauncherState(SparkAppHandle.State.FAILED)
        case YarnApplicationState.KILLED =>
          reportLauncherState(SparkAppHandle.State.KILLED)
        case _ =>
      }
    }
    // Terminal states: remove staging files and report the outcome.
    if (state == YarnApplicationState.FINISHED ||
      state == YarnApplicationState.FAILED ||
      state == YarnApplicationState.KILLED) {
      cleanupStagingDir(appId)
      return (state, report.getFinalApplicationStatus)
    }
    if (returnOnRunning && state == YarnApplicationState.RUNNING) {
      return (state, report.getFinalApplicationStatus)
    }
    lastState = state
  }
  // Never reached, but keeps compiler happy
  throw new SparkException("While loop is depleted! This should never happen...")
}
/**
 * Renders the interesting fields of an application report, one "\n\t key: value"
 * entry per field, for inclusion in log messages. Null or empty values are shown
 * as "N/A".
 */
private def formatReportDetails(report: ApplicationReport): String = {
  val details = Seq[(String, String)](
    ("client token", getClientToken(report)),
    ("diagnostics", report.getDiagnostics),
    ("ApplicationMaster host", report.getHost),
    ("ApplicationMaster RPC port", report.getRpcPort.toString),
    ("queue", report.getQueue),
    ("start time", report.getStartTime.toString),
    ("final status", report.getFinalApplicationStatus.toString),
    ("tracking URL", report.getTrackingUrl),
    ("user", report.getUser)
  )

  // Use more loggable format if value is null or empty
  details.map { case (k, v) =>
    val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
    // Fixed: the escapes were doubled ("\\n\\t"), which logged the literal characters
    // backslash-n backslash-t instead of a newline and tab.
    s"\n\t $k: $newValue"
  }.mkString("")
}
/**
 * Submit an application to the ResourceManager and, unless running in
 * fire-and-forget mode with no launcher connected, monitor it until it exits.
 * A failed, killed, or undefined final status raises a SparkException.
 */
def run(): Unit = {
  this.appId = submitApplication()
  // Equivalent (by De Morgan) to the fire-and-forget check: we only detach early when
  // no launcher is connected AND the user asked not to wait for completion.
  val waitForCompletion = launcherBackend.isConnected() || !fireAndForget
  if (!waitForCompletion) {
    // One-shot status check, then return control to the caller immediately.
    val report = getApplicationReport(appId)
    val state = report.getYarnApplicationState
    logInfo(s"Application report for $appId (state: $state)")
    logInfo(formatReportDetails(report))
    state match {
      case YarnApplicationState.FAILED | YarnApplicationState.KILLED =>
        throw new SparkException(s"Application $appId finished with status: $state")
      case _ =>
        // Submission accepted; nothing more to do in fire-and-forget mode.
    }
  } else {
    // Block until the application reaches a terminal state, then map bad outcomes
    // to exceptions (each throw ends the method, so else-if chaining is equivalent
    // to the independent checks).
    val (yarnState, finalStatus) = monitorApplication(appId)
    if (yarnState == YarnApplicationState.FAILED ||
        finalStatus == FinalApplicationStatus.FAILED) {
      throw new SparkException(s"Application $appId finished with failed status")
    } else if (yarnState == YarnApplicationState.KILLED ||
        finalStatus == FinalApplicationStatus.KILLED) {
      throw new SparkException(s"Application $appId is killed")
    } else if (finalStatus == FinalApplicationStatus.UNDEFINED) {
      throw new SparkException(s"The final status of application $appId is undefined")
    }
  }
}
/**
 * Locates the pyspark and py4j archives to ship with the application. Honors
 * PYSPARK_ARCHIVES_PATH (comma-separated) when set; otherwise requires both zips
 * to exist under $SPARK_HOME/python/lib and fails fast when either is missing.
 */
private def findPySparkArchives(): Seq[String] = {
  sys.env.get("PYSPARK_ARCHIVES_PATH") match {
    case Some(configured) =>
      configured.split(",").toSeq
    case None =>
      val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
      val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
      require(pyArchivesFile.exists(),
        s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
      val py4jFile = new File(pyLibPath, "py4j-0.10.4-src.zip")
      require(py4jFile.exists(),
        s"$py4jFile not found; cannot run pyspark application in YARN mode.")
      Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
  }
}
}
/**
 * Companion object for the YARN [[Client]]: the deprecated standalone entry point,
 * shared resource-name constants, and classpath/staging-path helpers used by both
 * the client and the AM/executor launch code.
 */
private object Client extends Logging {

  /**
   * Deprecated standalone entry point; users should go through spark-submit instead.
   */
  def main(argStrings: Array[String]): Unit = {
    if (!sys.props.contains("SPARK_SUBMIT")) {
      // Fixed: the escapes were doubled ("\\\""), which terminated the string literal
      // early and left the remainder of the line as bare (uncompilable) tokens.
      logWarning("WARNING: This client is deprecated and will be removed in a " +
        "future version of Spark. Use ./bin/spark-submit with \"--master yarn\"")
    }

    // Set an env variable indicating we are running in YARN mode.
    // Note that any env variable with the SPARK_ prefix gets propagated to all (remote) processes
    System.setProperty("SPARK_YARN_MODE", "true")
    val sparkConf = new SparkConf
    // SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
    // so remove them from sparkConf here for yarn mode.
    sparkConf.remove("spark.jars")
    sparkConf.remove("spark.files")
    val args = new ClientArguments(argStrings)
    new Client(args, sparkConf).run()
  }

  // Alias for the user jar
  val APP_JAR_NAME: String = "__app__.jar"

  // URI scheme that identifies local resources
  val LOCAL_SCHEME = "local"

  // Staging directory for any temporary jars or files
  val SPARK_STAGING: String = ".sparkStaging"

  // Staging directory is private! -> rwx--------
  val STAGING_DIR_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)

  // App files are world-wide readable and owner writable -> rw-r--r--
  val APP_FILE_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)

  // Distribution-defined classpath to add to processes
  val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"

  // Subdirectory where the user's Spark and Hadoop config files will be placed.
  val LOCALIZED_CONF_DIR = "__spark_conf__"

  // File containing the conf archive in the AM. See prepareLocalResources().
  val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"

  // Name of the file in the conf archive containing Spark configuration.
  val SPARK_CONF_FILE = "__spark_conf__.properties"

  // Subdirectory where the user's python files (not archives) will be placed.
  val LOCALIZED_PYTHON_DIR = "__pyfiles__"

  // Subdirectory where Spark libraries will be placed.
  val LOCALIZED_LIB_DIR = "__spark_libs__"

  /**
   * Return the path to the given application's staging directory.
   */
  private def getAppStagingDir(appId: ApplicationId): String = {
    buildPath(SPARK_STAGING, appId.toString())
  }

  /**
   * Populate the classpath entry in the given environment map with any application
   * classpath specified through the Hadoop and Yarn configurations.
   */
  private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
    : Unit = {
    val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
    classPathElementsToAdd.foreach { c =>
      YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
    }
  }

  /** YARN application classpath from the config, falling back to YARN's compiled-in default. */
  private def getYarnAppClasspath(conf: Configuration): Seq[String] =
    Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
      case Some(s) => s.toSeq
      case None => getDefaultYarnApplicationClasspath
    }

  /** MapReduce application classpath from the config, falling back to MR's compiled-in default. */
  private def getMRAppClasspath(conf: Configuration): Seq[String] =
    Option(conf.getStrings("mapreduce.application.classpath")) match {
      case Some(s) => s.toSeq
      case None => getDefaultMRApplicationClasspath
    }

  private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
    YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq

  private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
    StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq

  /**
   * Populate the classpath entry in the given environment map.
   *
   * User jars are generally not added to the JVM's system classpath; those are handled by the AM
   * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
   * are included in the system classpath, though. The extra class path and other uploaded files are
   * always made available through the system class path.
   *
   * @param args Client arguments (when starting the AM) or null (when starting executors).
   */
  private[yarn] def populateClasspath(
      args: ClientArguments,
      conf: Configuration,
      sparkConf: SparkConf,
      env: HashMap[String, String],
      extraClassPath: Option[String] = None): Unit = {
    extraClassPath.foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }

    addClasspathEntry(Environment.PWD.$$(), env)

    addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)

    if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
      // in order to properly add the app jar when user classpath is first
      // we have to do the mainJar separate in order to send the right thing
      // into addFileToClasspath
      val mainJar =
        if (args != null) {
          getMainJarUri(Option(args.userJar))
        } else {
          getMainJarUri(sparkConf.get(APP_JAR))
        }
      mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))

      val secondaryJars =
        if (args != null) {
          getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
        } else {
          getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
        }
      secondaryJars.foreach { x =>
        addFileToClasspath(sparkConf, conf, x, null, env)
      }
    }

    // Add the Spark jars to the classpath, depending on how they were distributed.
    addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
    if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
      sparkConf.get(SPARK_JARS).foreach { jars =>
        jars.filter(isLocalUri).foreach { jar =>
          addClasspathEntry(getClusterPath(sparkConf, jar), env)
        }
      }
    }

    populateHadoopClasspath(conf, env)
    sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
  }

  /**
   * Returns a list of URIs representing the user classpath.
   *
   * @param conf Spark configuration.
   */
  def getUserClasspath(conf: SparkConf): Array[URI] = {
    val mainUri = getMainJarUri(conf.get(APP_JAR))
    val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
    (mainUri ++ secondaryUris).toArray
  }

  // "local:" main jars keep their real URI; anything else is referenced by the
  // localized alias __app__.jar in the container's working dir.
  private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
    mainJar.flatMap { path =>
      val uri = Utils.resolveURI(path)
      if (uri.getScheme == LOCAL_SCHEME) Some(uri) else None
    }.orElse(Some(new URI(APP_JAR_NAME)))
  }

  private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = {
    secondaryJars.getOrElse(Nil).map(new URI(_))
  }

  /**
   * Adds the given path to the classpath, handling "local:" URIs correctly.
   *
   * If an alternate name for the file is given, and it's not a "local:" file, the alternate
   * name will be added to the classpath (relative to the job's work directory).
   *
   * If not a "local:" file and no alternate name, the linkName will be added to the classpath.
   *
   * @param conf Spark configuration.
   * @param hadoopConf Hadoop configuration.
   * @param uri URI to add to classpath (optional).
   * @param fileName Alternate name for the file (optional).
   * @param env Map holding the environment variables.
   */
  private def addFileToClasspath(
      conf: SparkConf,
      hadoopConf: Configuration,
      uri: URI,
      fileName: String,
      env: HashMap[String, String]): Unit = {
    if (uri != null && uri.getScheme == LOCAL_SCHEME) {
      addClasspathEntry(getClusterPath(conf, uri.getPath), env)
    } else if (fileName != null) {
      addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
    } else if (uri != null) {
      val localPath = getQualifiedLocalPath(uri, hadoopConf)
      val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
      addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
    }
  }

  /**
   * Add the given path to the classpath entry of the given environment map.
   * If the classpath is already set, this appends the new path to the existing classpath.
   */
  private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
    YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)

  /**
   * Returns the path to be sent to the NM for a path that is valid on the gateway.
   *
   * This method uses two configuration values:
   *
   * - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
   * only be valid in the gateway node.
   * - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
   * contain, for example, env variable references, which will be expanded by the NMs when
   * starting containers.
   *
   * If either config is not available, the input path is returned.
   */
  def getClusterPath(conf: SparkConf, path: String): String = {
    val localPath = conf.get(GATEWAY_ROOT_PATH)
    val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
    if (localPath != null && clusterPath != null) {
      path.replace(localPath, clusterPath)
    } else {
      path
    }
  }

  /**
   * Return whether the two file systems are the same.
   */
  private def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
    val srcUri = srcFs.getUri()
    val dstUri = destFs.getUri()
    if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
      return false
    }

    var srcHost = srcUri.getHost()
    var dstHost = dstUri.getHost()

    // In HA or when using viewfs, the host part of the URI may not actually be a host, but the
    // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
    // match.
    if (srcHost != null && dstHost != null && srcHost != dstHost) {
      try {
        srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
        dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
      } catch {
        case e: UnknownHostException =>
          return false
      }
    }

    Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
  }

  /**
   * Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
   * This is used for preparing local resources to be included in the container launch context.
   */
  private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
    val qualifiedURI =
      if (localURI.getScheme == null) {
        // If not specified, assume this is in the local filesystem to keep the behavior
        // consistent with that of Hadoop
        new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
      } else {
        localURI
      }
    new Path(qualifiedURI)
  }

  /**
   * Whether to consider jars provided by the user to have precedence over the Spark jars when
   * loading user classes.
   */
  def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
    if (isDriver) {
      conf.get(DRIVER_USER_CLASS_PATH_FIRST)
    } else {
      conf.get(EXECUTOR_USER_CLASS_PATH_FIRST)
    }
  }

  /**
   * Joins all the path components using Path.SEPARATOR.
   */
  def buildPath(components: String*): String = {
    components.mkString(Path.SEPARATOR)
  }

  /** Returns whether the URI is a "local:" URI. */
  def isLocalUri(uri: String): Boolean = {
    uri.startsWith(s"$LOCAL_SCHEME:")
  }
}
| JerryLead/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | Scala | apache-2.0 | 60,460 |
package com.github.dzhg.tedis.protocol
import java.io.InputStream
import com.github.dzhg.tedis.protocol.RESP._
import scala.collection.mutable
/**
 * Streaming parser for RESP (REdis Serialization Protocol) values read from an InputStream.
 *
 * NOTE(review): several bodies return Array[Int] / a (String, String) pair where the
 * declared result types are Strings, Longs, or RESP case classes. This can only compile
 * if the wildcard import `RESP._` supplies implicit conversions for those shapes —
 * confirm against the RESP object's definitions before restructuring.
 */
class RESPReader(in: InputStream) {
  /**
   * Reads one RESP value, dispatching on the leading type byte.
   * An unrecognized type byte falls through the match and throws a MatchError.
   */
  def readValue(): RESPValue = {
    val c = in.read().toByte
    c match {
      case SIMPLE_STRING => readSimpleString()
      case INTEGER => readIntegerValue()
      case ERROR => readError()
      case BULK_STRING => readBulkString()
      case ARRAY => readArray()
      case EOF => EOFValue
    }
  }

  // Wire form: "+<payload>\r\n" (type byte already consumed by readValue).
  def readSimpleString(): SimpleStringValue = {
    val s = readUntil(CR)
    readUntil(LF)
    SimpleStringValue(s)
  }

  // Wire form: ":<integer>\r\n"; numeric parsing presumably via an implicit from RESP._.
  def readIntegerValue(): IntegerValue = {
    val s = readUntil(CR)
    readUntil(LF)
    IntegerValue(s)
  }

  // Wire form: "-<KIND> <message>\r\n"; splitAt(i) yields (kind, " message"), presumably
  // implicitly converted to ErrorValue — TODO confirm (note the message keeps its leading space).
  def readError(): ErrorValue = {
    val s: String = readUntil(CR)
    readUntil(LF)
    val i = s.indexOf(" ")
    s.splitAt(i)
  }

  // Wire form: "$<length>\r\n<payload>\r\n"; length -1 encodes a null bulk string.
  def readBulkString(): BulkStringValue = {
    val length: Long = readUntil(CR)
    readUntil(LF)
    length match {
      case -1 =>
        BulkStringValue(None)
      case 0 =>
        // Empty payload: still consume the trailing CRLF.
        readUntil(LF)
        BulkStringValue(Some(""))
      case n =>
        val v = readBy(n.toInt)
        readUntil(LF)
        BulkStringValue(Some(v))
    }
  }

  // Wire form: "*<count>\r\n<elements...>"; count -1 encodes a null array.
  def readArray(): ArrayValue = {
    val count: Long = readUntil(CR)
    readUntil(LF)
    count match {
      case -1 => ArrayValue(None)
      case 0 => ArrayValue(Some(Seq.empty))
      case n => 0.until(n.toInt).map(_ => readValue())
    }
  }

  /** Reads exactly `length` bytes (as ints; no EOF check — -1s would be accumulated). */
  def readBy(length: Int): Array[Int] = {
    val arr: mutable.ArrayBuilder[Int] = mutable.ArrayBuilder.make()
    0.until(length).foreach { _ =>
      arr += in.read()
    }
    arr.result()
  }

  /** Reads and accumulates bytes until `end` is seen; the terminator itself is consumed but not returned. */
  def readUntil(end: Int): Array[Int] = {
    val arr: mutable.ArrayBuilder[Int] = mutable.ArrayBuilder.make()
    Iterator.from(1).takeWhile { _ =>
      val c = in.read()
      val done = c == end
      if (!done) arr += c
      !done
    }.foreach(noop)
    arr.result()
  }

  // Sink for the Iterator above; exists only to force evaluation.
  def noop(i: Int): Unit = {}
}
| dzhg/tedis | src/main/scala/com/github/dzhg/tedis/protocol/RESPReader.scala | Scala | mit | 1,971 |
// NOTE(review): this looks like a debugger-test fixture (the dangling "stop here"
// expression is presumably a breakpoint anchor) — confirm before moving lines around.
object Sample {
  def main(args: Array[String]) {
    val s = Array.ofDim[String](2, 2)
    s(1)(1) = "test"
    // Fixed: stray dataset metadata text was fused onto the closing brace below,
    // which made the file uncompilable; the residue has been removed.
    "stop here"
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, Statistics}
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.{Append, Complete, Update}
import org.apache.spark.sql.execution.streaming.Sink
import org.apache.spark.sql.sources.v2.{DataSourceOptions, DataSourceV2, StreamWriteSupport}
import org.apache.spark.sql.sources.v2.writer._
import org.apache.spark.sql.sources.v2.writer.streaming.StreamWriter
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
/**
 * A sink that stores the results in memory. This [[Sink]] is primarily intended for use in unit
 * tests and does not provide durability.
 */
class MemorySinkV2 extends DataSourceV2 with StreamWriteSupport with Logging {
  override def createStreamWriter(
      queryId: String,
      schema: StructType,
      mode: OutputMode,
      options: DataSourceOptions): StreamWriter = {
    new MemoryStreamWriter(this, mode)
  }

  // One committed batch and the rows it produced.
  private case class AddedData(batchId: Long, data: Array[Row])

  /** An order list of batches that have been written to this [[Sink]]. */
  @GuardedBy("this")
  private val batches = new ArrayBuffer[AddedData]()

  /** Returns all rows that are stored in this [[Sink]]. */
  def allData: Seq[Row] = synchronized {
    batches.flatMap(_.data)
  }

  /** Highest committed batch id, if any batch has been committed. */
  def latestBatchId: Option[Long] = synchronized {
    batches.lastOption.map(_.batchId)
  }

  /** Rows of the most recently committed batch; empty when nothing is committed. */
  def latestBatchData: Seq[Row] = synchronized {
    batches.lastOption.toSeq.flatten(_.data)
  }

  /** Human-readable dump of every committed batch, for debugging tests. */
  def toDebugString: String = synchronized {
    batches.map { case AddedData(batchId, data) =>
      val dataStr = try data.mkString(" ") catch {
        case NonFatal(e) => "[Error converting to string]"
      }
      s"$batchId: $dataStr"
    }.mkString("\n")
  }

  /**
   * Commits `newRows` under `batchId`. Re-delivery of an already-committed batch id
   * is ignored, making commit idempotent; Complete mode replaces all prior batches.
   */
  def write(batchId: Long, outputMode: OutputMode, newRows: Array[Row]): Unit = {
    val notCommitted = synchronized {
      latestBatchId.isEmpty || batchId > latestBatchId.get
    }
    if (notCommitted) {
      logDebug(s"Committing batch $batchId to $this")
      outputMode match {
        case Append | Update =>
          val rows = AddedData(batchId, newRows)
          synchronized { batches += rows }

        case Complete =>
          val rows = AddedData(batchId, newRows)
          synchronized {
            batches.clear()
            batches += rows
          }

        case _ =>
          throw new IllegalArgumentException(
            s"Output mode $outputMode is not supported by MemorySink")
      }
    } else {
      logDebug(s"Skipping already committed batch: $batchId")
    }
  }

  /** Drops all committed batches. */
  def clear(): Unit = synchronized {
    batches.clear()
  }

  override def toString(): String = "MemorySink"
}
case class MemoryWriterCommitMessage(partition: Int, data: Seq[Row]) extends WriterCommitMessage {}
/**
 * Batch-style writer that funnels every partition's commit message for one batch
 * into the shared [[MemorySinkV2]].
 */
class MemoryWriter(sink: MemorySinkV2, batchId: Long, outputMode: OutputMode)
  extends DataSourceWriter with Logging {

  override def createWriterFactory: MemoryWriterFactory = MemoryWriterFactory(outputMode)

  def commit(messages: Array[WriterCommitMessage]): Unit = {
    // Collect every partition's rows in message order and hand them to the sink.
    val collected = messages.flatMap { case msg: MemoryWriterCommitMessage => msg.data }
    sink.write(batchId, outputMode, collected)
  }

  override def abort(messages: Array[WriterCommitMessage]): Unit = {
    // Nothing to roll back: rows only reach the sink on commit.
  }
}
/**
 * Streaming writer that commits each epoch's rows into the shared [[MemorySinkV2]].
 */
class MemoryStreamWriter(val sink: MemorySinkV2, outputMode: OutputMode)
  extends StreamWriter {

  override def createWriterFactory: MemoryWriterFactory = MemoryWriterFactory(outputMode)

  override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
    // Collect every partition's rows in message order and hand them to the sink.
    val collected = messages.flatMap { case msg: MemoryWriterCommitMessage => msg.data }
    sink.write(epochId, outputMode, collected)
  }

  override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
    // Nothing to roll back: rows only reach the sink on commit.
  }
}
/** Serializable factory shipped to executors; creates one [[MemoryDataWriter]] per partition. */
case class MemoryWriterFactory(outputMode: OutputMode) extends DataWriterFactory[Row] {
  override def createDataWriter(
      partitionId: Int,
      attemptNumber: Int,
      epochId: Long): DataWriter[Row] =
    new MemoryDataWriter(partitionId, outputMode)
}
/**
 * Per-partition writer: buffers rows locally and releases them as a
 * [[MemoryWriterCommitMessage]] on commit.
 */
class MemoryDataWriter(partition: Int, outputMode: OutputMode)
  extends DataWriter[Row] with Logging {

  private val buffered = mutable.Buffer.empty[Row]

  override def write(row: Row): Unit = buffered += row

  override def commit(): MemoryWriterCommitMessage = {
    // Snapshot the buffer before clearing so the message owns an independent copy.
    val message = MemoryWriterCommitMessage(partition, buffered.clone())
    buffered.clear()
    message
  }

  override def abort(): Unit = {}
}
/**
 * Used to query the data that has been written into a [[MemorySinkV2]].
 */
case class MemoryPlanV2(sink: MemorySinkV2, override val output: Seq[Attribute]) extends LeafNode {
  // Estimated on-heap size of one output row, from the declared attribute types.
  private val bytesPerRow = output.map(_.dataType.defaultSize).sum

  override def computeStats(): Statistics = Statistics(bytesPerRow * sink.allData.size)
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/memoryV2.scala | Scala | apache-2.0 | 6,060 |
package com.twitter.finagle.exp
import com.twitter.concurrent.ForkingScheduler
import com.twitter.finagle.Failure
import com.twitter.finagle.Service
import com.twitter.finagle.ServiceFactory
import com.twitter.finagle.SimpleFilter
import com.twitter.finagle.Stack
import com.twitter.finagle.Stackable
import com.twitter.util._
/**
 * Filter placed in finagle's default server stack to offload the
 * execution of `Future` computations using a `ForkingScheduler`. It's
 * disabled automatically in case the current scheduler doesn't
 * support forking.
 *
 * This implementation doesn't have a client filter because it's
 * expected that the forking scheduler will handle the thread shift
 * back to its workers after a client returns.
 */
object ForkingSchedulerFilter {
  private[this] val Role = Stack.Role("UseForkingScheduler")
  private[this] val Description = "Forks the execution if the scheduler has forking capability"

  // Stack parameter selecting whether the filter is installed. The default is
  // Enabled only when ForkingScheduler() reports a scheduler with forking support.
  private[finagle] sealed abstract class Param
  private[finagle] object Param {
    final case class Enabled(scheduler: ForkingScheduler) extends Param
    final case object Disabled extends Param

    implicit val param: Stack.Param[Param] = new Stack.Param[Param] {
      // Probed lazily, once: the installed scheduler can't change mid-run.
      lazy val default: Param =
        ForkingScheduler() match {
          case Some(scheduler) =>
            Enabled(scheduler)
          case None =>
            Disabled
        }

      // Rendering used by stack-parameter diagnostics.
      override def show(p: Param): Seq[(String, () => String)] = {
        val enabledStr = p match {
          case Enabled(scheduler) => scheduler.toString
          case Disabled => "Disabled"
        }
        Seq(("ForkingSchedulerFilter", () => enabledStr))
      }
    }
  }

  /** Stack module that prepends [[Server]] when a forking scheduler is configured. */
  def server[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module1[Param, ServiceFactory[Req, Rep]] {
      def make(p: Param, next: ServiceFactory[Req, Rep]): ServiceFactory[Req, Rep] = p match {
        case Param.Enabled(scheduler) => (new Server(scheduler)).andThen(next)
        case Param.Disabled => next
      }
      def role: Stack.Role = Role
      def description: String = Description
    }

  /**
   * Server-side filter that forks each request's service invocation onto the
   * scheduler; when the scheduler declines the fork (returns None), the request
   * fails with a pre-allocated rejection.
   */
  final class Server[Req, Rep](scheduler: ForkingScheduler) extends SimpleFilter[Req, Rep] {
    // Allocated once; rejections are expected under overload, so avoid per-request cost.
    private[this] final val overloadedFailure =
      Future.exception(Failure.rejected("Forking scheduler overloaded"))

    def apply(request: Req, service: Service[Req, Rep]): Future[Rep] =
      scheduler.tryFork(service(request)).flatMap {
        case Some(v) => Future.value(v)
        case None => overloadedFailure
      }
  }
}
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/exp/ForkingSchedulerFilter.scala | Scala | apache-2.0 | 2,528 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.hyperbus.serialization
import java.io.{Reader, StringReader}
import com.fasterxml.jackson.core.{JsonFactory, JsonParser}
import com.hypertino.binders.json.{JacksonParserAdapter, JsonBindersFactory}
import com.hypertino.binders.value.{Obj, Value}
import com.hypertino.hyperbus.model.{Body, Header, Headers, Message, MessageHeaders}
object MessageReader {
  /**
   * Reads one hyperbus message from `reader`: parses the leading headers JSON object
   * with Jackson, rewinds the reader to the first character after the headers, and
   * hands (reader, headers) to the concrete deserializer for the body.
   *
   * NOTE(review): relies on `reader.reset()` returning to the very beginning of the
   * input (true for a fresh StringReader) — confirm for other Reader implementations.
   */
  def read[M <: Message[_ <: Body,_ <: MessageHeaders]](reader: Reader, concreteDeserializer: MessageDeserializer[M]): M = {
    val jacksonFactory = new JsonFactory()
    // Keep `reader` open when the parser is closed; the body still has to be read from it.
    jacksonFactory.disable(JsonParser.Feature.AUTO_CLOSE_SOURCE)
    val jp = jacksonFactory.createParser(reader)
    val headers = try {
      val adapter = new JacksonParserAdapter(jp)
      val headers = JsonBindersFactory.findFactory().withJsonParserApi(adapter) { jpa ⇒
        val headersSeq = jpa.unbind[Value].asInstanceOf[Obj].v.toSeq // todo: this isn't great, also see https://github.com/hypertino/binders/issues/2
        // Only the content-type header needs rewriting from the wire (json) form.
        val transformedSeq = headersSeq.map {
          case (Header.CONTENT_TYPE, value) ⇒ Header.CONTENT_TYPE → JsonContentTypeConverter.universalJsonContentTypeToSimple(value)
          case other ⇒ other
        }
        Headers(transformedSeq: _*)
      }
      jp.nextToken()
      // Character offset of the first token after the headers object = start of the body;
      // rewind to the beginning and skip forward to that point.
      val offset = jp.getTokenLocation.getCharOffset
      reader.reset()
      reader.skip(offset)
      headers
    }
    finally {
      jp.close()
    }
    concreteDeserializer(reader, headers)
  }

  /** Convenience wrapper that reads a message from an in-memory string. */
  def fromString[M <: Message[_ <: Body,_ <: MessageHeaders]](message: String, concreteDeserializer: MessageDeserializer[M]): M = {
    val stringReader = new StringReader(message)
    try {
      read(stringReader, concreteDeserializer)
    }
    finally {
      stringReader.close()
    }
  }
}
| hypertino/hyperbus | hyperbus/src/main/scala/com/hypertino/hyperbus/serialization/MessageReader.scala | Scala | mpl-2.0 | 2,038 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.thrift.macros.impl
import com.twitter.scalding.serialization.macros.impl.OrderedSerializationProviderImpl
import com.twitter.scalding.serialization.macros.impl.ordered_serialization._
import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.thrift.macros.impl.ordered_serialization.{ ScroogeEnumOrderedBuf, ScroogeUnionOrderedBuf, ScroogeOrderedBuf, ScroogeOuterOrderedBuf }
import scala.language.experimental.macros
import scala.reflect.macros.Context
// The flow here is that we start with the outer dispatcher. Outer dispatcher is the only one allowed to recurse into a thrift struct `ScroogeOrderedBuf.dispatch`.
// However it cannot use implicits at the top level. Otherwise this would always be able to return once with an implicit. We will use implicits for members inside
// that struct as needed however.
// The inner ones can recurse into Enum's and Union's, but will use the `ScroogeOuterOrderedBuf` to ensure we drop an implicit down to jump back out.
object ScroogeInternalOrderedSerializationImpl {

  // The base dispatcher
  // This one is able to handle all scrooge types along with all normal scala types too
  // One exception is that if it meets another thrift struct it will hit the ScroogeOuterOrderedBuf
  // which will inject an implicit lazy val for a new OrderedSerialization and then exit the macro.
  // This avoids methods becoming too long via inlining.
  //
  // NOTE: the orElse chain below is order-sensitive — normalized types first, then
  // enums, unions, the "outer" struct escape hatch, and finally the basic scalding
  // dispatchers. Do not reorder without understanding each dispatcher's domain.
  private def baseScroogeDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
    import c.universe._

    // Recursion point: members of a struct are resolved via the inner dispatcher,
    // which adds the implicit fallback and the abort case on top of this base chain.
    def buildDispatcher: PartialFunction[c.Type, TreeOrderedBuf[c.type]] = ScroogeInternalOrderedSerializationImpl.innerDispatcher(c)

    val scroogeEnumDispatcher = ScroogeEnumOrderedBuf.dispatch(c)
    val scroogeUnionDispatcher = ScroogeUnionOrderedBuf.dispatch(c)(buildDispatcher)
    val scroogeOuterOrderedBuf = ScroogeOuterOrderedBuf.dispatch(c)

    OrderedSerializationProviderImpl.normalizedDispatcher(c)(buildDispatcher)
      .orElse(scroogeEnumDispatcher)
      .orElse(scroogeUnionDispatcher)
      .orElse(scroogeOuterOrderedBuf)
      .orElse(OrderedSerializationProviderImpl.scaldingBasicDispatchers(c)(buildDispatcher))
  }

  // Dispatcher used for types nested inside a struct: base chain plus a fallback to
  // any OrderedSerialization already available implicitly, aborting compilation if
  // nothing matches.
  private def innerDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
    import c.universe._
    baseScroogeDispatcher(c)
      .orElse(OrderedSerializationProviderImpl.fallbackImplicitDispatcher(c))
      .orElse {
        case tpe: Type => c.abort(c.enclosingPosition, s"""Unable to find OrderedSerialization for type ${tpe}""")
      }
  }

  // The outer dispatcher
  // This is the dispatcher routine only hit when we enter in via an external call implicitly or explicitly to the macro.
  // It has the ability to generate code for thrift structs, with the scroogeDispatcher.
  // Only this dispatcher may recurse into a thrift struct (via ScroogeOrderedBuf.dispatch);
  // see the file-level comment for why the inner dispatchers must not.
  private def outerDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
    import c.universe._
    OrderedSerializationProviderImpl.normalizedDispatcher(c)(ScroogeInternalOrderedSerializationImpl.outerDispatcher(c))
      .orElse(ScroogeOrderedBuf.dispatch(c)(baseScroogeDispatcher(c)))
      .orElse(baseScroogeDispatcher(c))
      .orElse {
        case tpe: Type => c.abort(c.enclosingPosition, s"""Unable to find OrderedSerialization for type ${tpe}""")
      }
  }

  /**
   * Macro entry point: materializes an `OrderedSerialization[T]` for the requested
   * type by running it through the outer dispatcher and converting the resulting
   * `TreeOrderedBuf` into a serialization expression.
   */
  def apply[T](c: Context)(implicit T: c.WeakTypeTag[T]): c.Expr[OrderedSerialization[T]] = {
    val b: TreeOrderedBuf[c.type] = outerDispatcher(c)(T.tpe)
    TreeOrderedBuf.toOrderedSerialization[T](c)(b)
  }
}
| tdyas/scalding | scalding-thrift-macros/src/main/scala/com/twitter/scalding/thrift/macros/impl/ScroogeInternalOrderedSerializationImpl.scala | Scala | apache-2.0 | 4,101 |
package models
/**
 * Immutable configuration for the Slack bot integration.
 *
 * @param botName         display name the bot posts under
 * @param iconEmoji       emoji used as the bot's avatar
 * @param fallbackChannel channel used when no target channel is given
 * @param incommingUrl    Slack incoming-webhook URL
 *                        (field name keeps its historical misspelling for source compatibility)
 */
class SlackConfiguration(
    val botName: String,
    val iconEmoji: String,
    val fallbackChannel: String,
    val incommingUrl: String)
| uqtimes/SlackBotScala | app/models/SlackConfiguration.scala | Scala | mit | 219 |
/*
Copyright (c) 2012 Joshua Garnett
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package com.adverserealms.astar.basic2d
import scala.collection.immutable.List
import scala.collection.mutable.ListBuffer
import com.adverserealms.astar.core._
import com.adverserealms.astar.basic2d._
import org.slf4j.{ Logger, LoggerFactory }
/**
 * Fixed 8x8 square grid used by the A* tests. Tiles are stored row-major;
 * a vertical wall (column 6, rows 2..6) and a horizontal wall (row 6,
 * columns 2..6) are marked unwalkable. Movement is 8-directional with a
 * diagonal step cost of 1.4 and an orthogonal step cost of 1.0; the
 * heuristic is the Chebyshev (diagonal) distance to the goal.
 */
class MockLargeSquareGridMap extends AstarMap {

  protected lazy val log = LoggerFactory.getLogger(getClass())

  private val MAP_WIDTH = 8
  private val MAP_HEIGHT: Int = 8

  private val tiles: List[MockSquareTile] = populateMockTiles()

  // NOTE(review): these three multipliers are currently unreferenced; the
  // literal costs in getDistance are used instead. Kept for compatibility.
  private val diagonalMultiplier = 1.4d
  private val normalMultiplier = 1.0d
  private val defaultCost = 1.0d

  /**
   * Map: x's are not walkable
   *
   * 00000000
   * 00000000
   * 000000x0
   * 000000x0
   * 000000x0
   * 000000x0
   * 00xxxxx0
   * 00000000
   */
  def populateMockTiles(): List[MockSquareTile] = {
    val grid = for {
      y <- 0 until MAP_HEIGHT
      x <- 0 until MAP_WIDTH
    } yield {
      val tile = new MockSquareTile(new Point(x, y))
      val inWallColumn = x == 6 && y > 1 && y < 7
      val inWallRow = y == 6 && x > 1 && x < 7
      if (inWallColumn || inWallRow) {
        tile.setWalkable(false)
      }
      tile
    }
    grid.toList
  }

  /**
   * Returns the up-to-eight tiles adjacent to `tile`, in the order:
   * upper row (left to right), left, right, lower row (left to right).
   * Off-map neighbors are omitted.
   */
  def getNeighbors(tile: AstarTile): List[AstarTile] = {
    val center = tile.asInstanceOf[PositionTile].getPosition()
    val cx = center.getX
    val cy = center.getY
    // Same visiting order as the original explicit checks.
    val offsets = List(
      (-1, -1), (0, -1), (1, -1),
      (-1, 0), (1, 0),
      (-1, 1), (0, 1), (1, 1))
    offsets.flatMap {
      case (dx, dy) => Option(getTile(cx + dx, cy + dy))
    }
  }

  /** Returns the tile at (x, y), or null when the coordinate is off the map. */
  def getTile(x: Int, y: Int): MockSquareTile = {
    val insideMap = x >= 0 && x < MAP_WIDTH && y >= 0 && y < MAP_HEIGHT
    if (insideMap) tiles(x + (y * MAP_WIDTH)) else null
  }

  /** Diagonal-distance (Chebyshev) heuristic from `tile` to the request's goal. */
  def getHeuristic(tile: AstarTile, req: AstarPathRequest): Float = {
    val from = tile.asInstanceOf[PositionTile].getPosition()
    val goal = req.end.asInstanceOf[PositionTile].getPosition()
    val delta: Point = getXYDistanceBetweenPoints(from, goal)
    scala.math.max(delta.getX, delta.getY).toFloat
  }

  // Per-axis absolute distances packed into a Point.
  private def getXYDistanceBetweenPoints(start: Point, end: Point): Point = {
    new Point(getAxisDistance(start.getX, end.getX), getAxisDistance(start.getY, end.getY))
  }

  private def getAxisDistance(start: Int, end: Int): Int = scala.math.abs(start - end)

  /** Step cost between two adjacent tiles: 1.4 for a diagonal move, 1.0 otherwise. */
  def getDistance(start: AstarTile, end: AstarTile): Float = {
    val a = start.asInstanceOf[PositionTile].getPosition()
    val b = end.asInstanceOf[PositionTile].getPosition()
    val diagonalMove = a.getX != b.getX && a.getY != b.getY
    if (diagonalMove) 1.4f else 1.0f
  }
}
/*
* Created on 2010/06/07
* Copyright (c) 2010-2011, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glk
import scala.annotation.switch
import java.util.logging._
import org.zmpp.base._
/**
 * Entry point for the Glulx "glk" instruction: maps numeric Glk function
 * selectors to calls on the Glk API object, marshalling arguments from the
 * VM and writing results (including by-reference out parameters) back into
 * VMState memory or onto the VM stack.
 *
 * Reference-argument conventions used throughout:
 *  - a pointer value of 0 means "discard the result"
 *  - a pointer value of -1 means "push the result onto the VM stack"
 *  - any other value is a memory address to write to
 */
class GlkDispatch(_state: VMState, glk: Glk) {
  val dispatchLogger = Logger.getLogger("glk.dispatch")

  /**
   * Dispatches the Glk call identified by `id` with the given argument
   * vector and returns the Glk result value (0 for void Glk functions).
   * Throws IllegalArgumentException for unknown selectors.
   */
  def dispatch(id: Int, args: Array[Int]) : Int = {
    /*
    import FunctionSelector._
    val selector = FunctionSelector(id)
    val builder = new StringBuilder
    builder.append("(")
    for (i <- 0 until args.length) {
      if (i > 0) builder.append(", ")
      builder.append(args(i))
    }
    builder.append(")")
    dispatchLogger.info("@@%s%s".format(selector.toString, builder.toString))
    */
    (id: @switch) match {
      case 0x01 => _exit(args) // glk_exit
      case 0x02 => _set_interrupt_handler(args) // glk_set_interrupt_handler
      case 0x03 => _tick(args) // glk_tick
      case 0x04 => _gestalt(args) // glk_gestalt
      case 0x05 => _gestalt_ext(args) // glk_gestalt_ext
      case 0x20 => _window_iterate(args) // glk_window_iterate
      case 0x21 => _window_get_rock(args) // glk_window_get_rock
      case 0x22 => _window_get_root(args) // glk_window_get_root
      case 0x23 => _window_open(args) // glk_window_open
      case 0x24 => _window_close(args) // glk_window_close
      case 0x25 => _window_get_size(args) // glk_window_get_size
      case 0x26 => _window_set_arrangement(args) // glk_window_set_arrangement
      case 0x27 => _window_get_arrangement(args) // glk_window_get_arranngement
      case 0x28 => _window_get_type(args) // glk_window_get_type
      case 0x29 => _window_get_parent(args) // glk_window_get_parent
      case 0x2a => _window_clear(args) // glk_window_clear
      case 0x2b => _window_move_cursor(args) // glk_window_move_cursor
      case 0x2c => _window_get_stream(args) // glk_window_get_stream
      case 0x2d => _window_set_echo_stream(args) // glk_window_set_echo_stream
      case 0x2e => _window_get_echo_stream(args) // glk_window_get_echo_stream
      case 0x2f => _set_window(args) // glk_set_window
      case 0x30 => _window_get_sibling(args) // glk_window_get_sibling
      case 0x40 => _stream_iterate(args) // glk_stream_iterate
      case 0x41 => _stream_get_rock(args) // glk_stream_get_rock
      case 0x42 => _stream_open_file(args) // glk_stream_open_file
      case 0x43 => _stream_open_memory(args) // glk_stream_open_memory
      case 0x44 => _stream_close(args) // glk_stream_close
      case 0x45 => _stream_set_position(args) // glk_stream_set_position
      case 0x46 => _stream_get_position(args) // glk_stream_get_position
      case 0x47 => _stream_set_current(args) // glk_stream_set_current
      case 0x48 => _stream_get_current(args) // glk_stream_get_current
      case 0x60 => _fileref_create_temp(args) // glk_fileref_create_temp
      case 0x61 => _fileref_create_by_name(args) // glk_fileref_create_by_name
      case 0x62 => _fileref_create_by_prompt(args) // glk_fileref_create_by_prompt
      case 0x63 => _fileref_destroy(args) // glk_fileref_destroy
      case 0x64 => _fileref_iterate(args) // glk_fileref_iterate
      case 0x65 => _fileref_get_rock(args) // glk_fileref_get_rock
      case 0x66 => _fileref_delete_file(args) // glk_fileref_delete_file
      case 0x67 => _fileref_does_file_exist(args) // glk_fileref_does_file_exist
      case 0x68 => _fileref_create_from_fileref(args) // glk_fileref_create_from_fileref
      case 0x80 => _put_char(args) // glk_put_char
      case 0x81 => _put_char_stream(args) // glk_put_char_stream
      case 0x82 => _put_string(args) // glk_put_string
      case 0x83 => _put_string_stream(args) // glk_put_string_stream
      case 0x84 => _put_buffer(args) // glk_put_buffer
      case 0x85 => _put_buffer_stream(args) // glk_put_buffer_stream
      case 0x86 => _set_style(args) // glk_set_style
      case 0x87 => _set_style_stream(args) // glk_set_style_stream
      case 0x90 => _get_char_stream(args) // glk_get_char_stream
      case 0x91 => _get_line_stream(args) // glk_get_line_stream
      case 0x92 => _get_buffer_stream(args) // glk_get_buffer_stream
      case 0xa0 => _char_to_lower(args) // glk_char_to_lower
      case 0xa1 => _char_to_upper(args) // glk_char_to_upper
      case 0xb0 => _stylehint_set(args) // glk_stylehint_set
      case 0xb1 => _stylehint_clear(args) // glk_stylehint_clear
      case 0xb2 => _style_distinguish(args) // glk_style_distinguish
      case 0xb3 => _style_measure(args) // glk_style_measure
      case 0xc0 => _select(args) // glk_select
      case 0xc1 => _select_poll(args) // glk_select_poll
      case 0xd0 => _request_line_event(args) // glk_request_line_event
      case 0xd1 => _cancel_line_event(args) // glk_cancel_line_event
      case 0xd2 => _request_char_event(args) // glk_request_char_event
      case 0xd3 => _cancel_char_event(args) // glk_cancel_char_event
      case 0xd4 => _request_mouse_event(args) // glk_request_mouse_event
      case 0xd5 => _cancel_mouse_event(args) // glk_cancel_mouse_event
      case 0xd6 => _request_timer_events(args) // glk_request_timer_events
      case 0xe0 => _image_get_info(args) // glk_image_get_info
      case 0xe1 => _image_draw(args) // glk_image_draw
      case 0xe2 => _image_draw_scaled(args) // glk_image_draw_scaled
      case 0xe8 => _window_flow_break(args) // glk_window_flow_break
      case 0xe9 => _window_erase_rect(args) // glk_window_erase_rect
      case 0xea => _window_fill_rect(args) // glk_window_fill_rect
      case 0xeb => _window_set_background_color(args) // glk_window_set_background_color
      case 0xf0 => _schannel_iterate(args) // glk_schannel_iterate
      case 0xf1 => _schannel_get_rock(args) // glk_schannel_get_rock
      case 0xf2 => _schannel_create(args) // glk_schannel_create
      case 0xf3 => _schannel_destroy(args) // glk_destroy
      case 0xf8 => _schannel_play(args) // glk_schannel_play
      case 0xf9 => _schannel_play_ext(args) // glk_schannel_play_ext
      case 0xfa => _schannel_stop(args) // glk_schannel_stop
      case 0xfb => _schannel_set_volume(args) // glk_schannel_set_volume
      case 0xfc => _sound_load_hint(args) // glk_sound_load_hint
      case 0x100 => _set_hyperlink(args) // glk_set_hyperlink
      case 0x101 => _set_hyperlink_stream(args) // glk_set_hyperlink_stream
      case 0x102 => _request_hyperlink_event(args) // glk_request_hyperlink_event
      case 0x103 => _cancel_hyperlink_event(args) // glk_cancel_hyperlink_event
      case 0x120 => _buffer_to_lower_case_uni(args) // glk_buffer_to_lower_case_uni
      case 0x121 => _buffer_to_upper_case_uni(args) // glk_buffer_to_upper_case_uni
      case 0x122 => _buffer_to_title_case_uni(args) // glk_buffer_to_title_case_uni
      case 0x128 => _put_char_uni(args) // glk_put_char_uni
      case 0x129 => _put_string_uni(args) // glk_put_string_uni
      case 0x12a => _put_buffer_uni(args) // glk_put_buffer_uni
      case 0x12b => _put_char_stream_uni(args) // glk_put_char_stream_uni
      case 0x12c => _put_string_stream_uni(args) // glk_put_string_stream_uni
      case 0x12d => _put_buffer_stream_uni(args) // glk_put_buffer_stream_uni
      case 0x130 => _get_char_stream_uni(args) // glk_get_char_stream_uni
      case 0x131 => _get_buffer_stream_uni(args) // glk_get_buffer_stream_uni
      case 0x132 => _get_line_stream_uni(args) // glk_get_line_stream_uni
      case 0x138 => _stream_open_file_uni(args) // glk_stream_open_file_uni
      case 0x139 => _stream_open_memory_uni(args) // glk_stream_open_memory_uni
      case 0x140 => _request_char_event_uni(args) // glk_request_char_event_uni
      case 0x141 => _request_line_event_uni(args) // glk_request_line_event_uni
      case _ =>
        throw new IllegalArgumentException("unknown GLK id: $%02x".format(id))
    }
  }

  // ***********************************************************************
  // ***** Dispatch Interface
  // ***** These functions just delegate to their respective
  // ***** static glk_* functions. The entry point is the dispatch()
  // ***** method, usually called by the glk instruction in Glulx.
  // ***** Void Glk functions return 0 to the VM.
  // ***********************************************************************
  private def _buffer_to_lower_case_uni(args: Array[Int]): Int = {
    glk.buffer_to_lower_case_uni(_state, args(0), args(1), args(2))
  }
  private def _buffer_to_upper_case_uni(args: Array[Int]): Int = {
    glk.buffer_to_upper_case_uni(_state, args(0), args(1), args(2))
  }
  private def _buffer_to_title_case_uni(args: Array[Int]): Int = {
    glk.buffer_to_title_case_uni(_state, args(0), args(1), args(2), args(3))
  }
  private def _cancel_char_event(args: Array[Int]): Int = {
    glk.cancel_char_event(args(0))
    0
  }
  private def _cancel_hyperlink_event(args: Array[Int]): Int = {
    glk.cancel_hyperlink_event(args(0))
    0
  }
  private def _cancel_line_event(args: Array[Int]): Int = {
    glk.cancel_line_event(args(0), args(1))
    0
  }
  private def _cancel_mouse_event(args: Array[Int]): Int = {
    glk.cancel_mouse_event(args(0))
    0
  }
  private def _char_to_lower(args: Array[Int]): Int = {
    glk.char_to_lower((args(0) & 0xff).asInstanceOf[Char])
  }
  private def _char_to_upper(args: Array[Int]): Int = {
    glk.char_to_upper((args(0) & 0xff).asInstanceOf[Char])
  }
  private def _exit(args: Array[Int]): Int = {
    glk.exit(_state)
    0
  }
  private def _fileref_create_by_name(args: Array[Int]): Int = {
    glk.fileref_create_by_name(args(0), cstringAt(args(1)), args(2))
  }
  private def _fileref_create_by_prompt(args: Array[Int]): Int = {
    glk.fileref_create_by_prompt(args(0), args(1), args(2))
  }
  private def _fileref_create_from_fileref(args: Array[Int]): Int = {
    glk.fileref_create_from_fileref(args(0), args(1), args(2))
  }
  private def _fileref_create_temp(args: Array[Int]): Int = {
    glk.fileref_create_temp(args(0), args(1))
  }
  private def _fileref_delete_file(args: Array[Int]): Int = {
    glk.fileref_delete_file(args(0))
    0
  }
  private def _fileref_destroy(args: Array[Int]): Int = {
    glk.fileref_destroy(args(0))
    0
  }
  private def _fileref_does_file_exist(args: Array[Int]): Int = {
    glk.fileref_does_file_exist(args(0))
  }
  private def _fileref_get_rock(args: Array[Int]): Int = {
    glk.fileref_get_rock(args(0))
  }
  private def _fileref_iterate(args: Array[Int]): Int = {
    returnIterateResult(glk.fileref_iterate(args(0)), args(1))
  }
  private def _gestalt(args: Array[Int]): Int = {
    glk.gestalt(args(0), args(1))
  }
  private def _gestalt_ext(args: Array[Int]): Int = {
    glk.gestalt_ext(_state, args(0), args(1), args(2), args(3))
  }
  private def _get_buffer_stream(args: Array[Int]): Int = {
    glk.get_buffer_stream(_state, args(0), args(1), args(2))
  }
  private def _get_buffer_stream_uni(args: Array[Int]): Int = {
    glk.get_buffer_stream_uni(_state, args(0), args(1), args(2))
  }
  private def _get_char_stream(args: Array[Int]): Int = {
    glk.get_char_stream(args(0))
  }
  private def _get_char_stream_uni(args: Array[Int]): Int = {
    glk.get_char_stream_uni(args(0))
  }
  private def _get_line_stream(args: Array[Int]): Int = {
    glk.get_line_stream(_state, args(0), args(1), args(2))
  }
  private def _get_line_stream_uni(args: Array[Int]): Int = {
    glk.get_line_stream_uni(_state, args(0), args(1), args(2))
  }
  private def _image_draw(args: Array[Int]): Int = {
    glk.image_draw(args(0), args(1), args(2), args(3))
  }
  private def _image_draw_scaled(args: Array[Int]): Int = {
    glk.image_draw_scaled(args(0), args(1), args(2), args(3), args(4), args(5))
  }
  // Writes the image dimensions through the width/height pointers (0 = discard)
  // and returns 1 when the image exists, 0 otherwise.
  private def _image_get_info(args: Array[Int]): Int = {
    val dim = glk.image_get_info(args(0))
    val widthPtr = args(1)
    val heightPtr = args(2)
    if (dim == null) 0
    else {
      if (widthPtr != 0) _state.setMemIntAt(widthPtr, dim.width)
      if (heightPtr != 0) _state.setMemIntAt(heightPtr, dim.height)
      1
    }
  }
  private def _put_buffer(args: Array[Int]): Int = {
    glk.put_buffer(_state, args(0), args(1))
    0
  }
  private def _put_buffer_stream(args: Array[Int]): Int = {
    glk.put_buffer_stream(_state, args(0), args(1), args(2))
    0
  }
  private def _put_buffer_stream_uni(args: Array[Int]): Int = {
    glk.put_buffer_stream_uni(_state, args(0), args(1), args(2))
    0
  }
  private def _put_buffer_uni(args: Array[Int]): Int = {
    glk.put_buffer_uni(_state, args(0), args(1))
    0
  }
  private def _put_char(args: Array[Int]): Int = {
    glk.put_char((args(0) & 0xffff).asInstanceOf[Char])
    0
  }
  private def _put_char_uni(args: Array[Int]): Int = {
    glk.put_char_uni(args(0))
    0
  }
  private def _put_char_stream(args: Array[Int]): Int = {
    glk.put_char_stream(args(0), args(1).toChar)
    0
  }
  private def _put_char_stream_uni(args: Array[Int]): Int = {
    glk.put_char_stream_uni(args(0), args(1))
    0
  }
  private def _put_string(args: Array[Int]): Int = {
    glk.put_string(_state, args(0))
    0
  }
  private def _put_string_stream(args: Array[Int]): Int = {
    glk.put_string_stream(_state, args(0), args(1))
    0
  }
  private def _put_string_stream_uni(args: Array[Int]): Int = {
    glk.put_string_stream_uni(_state, args(0), args(1))
    0
  }
  private def _put_string_uni(args: Array[Int]): Int = {
    glk.put_string_uni(_state, args(0))
    0
  }
  private def _request_char_event(args: Array[Int]): Int = {
    glk.request_char_event(args(0))
    0
  }
  private def _request_char_event_uni(args: Array[Int]): Int = {
    glk.request_char_event_uni(args(0))
    0
  }
  private def _request_hyperlink_event(args: Array[Int]): Int = {
    glk.request_hyperlink_event(args(0))
    0
  }
  private def _request_line_event(args: Array[Int]): Int = {
    glk.request_line_event(args(0), args(1), args(2), args(3))
    0
  }
  private def _request_line_event_uni(args: Array[Int]): Int = {
    glk.request_line_event_uni(args(0), args(1), args(2), args(3))
    0
  }
  private def _request_mouse_event(args: Array[Int]): Int = {
    glk.request_mouse_event(args(0))
    0
  }
  private def _request_timer_events(args: Array[Int]): Int = {
    glk.request_timer_events(args(0))
    0
  }
  private def _schannel_create(args: Array[Int]): Int = glk.schannel_create(args(0))
  private def _schannel_destroy(args: Array[Int]): Int = {
    glk.schannel_destroy(args(0))
    0
  }
  private def _schannel_get_rock(args: Array[Int]): Int = glk.schannel_get_rock(args(0))
  private def _schannel_iterate(args: Array[Int]): Int = {
    returnIterateResult(glk.schannel_iterate(args(0)), args(1))
  }
  private def _schannel_play(args: Array[Int]): Int = glk.schannel_play(args(0), args(1))
  private def _schannel_play_ext(args: Array[Int]): Int = {
    glk.schannel_play_ext(args(0), args(1), args(2), args(3))
  }
  private def _schannel_set_volume(args: Array[Int]): Int = {
    glk.schannel_set_volume(args(0), args(1))
    0
  }
  private def _schannel_stop(args: Array[Int]): Int = {
    glk.schannel_stop(args(0))
    0
  }
  private def _select(args: Array[Int]): Int = {
    glk.select(args(0))
    0
  }
  private def _select_poll(args: Array[Int]): Int = {
    glk.select_poll(args(0))
    0
  }
  private def _set_hyperlink(args: Array[Int]): Int = {
    glk.set_hyperlink(args(0))
    0
  }
  private def _set_hyperlink_stream(args: Array[Int]): Int = {
    glk.set_hyperlink_stream(args(0), args(1))
    0
  }
  private def _set_interrupt_handler(args: Array[Int]): Int = {
    glk.set_interrupt_handler(_state, args(0))
    0
  }
  private def _set_style(args: Array[Int]): Int = {
    glk.set_style(args(0))
    0
  }
  private def _set_style_stream(args: Array[Int]): Int = {
    glk.set_style_stream(args(0), args(1))
    0
  }
  private def _set_window(args: Array[Int]): Int = {
    glk.set_window(args(0))
    0
  }
  private def _sound_load_hint(args: Array[Int]): Int = {
    glk.sound_load_hint(args(0), args(1))
    0
  }
  // Closes the stream and reports its read/write counts through resultRef
  // (-1 = push both on the stack, 0 = discard, otherwise write two ints to memory).
  private def _stream_close(args: Array[Int]): Int = {
    val result = glk.stream_close(args(0))
    val resultRef = args(1)
    if (resultRef == -1) {
      // push on stack
      _state.pushInt(result.readCount)
      _state.pushInt(result.writeCount)
    } else if (resultRef != 0) {
      // write to mem
      _state.setMemIntAt(resultRef, result.readCount)
      _state.setMemIntAt(resultRef + 4, result.writeCount)
    }
    0
  }
  private def _stream_get_current(args: Array[Int]) = glk.stream_get_current
  private def _stream_get_position(args: Array[Int]) = glk.stream_get_position(args(0))
  private def _stream_get_rock(args: Array[Int]) = glk.stream_get_rock(args(0))
  private def _stream_iterate(args: Array[Int]): Int = {
    returnIterateResult(glk.stream_iterate(args(0)), args(1))
  }
  private def _stream_open_file(args: Array[Int]) = {
    glk.stream_open_file(args(0), args(1), args(2))
  }
  private def _stream_open_file_uni(args: Array[Int]): Int = {
    // Fixed: this previously delegated to the non-Unicode stream_open_file,
    // so glk_stream_open_file_uni requests opened 8-bit streams.
    glk.stream_open_file_uni(args(0), args(1), args(2))
  }
  private def _stream_open_memory(args: Array[Int]) = {
    glk.stream_open_memory(_state, args(0), args(1), args(2), args(3))
  }
  private def _stream_open_memory_uni(args: Array[Int]) = {
    glk.stream_open_memory_uni(_state, args(0), args(1), args(2), args(3))
  }
  private def _stream_set_current(args: Array[Int]): Int = {
    glk.stream_set_current(args(0))
    0
  }
  private def _stream_set_position(args: Array[Int]): Int = {
    glk.stream_set_position(args(0), args(1), args(2))
    0
  }
  private def _style_distinguish(args: Array[Int]): Int = {
    glk.style_distinguish(args(0), args(1), args(2))
  }
  private def _style_measure(args: Array[Int]): Int = {
    glk.style_measure(_state, args(0), args(1), args(2), args(3))
  }
  private def _stylehint_clear(args: Array[Int]): Int = {
    glk.stylehint_clear(args(0), args(1), args(2))
    0
  }
  private def _stylehint_set(args: Array[Int]): Int = {
    glk.stylehint_set(args(0), args(1), args(2), args(3))
    0
  }
  private def _tick(args: Array[Int]): Int = {
    glk.tick
    0
  }
  private def _window_clear(args: Array[Int]): Int = {
    glk.window_clear(args(0))
    0
  }
  private def _window_close(args: Array[Int]): Int = {
    val charsWritten = glk.window_close(args(0))
    val streamResultPtr = args(1)
    if (streamResultPtr != 0) _state.setMemIntAt(streamResultPtr, charsWritten)
    0
  }
  private def _window_erase_rect(args: Array[Int]): Int = {
    glk.window_erase_rect(args(0), args(1), args(2), args(3), args(4))
    0
  }
  private def _window_fill_rect(args: Array[Int]): Int = {
    glk.window_fill_rect(args(0), args(1), args(2), args(3), args(4), args(5))
    0
  }
  private def _window_flow_break(args: Array[Int]): Int = {
    glk.window_flow_break(args(0))
    0
  }
  private def _window_get_arrangement(args: Array[Int]): Int = {
    glk.window_get_arrangement(_state, args(0), args(1), args(2), args(3))
    0
  }
  private def _window_get_parent(args: Array[Int]) = glk.window_get_parent(args(0))
  private def _window_get_rock(args: Array[Int]) = glk.window_get_rock(args(0))
  private def _window_get_root(args: Array[Int]) = glk.window_get_root
  private def _window_get_sibling(args: Array[Int]) = glk.window_get_sibling(args(0))
  // Writes the window dimensions through the width/height pointers
  // (-1 = push on the stack, 0 = discard, otherwise memory address).
  private def _window_get_size(args: Array[Int]): Int = {
    val dim = glk.window_get_size(args(0))
    val widthPtr = args(1)
    val heightPtr = args(2)
    if (widthPtr == -1) _state.pushInt(dim.width)
    else if (widthPtr != 0) _state.setMemIntAt(widthPtr, dim.width)
    if (heightPtr == -1) _state.pushInt(dim.height)
    else if (heightPtr != 0) _state.setMemIntAt(heightPtr, dim.height)
    0
  }
  private def _window_get_stream(args: Array[Int]): Int = glk.window_get_stream(args(0))
  private def _window_get_type(args: Array[Int]): Int = glk.window_get_type(args(0))
  private def _window_iterate(args: Array[Int]): Int = {
    returnIterateResult(glk.window_iterate(args(0)), args(1))
  }
  private def _window_move_cursor(args: Array[Int]): Int = {
    glk.window_move_cursor(args(0), args(1), args(2))
    0
  }
  private def _window_open(args: Array[Int]): Int = {
    glk.window_open(args(0), args(1), args(2), args(3), args(4))
  }
  private def _window_set_arrangement(args: Array[Int]): Int = {
    glk.window_set_arrangement(args(0), args(1), args(2), args(3))
    0
  }
  private def _window_set_background_color(args: Array[Int]): Int = {
    glk.window_set_background_color(args(0), args(1))
    0
  }
  private def _window_set_echo_stream(args: Array[Int]): Int = {
    glk.window_set_echo_stream(args(0), args(1))
    0
  }
  private def _window_get_echo_stream(args: Array[Int]): Int = {
    glk.window_get_echo_stream(args(0))
  }

  // ***********************************************************************
  // ***** Utility Functions
  // **************************************
  // Writes the iterated object's rock through rockPtr and returns its id.
  private def returnIterateResult(result: GlkIterateResult, rockPtr: Int) = {
    // Note: -1 for a reference means "stack"
    if (rockPtr == -1) _state.pushInt(result.rock)
    else if (rockPtr != 0) _state.setMemIntAt(rockPtr, result.rock)
    result.id
  }
  // Reads an unencoded Glulx string (type byte 0xE0, then Latin-1 bytes,
  // 0-terminated) from VM memory and returns it as a JVM String.
  private def cstringAt(address: Int)= {
    val typeByte = _state.memByteAt(address)
    if (typeByte != 0xe0) {
      throw new IllegalArgumentException(
        "only uncompressed C Strings allowed in Glk")
    }
    val builder = new StringBuilder
    var offset = 1
    var c = _state.memByteAt(address + offset)
    while (c != 0) {
      // Fixed: the terminating 0 byte is no longer appended to the result
      // (the old loop added a trailing '\u0000' to every string).
      builder.append(c.toChar)
      offset += 1
      c = _state.memByteAt(address + offset)
    }
    builder.toString
  }
}
| logicmoo/zmpp2 | zmpp-glk/src/main/scala/org/zmpp/glk/GlkDispatch.scala | Scala | bsd-3-clause | 23,621 |
// See LICENSE.txt for license details.
package examples
import chisel3._
import chisel3.util._
//A 4-bit adder with carry in and carry out
/**
 * Four-bit ripple-carry adder built from four chained FullAdder instances.
 * Each stage's carry-out feeds the next stage's carry-in; the four sum bits
 * are concatenated (bit 3 as MSB) into io.Sum.
 *
 * NOTE(review): Chisel derives emitted instance/signal names from these val
 * names, so the explicit Adder0..Adder3 structure is deliberate.
 */
class Adder4 extends Module {
  val io = IO(new Bundle {
    val A = Input(UInt(4.W))
    val B = Input(UInt(4.W))
    val Cin = Input(UInt(1.W))
    val Sum = Output(UInt(4.W))
    val Cout = Output(UInt(1.W))
  })
  //Adder for bit 0 (least significant), seeded with the external carry-in
  val Adder0 = Module(new FullAdder())
  Adder0.io.a := io.A(0)
  Adder0.io.b := io.B(0)
  Adder0.io.cin := io.Cin
  val s0 = Adder0.io.sum
  //Adder for bit 1
  val Adder1 = Module(new FullAdder())
  Adder1.io.a := io.A(1)
  Adder1.io.b := io.B(1)
  Adder1.io.cin := Adder0.io.cout
  // Cat places its first argument in the high bits: s1 = {sum1, sum0}
  val s1 = Cat(Adder1.io.sum, s0)
  //Adder for bit 2
  val Adder2 = Module(new FullAdder())
  Adder2.io.a := io.A(2)
  Adder2.io.b := io.B(2)
  Adder2.io.cin := Adder1.io.cout
  val s2 = Cat(Adder2.io.sum, s1)
  //Adder for bit 3 (most significant)
  val Adder3 = Module(new FullAdder())
  Adder3.io.a := io.A(3)
  Adder3.io.b := io.B(3)
  Adder3.io.cin := Adder2.io.cout
  io.Sum := Cat(Adder3.io.sum, s2).asUInt
  // Carry-out of the final stage is the adder's carry-out
  io.Cout := Adder3.io.cout
}
| timtian090/Playground | chiselTutorial/src/main/scala/examples/Adder4.scala | Scala | mit | 1,095 |
package net.aicomp.terraforming
import java.applet.Applet
import scala.util.control.Exception.allCatch
/**
 * Applet entry point that starts the game in light-GUI replay mode.
 * Reads the applet parameters "fps" (defaulting to 15.0 when absent or
 * unparsable) and "replay" (path/identifier of the replay to load).
 */
class AppletReplayMain extends Applet {
  override def start() {
    // allCatch.opt yields None on any parse failure (including a missing parameter)
    val fps = allCatch.opt(getParameter("fps").toDouble).getOrElse(15.0)
    val replay = getParameter("replay")
    val arguments = Array(
      "-" + Main.LIGHT_GUI_MODE,
      "-" + Main.REPLAY_MODE, replay,
      "-" + Main.FPS, fps.toString)
    Main.main(arguments)
  }
}
import sbt.Keys._
import sbt._
object PublishConfig {

  // General settings for subprojects to be published.
  // Sonatype credentials are taken from SONATYPE_USERNAME / SONATYPE_PASSWORD;
  // when either is missing, an empty credentials setting is contributed instead.
  lazy val doPublish = {
    val credentialSetting =
      (Option(System.getenv().get("SONATYPE_USERNAME")),
        Option(System.getenv().get("SONATYPE_PASSWORD"))) match {
        case (Some(username), Some(password)) =>
          credentials += Credentials(
            "Sonatype Nexus Repository Manager",
            "oss.sonatype.org",
            username,
            password)
        case _ =>
          credentials ++= Seq()
      }
    Seq(
      publishMavenStyle := true,
      publishArtifact in Test := false,
      credentialSetting,
      publishTo := {
        val nexus = "https://oss.sonatype.org/"
        if (isSnapshot.value) Some("snapshots" at (nexus + "content/repositories/snapshots"))
        else Some("releases" at (nexus + "service/local/staging/deploy/maven2"))
      }
    )
  }

  // General settings for subprojects not to be published
  lazy val noPublish = Seq(
    publishArtifact := false,
    publish := {},
    publishLocal := {}
  )
}
| lefou/blended | project/PublishConfig.scala | Scala | apache-2.0 | 1,000 |
package fr.ramiro.sfuzzy.dsl
import fr.ramiro.sfuzzy.FunctionsUtils.{ max, min }
import fr.ramiro.sfuzzy._
import org.scalatest.FunSuite
import MembershipFunctions.{ piecewiseLinear => l }
/**
* Created by Ramiro on 11/05/2017.
*/
class QualifyTest extends FunSuite {
  // NOTE(review): this test only exercises construction of the fuzzy-rule DSL
  // (a smoke test that the implicits and rule syntax compile and build); it
  // makes no assertions on the resulting FuzzyFunction.
  test("AND -> MIN") {
    // Every membership function below uses the same placeholder piecewise-linear
    // breakpoints (0,1)-(0,1); the test targets DSL wiring, not real curves.
    case object scoring extends FuzzyVar {
      val value = l((0, 1), (0, 1))
      val veryHigh = l((0, 1), (0, 1))
      val high = l((0, 1), (0, 1))
      val midHigh = l((0, 1), (0, 1))
      val midLow = l((0, 1), (0, 1))
      val low = l((0, 1), (0, 1))
    }
    // Socio-economic level bands
    case object sel extends FuzzyVar {
      val a = l((0, 1), (0, 1))
      val b = l((0, 1), (0, 1))
      val c1 = l((0, 1), (0, 1))
      val c2 = l((0, 1), (0, 1))
      val c3 = l((0, 1), (0, 1))
      val d1 = l((0, 1), (0, 1))
      val low = l((0, 1), (0, 1))
    }
    case object city extends FuzzyVar {
      val bsas = l((0, 1), (0, 1))
      val other = l((0, 1), (0, 1))
    }
    case object occupation_type extends FuzzyVar {
      val good = l((0, 1), (0, 1))
      val other_1 = l((0, 1), (0, 1))
      val other_2 = l((0, 1), (0, 1))
    }
    case object scoring_partner extends FuzzyVar {
      val midLow = l((0, 1), (0, 1))
      val low = l((0, 1), (0, 1))
      val noPartner = l((0, 1), (0, 1))
      val fakeRange = l((0, 1), (0, 1))
    }
    // Output variable: qualification decision, defuzzified by centre of gravity
    // over [0, 30] with step 1e-2.
    case object qualify extends DefuzzyVar {
      val accept = l((0, 1), (0, 1))
      val manual_accept = l((0, 1), (0, 1))
      val manual_reject = l((0, 1), (0, 1))
      val reject = l((0, 1), (0, 1))
      val method = FunctionsUtils.cog(0, 30, 1e-2d)
      val default = 0
    }
    // Output variable: credit-limit multiplier, same defuzzification setup.
    case object credLimMul extends DefuzzyVar {
      val veryHigh = l((0, 1), (0, 1))
      val high = l((0, 1), (0, 1))
      val midHigh = l((0, 1), (0, 1))
      val midLow = l((0, 1), (0, 1))
      val low = l((0, 1), (0, 1))
      val method = FunctionsUtils.cog(0, 30, 1e-2d)
      val default = 0
    }
    // Bring the output terms (accept, veryHigh, ...) into scope for the rules.
    import qualify._
    import credLimMul._
    // Mamdani-style configuration: AND=min, OR=max (matches the test name),
    // activation=min, accumulation=max.
    implicit val config = FuzzyConfiguration(
      andMethod = min,
      orMethod = max,
      activation = min,
      accumulation = max
    )
    import Rule._
    val fuzzyRules = FuzzyFunction(
      // Scoring rules
      IF(scoring IS scoring.veryHigh) THEN (qualify IS accept, credLimMul IS veryHigh),
      IF(scoring IS scoring.high) THEN (qualify IS accept, credLimMul IS high),
      IF(scoring IS scoring.midHigh) THEN (qualify IS manual_accept, credLimMul IS midHigh),
      IF(scoring IS scoring.midLow) THEN (qualify IS manual_reject, credLimMul IS midLow),
      IF(scoring IS scoring.low) THEN (qualify IS reject, credLimMul IS low),
      // Social economic level
      IF((sel IS sel.a) OR (sel IS sel.b)) THEN (qualify IS accept, credLimMul IS veryHigh),
      IF(sel IS sel.c1) THEN (qualify IS accept, credLimMul IS high),
      IF(sel IS sel.c2) THEN (qualify IS manual_accept, credLimMul IS midHigh),
      IF(sel IS sel.c3) THEN (qualify IS manual_reject, credLimMul IS midLow),
      IF((sel IS sel.d1) OR (sel IS sel.low)) THEN (qualify IS reject, credLimMul IS low),
      // Ocupation type
      IF(NOT(occupation_type IS occupation_type.good)) THEN (qualify IS reject, credLimMul IS low),
      // City
      IF((city IS city.other) AND (scoring IS scoring.midLow) OR (scoring IS scoring.low)) THEN (qualify IS reject, credLimMul IS low),
      // Partner's scoring
      IF(scoring_partner IS scoring_partner.midLow) THEN (qualify IS manual_reject, credLimMul IS midLow),
      IF(scoring_partner IS scoring_partner.low) THEN (qualify IS reject, credLimMul IS low)
    )
  }
}
| rrramiro/sFuzzyLogic | src/test/scala/fr/ramiro/sfuzzy/dsl/QualifyTest.scala | Scala | apache-2.0 | 3,600 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.config
import javax.inject.Inject
import play.api.Configuration
/**
 * Exposes the fully-qualified assets prefix, assembled from the mandatory
 * `assets.url` and `assets.version` configuration entries.
 */
class AssetsConfig @Inject() (configuration: Configuration) {
  // Base URL of the assets host and the pinned assets version; both keys must
  // be present in the configuration (Configuration.get throws otherwise).
  private val baseUrl = configuration.get[String]("assets.url")
  private val version = configuration.get[String]("assets.version")

  /** Concatenation of `assets.url` and `assets.version`. */
  val assetsPrefix: String = s"$baseUrl$version"
}
| hmrc/play-ui | src/main/scala/uk/gov/hmrc/play/config/AssetsConfig.scala | Scala | apache-2.0 | 960 |
package com.github.mdr.mash.functions
import com.github.mdr.mash.compiler.{ CompilationUnit, Compiler }
import com.github.mdr.mash.evaluator.StandardEnvironment
import com.github.mdr.mash.inference.TypedArguments
import com.github.mdr.mash.parser.AbstractSyntax.InvocationExpr
import org.scalatest.{ FlatSpec, Matchers }
/**
 * Verifies how call-site arguments are bound to parameters during type
 * inference, covering positional, variadic, named, optional-with-default and
 * "last argument" cases.
 */
class BindTypesTest extends FlatSpec with Matchers {

  "A single positional argument" should "be tied to its parameter" in {
    val param = Parameter(Some("param"), Some("test param"))
    ParameterModel(Seq(param)).boundTypeParams("f 1").paramAt(0) should equal(Some(param))
  }

  "Multiple positional arguments" should "be tied to ordinary positional parameters" in {
    val param1 = Parameter(Some("param1"), Some("test param"))
    val param2 = Parameter(Some("param2"), Some("test param"))
    val param3 = Parameter(Some("param3"), Some("test param"))
    ParameterModel(Seq(param1, param2, param3)).boundTypeParams("f 1 2 3").posToParam should equal(Map(
      0 -> param1, 1 -> param2, 2 -> param3))
  }

  "Multiple positional arguments" should "be tied to a single variadic parameter" in {
    val param = Parameter(Some("param"), Some("test param"), isVariadic = true)
    ParameterModel(Seq(param)).boundTypeParams("f 1 2 3").posToParam should equal(Map(
      0 -> param, 1 -> param, 2 -> param))
  }

  "Extra positional arguments" should "be tied to a single variadic parameter" in {
    val param1 = Parameter(Some("param1"), Some("test param"))
    val param2 = Parameter(Some("param2"), Some("test param"), isVariadic = true)
    ParameterModel(Seq(param1, param2)).boundTypeParams("f 1 2 3").posToParam should equal(Map(
      0 -> param1, 1 -> param2, 2 -> param2))
  }

  "Named arguments" should "be tied to their parameter" in {
    val param = Parameter(Some("param"), Some("test param"))
    ParameterModel(Seq(param)).boundTypeParams("f --param=1").posToParam should equal(Map(0 -> param))
  }

  "Multiple named arguments" should "be tied to their parameter" in {
    val param1 = Parameter(Some("param1"), Some("test param"))
    val param2 = Parameter(Some("param2"), Some("test param"))
    ParameterModel(Seq(param1, param2)).boundTypeParams("f --param2=1 --param1=2").posToParam should equal(
      Map(0 -> param2, 1 -> param1))
  }

  // "Last parameter" semantics: a trailing parameter absorbs the final argument.
  "Last arguments" should "be tied to the last parameter with variadics" in {
    val param1 = Parameter(Some("param1"), Some("test param"), isVariadic = true)
    val param2 = Parameter(Some("param2"), Some("test param"))
    ParameterModel(Seq(param1, param2)).boundTypeParams("f 1 2 3").posToParam should equal(
      Map(0 -> param1, 1 -> param1, 2 -> param2))
  }

  "Last arguments" should "be tied to the last parameter with optionals" in {
    val param1 = Parameter(Some("param1"), Some("test param"), defaultValueGeneratorOpt = Some(true))
    val param2 = Parameter(Some("param2"), Some("test param"))
    ParameterModel(Seq(param1, param2)).boundTypeParams("f 1").posToParam should equal(
      Map(0 -> param2))
  }

  "Last arguments" should "not be used if specified by name" in {
    val param1 = Parameter(Some("param1"), Some("test param"), defaultValueGeneratorOpt = Some(true))
    val param2 = Parameter(Some("param2"), Some("test param"))
    ParameterModel(Seq(param1, param2)).boundTypeParams("f 1 --param2=2").posToParam should equal(
      Map(0 -> param1, 1 -> param2))
  }

  "All the features" should "work together" in {
    val optionalParam = Parameter(Some("optional"), Some("test param"), defaultValueGeneratorOpt = Some(true))
    val variadicParam = Parameter(Some("variadic"), Some("test param"), isVariadic = true)
    val lastParam = Parameter(Some("last"), Some("test param"))
    val positionalParam = Parameter(Some("positional"), Some("test param"))
    val namedParam = Parameter(Some("named"), Some("test param"), isFlag = true)
    val parameters = ParameterModel(Seq(positionalParam, optionalParam, variadicParam, lastParam, namedParam))
    parameters.boundTypeParams("f 1 2 3 4 5 --named=5").posToParam should equal(
      Map(0 -> positionalParam, 1 -> optionalParam, 2 -> variadicParam, 3 -> variadicParam, 4 -> lastParam, 5 -> namedParam))
  }

  // Test helper: compiles the given source snippet and binds the argument
  // types of the first invocation expression found in it.
  implicit class RichParameterModel(parameters: ParameterModel) {
    def boundTypeParams(s: String): BoundTypeParams =
      parameters.bindTypes(getArguments(s))

    private def getArguments(s: String): TypedArguments = {
      val expr = Compiler.compileForgiving(CompilationUnit(s), StandardEnvironment.create.bindings)
      val Some(invocationExpr) = expr.find { case iexpr: InvocationExpr ⇒ iexpr }
      TypedArguments.from(invocationExpr)
    }
  }
} | mdr/mash | src/test/scala/com/github/mdr/mash/functions/BindTypesTest.scala | Scala | mit | 4,654 |
package sp.areus
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import sp.domain._
import sp.system.messages._
import scala.concurrent.duration._
import scala.xml._
/**
* Created by Kristofer on 2014-06-27.
*/
/**
 * Actor intended to import Visual Components simulation results (XML) into a
 * model. The actual parsing pipeline is currently commented out; `receive` is
 * a placeholder that matches only the literal message `None`.
 */
class VCImportService(modelHandler: ActorRef) extends Actor {
  implicit val timeout = Timeout(1 seconds)
  import context.dispatcher

  // NOTE(review): placeholder behavior — the real Request handling below is
  // commented out, so this actor currently does nothing useful.
  def receive = {
    case None => "TODO"
    // case Request(_, attr) => {
    // val reply = sender
    // extract(attr) match {
    // case Some((xmlString, model, name)) => {
    //
    // val areus = scala.xml.XML.loadString(xmlString)
    //
    // val robots = (areus \\\\ "SimulationResults").head.child
    //
    // val temp = robots.map{r =>
    // r.child.map{o =>
    // val tss = o \\\\ "TimeStamp"
    // tss.map {ts =>
    // val time = ts.attribute("time")
    // val joints = ts \\ "Joints" head
    // val positions = ts \\ "Position" head
    // val speed = ts \\ "Speed" head
    // val collisions = ts \\ "Collisions" head
    //
    // val jointValues = parseJointString(joints)
    //
    // //println(s"time: $time, joints: $joints")
    //
    //
    //
    // }
    // }
    //
    //
    // }
    //
    //
    //
    //
    //
    //
    //
    //
    //
    // reply ! "yes"
    //
    // }
    // case None => sender ! errorMessage(attr)
    // }
    // }
  }

  // Extracts the "values" attribute of a joints node, splits on whitespace and
  // strips the leading "name=" prefix from every entry.
  // NOTE(review): the parsed list is only printed, never returned or used —
  // looks like leftover debug output; confirm before relying on this method.
  def parseJointString(n: Node) = {
    val value = n.attribute("values").map(_.toString)
    value.map{s =>
      val js = s.trim.split("""\\s+""").toList
      val test = js.map(_.replaceAll("""^[^=]*=""", ""))
      println(test)
    }
  }

  // Commented-out helpers retained verbatim for reference.
  // def extract(attr: SPAttributes) = {
  // for {
  // xml <- attr.getAsString("file")
  // model <- attr.getAsID("model")
  // } yield (xml, model, attr.get("name"))
  // }
  //
  // def errorMessage(attr: SPAttributes) = {
  // SPError("The request is missing parameters: \\n" +
  // s"file: ${attr.getAsString("file")}" + "\\n" +
  // s"Request: ${attr}" )
  // }

  // def toAttr(n : Node): SPAttributeValue = {
  // val attr = n.attributes.asAttrMap.map{case (k, v) => k -> StringPrimitive(v)}
  // val value: SPAttributeValue = {
  // if (n.child.count(_.isInstanceOf[Text]) == 1) {
  // val value = StringPrimitive(n.text)
  // if (attr.isEmpty) value
  // else MapPrimitive(attr + ("value"->value))
  // }
  // else {
  // val children = n.child //.filter(n => n.isInstanceOf[Text] || n.isInstanceOf[Elem])
  // val fold = children.foldLeft(Map[String,List[SPAttributeValue]]()){
  // case (aggr, e: Elem) => {
  // val newAttr = toAttr(e)
  // val prev = aggr.getOrElse(e.label, List())
  // val xs = if (newAttr != MapPrimitive(Map())) newAttr :: prev else prev
  // aggr + (e.label -> xs)
  // }
  // case (aggr, t: Text) => aggr
  // }
  //
  // val map = fold collect {
  // case (k, x :: Nil) => k -> x
  // case (k, x :: xs) => k -> ListPrimitive(x :: xs)
  // }
  // MapPrimitive(map ++ attr)
  // }
  //
  // }
  // value
  // }
}
/** Factory for [[VCImportService]] actor [[Props]]. */
object VCImportService {
  // Explicit result type added; construction still goes through the
  // classOf-based Props overload, so the actor is created exactly as before.
  def props(modelHandler: ActorRef): Props =
    Props(classOf[VCImportService], modelHandler)
}
| kristoferB/SP | sp1/src/main/scala/sp/areus/VCImportService.scala | Scala | mit | 3,293 |
package im.tox.antox.callbacks
import android.content.Context
import im.tox.antox.tox.ToxSingleton
import im.tox.antox.utils.{AntoxFriend, Constants}
import im.tox.tox4j.core.callbacks.FriendActionCallback
object AntoxOnActionCallback {
  // Log tag. NOTE(review): not referenced anywhere in this file — confirm it
  // is used elsewhere before removing.
  private val TAG = "im.tox.antox.TAG"
}
/**
 * Tox callback for friend "action" (/me-style) messages. Delegates to the
 * shared message handler, tagging the message as an action.
 */
class AntoxOnActionCallback(private var ctx: Context) extends FriendActionCallback {

  /**
   * Forwards the action message to [[AntoxOnMessageCallback.handleMessage]],
   * decoding the raw bytes as UTF-8.
   */
  override def friendAction(friendNumber: Int, timeDelta: Int, message: Array[Byte]): Unit = {
    AntoxOnMessageCallback.handleMessage(ctx, friendNumber,
      ToxSingleton.getIdFromFriendNumber(friendNumber),
      // Use the charset constant rather than the charset-name string: avoids a
      // runtime charset lookup and the UnsupportedEncodingException code path.
      new String(message, java.nio.charset.StandardCharsets.UTF_8),
      Constants.MESSAGE_TYPE_ACTION)
  }
}
| 0xPoly/Antox | app/src/main/scala/im/tox/antox/callbacks/AntoxOnActionCallback.scala | Scala | gpl-3.0 | 658 |
/*
Implicit Flows: a prototype taint tracking system for implicit flows
Copyright (C) 2013 Petey Aldous <petey.aldous@utah.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.ucombinator.experimental
/**
 * One state of the abstract taint-tracking interpreter: a program position
 * (`statements`), an abstract environment, the set of tainted variables, and
 * the set of branch-point ids whose taint still influences control flow
 * ("context taint").
 */
case class AbstractState(program: AbstractProgram, statements: Int, env: Map[AbstractVariable, AbstractValue], taintedVars: Set[AbstractVariable], contextTaint: Set[Int]) {

  // Render the state as the statement it is about to execute.
  override def toString = program.statementTable(statements).toString

  /**
   * Successor states: labels fall through, gotos jump, assignments update the
   * environment and taint set, and ifs may enable both branches when the
   * abstract condition is ambiguous.
   */
  def next: Set[AbstractState] = {
    if (isEnd) {
      scala.sys.error("next: should be unreachable")
    } else {
      // Drop context-taint sources whose influence provably ends here: every
      // path from such a source must reach the current statement.
      val ctPrime = contextTaint.filter((source) => !program.mustReach(source).contains(statements))
      program.statementTable(statements) match {
        case AbstractLabelStatement(id, l) => Set(AbstractState(program, statements + 1, env, taintedVars, ctPrime))
        case AbstractGotoStatement(id, l) => Set(AbstractState(program, program.lookup(l), env, taintedVars, ctPrime))
        case AbstractAssignmentStatement(id, v, e) => {
          val envPrime = env + Pair(v, program.eval(e, env))
          // v becomes tainted if the RHS is tainted or we are executing under
          // tainted control flow (implicit flow); otherwise it is untainted.
          val tPrime = if (program.tainted(e, taintedVars) || !(contextTaint.isEmpty)) taintedVars + v else taintedVars - v
          Set(AbstractState(program, statements + 1, envPrime, tPrime, ctPrime))
        }
        case AbstractIfStatement(id, e, l) => {
          val condResult = program.eval(e, env)
          val statementListSet = Set(statements)
          // The abstract condition may cover zero and/or positive values, so
          // the fall-through and the jump successor can both be enabled.
          val fallThrough = if (AbstractValues.zero.contains(condResult)) Set(statements + 1) else statementListSet.empty
          val jump = if (AbstractValues.positive.contains(condResult)) Set(program.lookup(l)) else statementListSet.empty
          val sPrimes = fallThrough | jump
          // A tainted condition adds this branch point to the context taint.
          val ctPrimePrime = if (program.tainted(e, taintedVars)) {
            ctPrime + id
          } else {
            ctPrime
          }
          val stateSet = Set(this)
          sPrimes.foldLeft(stateSet.empty)((states, sPrime) => states + AbstractState(program, sPrime, env, taintedVars, ctPrimePrime))
        }
        case _ => throw new IllegalStateException("next: unknown statement: " + program.statementTable(statements))
      }
    }
  }

  // Terminal when positioned at the program's last line.
  def isEnd: Boolean = statements == program.lastLineNumber
}
/** Convenience constructor for the initial abstract state of the empty program. */
object AbstractStateFactory {
  def empty: AbstractState = AbstractProgramFactory.empty.firstState
} | peteyblueeyes/implicit-flows | src/org/ucombinator/experimental/AbstractState.scala | Scala | gpl-2.0 | 3,045 |
/*
* UGenGraph.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
import akka.NotUsed
import akka.stream.ClosedShape
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import de.sciss.fscape.Log.{graph => logGraph}
import de.sciss.fscape.graph.{Constant, UGenProxy}
import de.sciss.fscape.stream.{Allocator, Layer, StreamIn, StreamOut}
import scala.annotation.elidable
import scala.collection.immutable.{IndexedSeq => Vec}
object UGenGraph {
  /** Receives UGens during graph expansion. */
  trait Builder {
    /** Registers a fully constructed UGen under the current layer and block size. */
    def addUGen(ugen: UGen): Unit

    /** Memoized expansion: returns the cached result for `ref`, computing `init` once. */
    def visit[U](ref: AnyRef, init: => U): U

    /** Repeatedly expands `graph` until no further sources are produced. */
    def expandNested(graph: Graph): Unit

    /** Allocates a fresh, unique layer identifier. */
    def allocLayer(): Layer

    /** Runs `block` with `layer` installed as the current layer. */
    def withLayer[A](layer: Layer)(block: => A): A

    /** Runs `block` with `blockSize` installed as the current block size. */
    def withBlockSize[A](blockSize: Int)(block: => A): A
  }
  /** Expands `graph` with a fresh builder and returns the resulting UGen graph. */
  def build(graph: Graph)(implicit ctrl: stream.Control): UGenGraph = {
    val b = new Impl
    b.expandNested(graph)
    b.build
  }
// ---- IndexedUGen ----
  /**
   * Mutable bookkeeping for one UGen during indexing: per-output children,
   * resolved input indices, an effectiveness flag (participates in output /
   * has side effect) and the final index.
   */
  final class IndexedUGenBuilder(val ugen: UGen, val layer: Int, val blockSize: Int, var effective: Boolean) {
    // For each output channel, the consumers of that channel.
    var children : Array[List[IndexedUGenBuilder]] = Array.fill(ugen.numOutputs)(Nil)
    var inputIndices: List[UGenInIndex] = Nil
    var index : Int = -1

    override def toString = s"Idx($ugen, $effective) : richInputs = $inputIndices"
  }
  /** An indexed input of a UGen: either a constant or another UGen's output. */
  private[fscape] sealed trait UGenInIndex {
    /** Marks the transitive producers as effective; returns how many newly became effective. */
    def makeEffective(): Int
  }

  /** A constant input; contributes nothing to effectiveness. */
  private[fscape] final class ConstantIndex(val peer: Constant) extends UGenInIndex {
    def makeEffective() = 0

    override def toString: String = peer.toString
  }

  /** An input fed from output channel `outIdx` of another indexed UGen. */
  private[fscape] final class UGenProxyIndex(val iu: IndexedUGenBuilder, val outIdx: Int) extends UGenInIndex {
    def makeEffective(): Int = {
      // Propagate effectiveness upstream exactly once per builder.
      if (!iu.effective) {
        iu.effective = true
        var numEff = 1
        iu.inputIndices.foreach(numEff += _.makeEffective())
        numEff
      } else 0
    }

    override def toString = s"$iu[$outIdx]"
  }
  // - converts to StreamIn objects that automatically insert stream broadcasters
  //   and dummy sinks
  /**
   * Materializes the indexed UGens into an Akka Streams [[RunnableGraph]].
   * UGens must be iterated in an order where each one's inputs have already
   * been materialized (they are looked up in `ugenOutMap`). Layer and
   * block-size switches update the settable builder as they occur; non-default
   * block sizes get their own cached [[Allocator]].
   */
  def buildStream(ugens: Vec[IndexedUGenBuilder])
                 (implicit ctrl: stream.Control): RunnableGraph[NotUsed] = {
    // empty graphs are not supported by Akka
    if (ugens.isEmpty) throw new IllegalStateException("Graph is empty")
    val _graph = GraphDSL.create() { implicit dsl =>
      implicit val sb: stream.Builder.Settable = stream.Builder()
      // Maps each materialized UGen to its outputs, wrapped as StreamIn.
      var ugenOutMap = Map.empty[IndexedUGenBuilder, Array[StreamIn]]
      // var allocMap = Map(0 -> sb.allocator)
      var oldLayer = 0
      val defaultAlloc = sb.allocator
      val defaultBlockSize= defaultAlloc.blockSize
      var oldBlockSize = 0 // defaultBlockSize
      // Cache of allocators per non-default block size.
      var allocMap = Map.empty[Int, Allocator]
      ugens.foreach { iu =>
        // Resolve this UGen's inputs: constants pass through, proxies look up
        // the already-materialized producer's output.
        val args: Vec[StreamIn] = iu.inputIndices.iterator.map {
          case c: ConstantIndex => c.peer
          case u: UGenProxyIndex => ugenOutMap(u.iu)(u.outIdx)
        } .toIndexedSeq

        @inline def add(value: Array[StreamIn]): Unit = {
          // println(s"map += $iu -> ${value.mkString("[", ", ", "]")}")
          ugenOutMap += iu -> value
        }

        val newLayer = iu.layer
        if (newLayer != oldLayer) {
          sb.layer = newLayer
          oldLayer = newLayer
        }

        val newBlockSize = iu.blockSize
        if (newBlockSize != oldBlockSize) {
          // Block size 0 means "use default"; otherwise reuse or create a
          // dedicated allocator for this block size.
          val alloc = if (newBlockSize == 0 || newBlockSize == defaultBlockSize) defaultAlloc else {
            allocMap.getOrElse(newBlockSize, {
              val control = defaultAlloc.control
              val newAlloc = Allocator(
                control = control,
                blockSize = newBlockSize,
                randomSeed = defaultAlloc.newSeed(),
              )
              allocMap += newBlockSize -> newAlloc
              newAlloc
            })
          }
          sb.allocator = alloc
          oldBlockSize = newBlockSize
        }

        // Materialize the UGen; each output is converted to a StreamIn sized
        // by its number of consumers (inserting broadcasters / dummy sinks).
        iu.ugen match {
          case ugen: UGen.SingleOut =>
            val out: StreamOut = ugen.source.makeStream(args)
            val numChildren = iu.children(0).size
            val value = Array(out.toIn(numChildren))
            add(value)

          case ugen: UGen.MultiOut =>
            val outs : Vec [StreamOut] = ugen.source.makeStream(args)
            val value: Array[StreamIn ] = outs.iterator.zipWithIndex.map { case (out, outIdx) =>
              val numChildren = iu.children(outIdx).size
              out.toIn(numChildren)
            } .toArray
            add(value)

          case ugen: UGen.ZeroOut =>
            ugen.source.makeStream(args)
        }
      }
      ClosedShape
    }
    RunnableGraph.fromGraph(_graph)
  }
/////////////////////////////////////////////
  // Default builder implementation; all logic lives in Basic.
  private final class Impl extends Basic

  // A UGen together with the layer and block size in effect when it was added.
  private final class UGenInLayer(val ugen: UGen, val layer: Layer, val blockSize: Int)
  /**
   * Core builder implementation: collects UGens with their layer/block-size
   * context, memoizes graph-element expansion, and turns the collected UGens
   * into a runnable graph.
   */
  private[fscape] trait Basic extends Builder /* with Graph.Builder */ {
    builder =>

    // ---- abstract ----

    // ---- impl ----

    // Current layer and block size applied to newly added UGens.
    private[this] var layer = 0
    private[this] var blockSize = 0
    private[this] var _ugens = Vector.empty[UGenInLayer]
    // private[this] val ugenSet = mutable.Set.empty[UGen]

    // Memoization table used by `visit`.
    protected var sourceMap = Map.empty[AnyRef, Any]

    // private[this] var _blockSizeMap = Map.empty[Layer, Int]
    //
    // protected final def blockSizeMap: Map[Layer, Int] = _blockSizeMap

    // protected final def ugens: Vec[UGen] = _ugens

    // - builds parent-child graph of UGens
    // - deletes no-op sub-trees
    protected def indexUGens(): Vec[IndexedUGenBuilder] = {
      val ugens = _ugens
      var numIneffective = ugens.size
      // A UGen is initially effective iff it has a side effect.
      val indexedUGens = ugens.map { ul =>
        val eff = ul.ugen.hasSideEffect
        if (eff) numIneffective -= 1
        new IndexedUGenBuilder(ul.ugen, layer = ul.layer, blockSize = ul.blockSize, effective = eff)
      }

      val ugenMap: Map[AnyRef, IndexedUGenBuilder] = indexedUGens.iterator.map(iu => (iu.ugen, iu)).toMap
      // Resolve inputs, record parent/child edges, and propagate effectiveness
      // upstream from every effective UGen.
      indexedUGens.foreach { iu =>
        iu.inputIndices = iu.ugen.inputs.iterator.map {
          case c: Constant =>
            new ConstantIndex(c)

          case up: UGenProxy =>
            val iui = ugenMap(up.ugen)
            iui.children(up.outputIndex) ::= iu
            new UGenProxyIndex(iui, up.outputIndex)
        } .toList
        if (iu.effective) iu.inputIndices.foreach(numIneffective -= _.makeEffective())
      }

      // Drop ineffective UGens (no-op sub-trees) and prune their edges.
      val filtered: Vec[IndexedUGenBuilder] =
        if (numIneffective == 0)
          indexedUGens
        else
          indexedUGens.collect {
            case iu if iu.effective =>
              for (outputIndex <- iu.children.indices) {
                iu.children(outputIndex) = iu.children(outputIndex).filter(_.effective)
              }
              iu
          }

      filtered
    }

    /** Indexes the collected UGens and materializes them into a UGen graph. */
    def build(implicit ctrl: stream.Control): UGenGraph = {
      val iUGens = indexUGens()
      val rg = buildStream(iUGens)
      UGenGraph(rg)
    }

    @inline
    private def printRef(ref: AnyRef): String = {
      val hash = ref.hashCode.toHexString
      // ref match {
      // case p: Product => s"${p.productPrefix}@$hash"
      // case _ => hash
      // }
      hash
    }

    // Memoized expansion: `init` is evaluated at most once per `ref`.
    final def visit[U](ref: AnyRef, init: => U): U = {
      log(this, s"visit ${printRef(ref)}")
      val res = sourceMap.getOrElse(ref, {
        log(this, s"expand ${smartRef(ref)}...")
        val exp: Any = init
        sourceMap += ref -> exp
        log(this, s"...${smartRef(ref)} -> ${exp.hashCode.toHexString} ${printSmart(exp)}")
        exp
      })
      res.asInstanceOf[U] // not so pretty...
    }

    def addUGen(ugen: UGen): Unit = {
      // Where is this check in ScalaCollider? Have we removed it (why)?
      // N.B.: We do not use UGen equality any longer in FScape because
      // we might need to feed the same structure into different sinks
      // that read at different speeds, so we risk to block the graph
      // (Imagine a `DC(0.0)` going into two entirely different places!)
      // if (ugenSet.add(ugen)) {
      _ugens :+= new UGenInLayer(ugen, layer = layer ,blockSize = blockSize)
      log(this, s"addUGen ${ugen.name} @ ${ugen.hashCode.toHexString} ${if (ugen.isIndividual) "indiv" else ""} (layer $layer)")
      // } else {
      // log(this, s"addUGen ${ugen.name} @ ${ugen.hashCode.toHexString} - duplicate")
      // }
    }

    // Expanding sources may add new sources; loop until a fixed point.
    override def expandNested(graph: Graph): Unit = {
      var g0 = graph
      while (g0.nonEmpty) {
        g0 = Graph {
          g0.sources.foreach { source =>
            source.force(builder)
          }
        }
      }
    }

    override def allocLayer(): Layer = allocId()

    // Saves/restores the current layer around `block`.
    override def withLayer[A](_layer: Layer)(block: => A): A = {
      val oldLayer = layer
      layer = _layer
      val res = block
      layer = oldLayer
      res
    }

    // Saves/restores the current block size around `block`.
    override def withBlockSize[A](_blockSize: Int)(block: => A): A = {
      val oldBlockSize = blockSize
      blockSize = _blockSize
      val res = block
      blockSize = oldBlockSize
      res
    }

    private[this] var idCount = 0

    /* Allocates a unique increasing identifier. */
    private def allocId(): Int = {
      idCount += 1
      idCount
    }
  }
  /**
   * Best-effort human-readable reference for logging: walks the current stack
   * trace looking for the `force`/`expand` frame (outside Lazy.scala) and
   * combines its simple class name with the object's hash code; falls back to
   * the plain hash.
   * NOTE(review): `clz.lastIndexOf("@", i)` searches *backwards* from `i`; if
   * a forward search past the dot was intended this would be
   * `indexOf("@", i)` — confirm.
   */
  private def smartRef(ref: AnyRef): String = {
    val t = new Throwable
    t.fillInStackTrace()
    val trace = t.getStackTrace
    val opt = trace.collectFirst {
      case ste if (ste.getMethodName == "force" || ste.getMethodName == "expand") && ste.getFileName != "Lazy.scala" =>
        val clz = ste.getClassName
        val i = clz.lastIndexOf(".") + 1
        val j = clz.lastIndexOf("@", i)
        val s = if (j < 0) clz.substring(i) else clz.substring(i, j)
        s"$s@${ref.hashCode().toHexString}"
    }
    opt.getOrElse(ref.hashCode.toHexString)
  }
  // Runtime switch for graph-building debug output (see `log`).
  final var showLog = false

  // Pretty-prints a value for the log: UGens by name, everything else via toString.
  private def printSmart(x: Any): String = x match {
    case u: UGen => u.name
    case _ => x.toString
  }

  // Elided unless compiled at CONFIG elision level; additionally gated by showLog.
  @elidable(elidable.CONFIG) private def log(builder: Basic, what: => String): Unit =
    if (showLog) logGraph.debug(s"<${builder.toString}> $what")
}
/** A fully expanded UGen graph, wrapped as a runnable Akka Streams graph. */
final case class UGenGraph(runnable: RunnableGraph[NotUsed])
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io._
import java.nio.charset.StandardCharsets.UTF_8
import java.util.{ArrayList => JArrayList, List => JList, Locale}
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import jline.console.ConsoleReader
import jline.console.history.FileHistory
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.cli.{CliDriver, CliSessionState, OptionsProcessor}
import org.apache.hadoop.hive.common.HiveInterruptUtils
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.processors._
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.thrift.transport.TSocket
import org.slf4j.LoggerFactory
import sun.misc.{Signal, SignalHandler}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.client.HiveClientImpl
import org.apache.spark.sql.hive.security.HiveDelegationTokenProvider
import org.apache.spark.sql.internal.SharedState
import org.apache.spark.util.ShutdownHookManager
/**
* This code doesn't support remote connections in Hive 1.2+, as the underlying CliDriver
* has dropped its support.
*/
private[hive] object SparkSQLCLIDriver extends Logging {
  private val prompt = "spark-sql"
  // Whitespace padding used to align continuation lines under the prompt.
  private val continuedPrompt = "".padTo(prompt.length, ' ')
  private var transport: TSocket = _
  // Prefix under which Hive conf overrides are mirrored into system properties.
  private final val SPARK_HADOOP_PROP_PREFIX = "spark.hadoop."

  initializeLogIfNecessary(true)
  installSignalHandler()
  /**
   * Install an interrupt callback to cancel all Spark jobs. In Hive's CliDriver#processLine(),
   * a signal handler will invoke this registered callback if a Ctrl+C signal is detected while
   * a command is being processed by the current thread.
   */
  def installSignalHandler(): Unit = {
    HiveInterruptUtils.add(() => {
      // Handle remote execution mode
      if (SparkSQLEnv.sparkContext != null) {
        SparkSQLEnv.sparkContext.cancelAllJobs()
      } else {
        if (transport != null) {
          // Force closing of TCP connection upon session termination
          transport.getSocket.close()
        }
      }
    })
  }
  /**
   * CLI entry point: parses options, sets up the Hive CLI session and Spark SQL
   * environment, then either executes `-e`/`-f` input or runs the interactive
   * read-eval loop until EOF.
   */
  def main(args: Array[String]): Unit = {
    val oproc = new OptionsProcessor()
    if (!oproc.process_stage1(args)) {
      System.exit(1)
    }

    val sparkConf = new SparkConf(loadDefaults = true)
    val hadoopConf = SparkHadoopUtil.get.newConfiguration(sparkConf)
    val extraConfigs = HiveUtils.formatTimeVarsForHiveClient(hadoopConf)
    val cliConf = HiveClientImpl.newHiveConf(sparkConf, hadoopConf, extraConfigs)

    val sessionState = new CliSessionState(cliConf)

    // Wire session streams to the process streams, forcing UTF-8.
    sessionState.in = System.in
    try {
      sessionState.out = new PrintStream(System.out, true, UTF_8.name())
      sessionState.info = new PrintStream(System.err, true, UTF_8.name())
      sessionState.err = new PrintStream(System.err, true, UTF_8.name())
    } catch {
      case e: UnsupportedEncodingException => System.exit(3)
    }

    if (!oproc.process_stage2(sessionState)) {
      System.exit(2)
    }

    // Set all properties specified via command line.
    val conf: HiveConf = sessionState.getConf
    // Hive 2.0.0 onwards HiveConf.getClassLoader returns the UDFClassLoader (created by Hive).
    // Because of this spark cannot find the jars as class loader got changed
    // Hive changed the class loader because of HIVE-11878, so it is required to use old
    // classLoader as sparks loaded all the jars in this classLoader
    conf.setClassLoader(Thread.currentThread().getContextClassLoader)
    sessionState.cmdProperties.entrySet().asScala.foreach { item =>
      val key = item.getKey.toString
      val value = item.getValue.toString
      // We do not propagate metastore options to the execution copy of hive.
      if (key != "javax.jdo.option.ConnectionURL") {
        conf.set(key, value)
        sessionState.getOverriddenConfigurations.put(key, value)
      }
    }

    // Obtain Hive delegation tokens when required (secure clusters).
    val tokenProvider = new HiveDelegationTokenProvider()
    if (tokenProvider.delegationTokensRequired(sparkConf, hadoopConf)) {
      val credentials = new Credentials()
      tokenProvider.obtainDelegationTokens(hadoopConf, sparkConf, credentials)
      UserGroupInformation.getCurrentUser.addCredentials(credentials)
    }

    val warehousePath = SharedState.resolveWarehousePath(sparkConf, conf)
    val qualified = SharedState.qualifyWarehousePath(conf, warehousePath)
    SharedState.setWarehousePathConf(sparkConf, conf, qualified)
    SessionState.setCurrentSessionState(sessionState)

    // Clean up after we exit
    ShutdownHookManager.addShutdownHook { () => SparkSQLEnv.stop() }

    if (isRemoteMode(sessionState)) {
      // Hive 1.2 + not supported in CLI
      throw QueryExecutionErrors.remoteOperationsUnsupportedError()
    }
    // Respect the configurations set by --hiveconf from the command line
    // (based on Hive's CliDriver).
    val hiveConfFromCmd = sessionState.getOverriddenConfigurations.entrySet().asScala
    val newHiveConf = hiveConfFromCmd.map { kv =>
      // If the same property is configured by spark.hadoop.xxx, we ignore it and
      // obey settings from spark properties
      val k = kv.getKey
      val v = sys.props.getOrElseUpdate(SPARK_HADOOP_PROP_PREFIX + k, kv.getValue)
      (k, v)
    }

    val cli = new SparkSQLCLIDriver
    cli.setHiveVariables(oproc.getHiveVariables)

    // In SparkSQL CLI, we may want to use jars augmented by hiveconf
    // hive.aux.jars.path, here we add jars augmented by hiveconf to
    // Spark's SessionResourceLoader to obtain these jars.
    val auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS)
    if (StringUtils.isNotBlank(auxJars)) {
      val resourceLoader = SparkSQLEnv.sqlContext.sessionState.resourceLoader
      StringUtils.split(auxJars, ",").foreach(resourceLoader.addJar(_))
    }

    // The class loader of CliSessionState's conf is current main thread's class loader
    // used to load jars passed by --jars. One class loader used by AddJarsCommand is
    // sharedState.jarClassLoader which contain jar path passed by --jars in main thread.
    // We set CliSessionState's conf class loader to sharedState.jarClassLoader.
    // Thus we can load all jars passed by --jars and AddJarsCommand.
    sessionState.getConf.setClassLoader(SparkSQLEnv.sqlContext.sharedState.jarClassLoader)

    // TODO work around for set the log output to console, because the HiveContext
    // will set the output into an invalid buffer.
    sessionState.in = System.in
    try {
      sessionState.out = new PrintStream(System.out, true, UTF_8.name())
      sessionState.info = new PrintStream(System.err, true, UTF_8.name())
      sessionState.err = new PrintStream(System.err, true, UTF_8.name())
    } catch {
      case e: UnsupportedEncodingException => System.exit(3)
    }

    if (sessionState.database != null) {
      SparkSQLEnv.sqlContext.sessionState.catalog.setCurrentDatabase(
        s"${sessionState.database}")
    }

    // Execute -i init files (always in silent mode)
    cli.processInitFiles(sessionState)

    // We don't propagate hive.metastore.warehouse.dir, because it might has been adjusted in
    // [[SharedState.loadHiveConfFile]] based on the user specified or default values of
    // spark.sql.warehouse.dir and hive.metastore.warehouse.dir.
    for ((k, v) <- newHiveConf if k != "hive.metastore.warehouse.dir") {
      SparkSQLEnv.sqlContext.setConf(k, v)
    }

    cli.printMasterAndAppId

    // -e: execute a single command string and exit with its status.
    if (sessionState.execString != null) {
      System.exit(cli.processLine(sessionState.execString))
    }

    // -f: execute a script file and exit with its status.
    try {
      if (sessionState.fileName != null) {
        System.exit(cli.processFile(sessionState.fileName))
      }
    } catch {
      case e: FileNotFoundException =>
        logError(s"Could not open input file for reading. (${e.getMessage})")
        System.exit(3)
    }

    // Interactive mode: set up the JLine reader with completion and history.
    val reader = new ConsoleReader()
    reader.setBellEnabled(false)
    reader.setExpandEvents(false)
    // reader.setDebug(new PrintWriter(new FileWriter("writer.debug", true)))
    CliDriver.getCommandCompleter.foreach(reader.addCompleter)

    val historyDirectory = System.getProperty("user.home")

    try {
      if (new File(historyDirectory).exists()) {
        val historyFile = historyDirectory + File.separator + ".hivehistory"
        reader.setHistory(new FileHistory(new File(historyFile)))
      } else {
        logWarning("WARNING: Directory for Hive history file: " + historyDirectory +
          " does not exist. History will not be available during this session.")
      }
    } catch {
      case e: Exception =>
        logWarning("WARNING: Encountered an error while trying to initialize Hive's " +
          "history file. History will not be available during this session.")
        logWarning(e.getMessage)
    }

    // add shutdown hook to flush the history to history file
    ShutdownHookManager.addShutdownHook { () =>
      reader.getHistory match {
        case h: FileHistory =>
          try {
            h.flush()
          } catch {
            case e: IOException =>
              logWarning("WARNING: Failed to write command history file: " + e.getMessage)
          }
        case _ =>
      }
    }

    // TODO: missing
    /*
    val clientTransportTSocketField = classOf[CliSessionState].getDeclaredField("transport")
    clientTransportTSocketField.setAccessible(true)
    transport = clientTransportTSocketField.get(sessionState).asInstanceOf[TSocket]
    */
    transport = null
    var ret = 0
    var prefix = ""

    val currentDB = ReflectionUtils.invokeStatic(classOf[CliDriver], "getFormattedDb",
      classOf[HiveConf] -> conf, classOf[CliSessionState] -> sessionState)
    def promptWithCurrentDB: String = s"$prompt$currentDB"
    def continuedPromptWithDBSpaces: String = continuedPrompt + ReflectionUtils.invokeStatic(
      classOf[CliDriver], "spacesForString", classOf[String] -> currentDB)

    var currentPrompt = promptWithCurrentDB
    var line = reader.readLine(currentPrompt + "> ")

    // REPL loop: accumulate lines until a terminating ';' (not '\;'), then
    // submit the buffered statement; '--' comment lines are ignored.
    while (line != null) {
      if (!line.startsWith("--")) {
        if (prefix.nonEmpty) {
          prefix += '\\n'
        }
        if (line.trim().endsWith(";") && !line.trim().endsWith("\\\\;")) {
          line = prefix + line
          ret = cli.processLine(line, true)
          prefix = ""
          currentPrompt = promptWithCurrentDB
        } else {
          prefix = prefix + line
          currentPrompt = continuedPromptWithDBSpaces
        }
      }
      line = reader.readLine(currentPrompt + "> ")
    }

    sessionState.close()

    System.exit(ret)
  }
  // Whether this CLI session is talking to a remote HiveServer instead of
  // running queries locally. Remote mode is rejected by the SparkSQLCLIDriver
  // class constructor below (it throws remoteOperationsUnsupportedError).
  def isRemoteMode(state: CliSessionState): Boolean = {
    // sessionState.isRemoteMode
    state.isHiveServerQuery
  }
}
/**
 * The interactive spark-sql shell driver. Extends Hive's CliDriver so that
 * SQL commands typed at the prompt are executed through [[SparkSQLEnv]],
 * while client-side commands (`source`, shell escapes with `!`) are delegated
 * to Hive's own CliDriver implementation.
 */
private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
  // CLI session created by the launcher before this class is instantiated.
  // NOTE(review): assumed to always be a CliSessionState here — confirm for
  // non-main entry points.
  private val sessionState = SessionState.get().asInstanceOf[CliSessionState]

  private val LOG = LoggerFactory.getLogger(classOf[SparkSQLCLIDriver])

  // Used for user-facing info/error output on the session console streams.
  private val console = new SessionState.LogHelper(LOG)

  private val isRemoteMode = {
    SparkSQLCLIDriver.isRemoteMode(sessionState)
  }

  // Falls back to a fresh Hadoop Configuration when there is no session
  // (e.g. some unit-test code paths).
  private val conf: Configuration =
    if (sessionState != null) sessionState.getConf else new Configuration()

  // Force initializing SparkSQLEnv. This is put here but not object SparkSQLCliDriver
  // because the Hive unit tests do not go through the main() code path.
  if (!isRemoteMode) {
    SparkSQLEnv.init()
    if (sessionState.getIsSilent) {
      SparkSQLEnv.sparkContext.setLogLevel("warn")
    }
  } else {
    // Hive 1.2 + not supported in CLI
    throw QueryExecutionErrors.remoteOperationsUnsupportedError()
  }

  // Mirrors Hive session variables into the Spark SQL conf so they are
  // visible to Spark-executed queries.
  override def setHiveVariables(hiveVariables: java.util.Map[String, String]): Unit = {
    hiveVariables.asScala.foreach(kv => SparkSQLEnv.sqlContext.conf.setConfString(kv._1, kv._2))
  }

  // Prints the Spark master URL and application id at session start-up.
  def printMasterAndAppId(): Unit = {
    val master = SparkSQLEnv.sparkContext.master
    val appId = SparkSQLEnv.sparkContext.applicationId
    console.printInfo(s"Spark master: $master, Application Id: $appId")
  }

  /**
   * Executes one already-split command. `quit`/`exit` terminate the JVM;
   * `source` commands, `!` shell escapes and remote mode go to Hive's
   * CliDriver; everything else is dispatched on the Hive CommandProcessor
   * type, with Driver-style processors routed through [[SparkSQLDriver]].
   * Returns the command's response code (0 on success).
   */
  override def processCmd(cmd: String): Int = {
    val cmd_trimmed: String = cmd.trim()
    val cmd_lower = cmd_trimmed.toLowerCase(Locale.ROOT)
    val tokens: Array[String] = cmd_trimmed.split("\\\\s+")
    // Everything after the first token, i.e. the command's argument string.
    val cmd_1: String = cmd_trimmed.substring(tokens(0).length()).trim()
    if (cmd_lower.equals("quit") ||
      cmd_lower.equals("exit")) {
      sessionState.close()
      System.exit(0)
    }
    if (tokens(0).toLowerCase(Locale.ROOT).equals("source") ||
      cmd_trimmed.startsWith("!") || isRemoteMode) {
      // Delegate to Hive's CliDriver, but still report elapsed time.
      val startTimeNs = System.nanoTime()
      super.processCmd(cmd)
      val endTimeNs = System.nanoTime()
      val timeTaken: Double = TimeUnit.NANOSECONDS.toMillis(endTimeNs - startTimeNs) / 1000.0
      console.printInfo(s"Time taken: $timeTaken seconds")
      0
    } else {
      var ret = 0
      val hconf = conf.asInstanceOf[HiveConf]
      val proc: CommandProcessor = CommandProcessorFactory.get(tokens, hconf)

      if (proc != null) {
        // scalastyle:off println
        if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor] ||
          proc.isInstanceOf[AddResourceProcessor] || proc.isInstanceOf[ListResourceProcessor] ||
          proc.isInstanceOf[DeleteResourceProcessor] ||
          proc.isInstanceOf[ResetProcessor] ) {
          // Query-like commands: run through Spark's SQL driver.
          val driver = new SparkSQLDriver

          driver.init()
          val out = sessionState.out
          val err = sessionState.err
          val startTimeNs: Long = System.nanoTime()
          if (sessionState.getIsVerbose) {
            out.println(cmd)
          }
          val rc = driver.run(cmd)
          val endTimeNs = System.nanoTime()
          val timeTaken: Double = TimeUnit.NANOSECONDS.toMillis(endTimeNs - startTimeNs) / 1000.0

          ret = rc.getResponseCode
          if (ret != 0) {
            // Query failed: print a (possibly abbreviated) error and bail out.
            rc.getException match {
              case e: AnalysisException => e.cause match {
                case Some(_) if !sessionState.getIsSilent =>
                  err.println(
                    s"""Error in query: ${e.getMessage}
                       |${org.apache.hadoop.util.StringUtils.stringifyException(e)}
                     """.stripMargin)
                // For analysis exceptions in silent mode or simple ones that only related to the
                // query itself, such as `NoSuchDatabaseException`, only the error is printed out
                // to the console.
                case _ => err.println(s"""Error in query: ${e.getMessage}""")
              }
              case _ => err.println(rc.getErrorMessage())
            }
            driver.close()
            return ret
          }

          val res = new JArrayList[String]()

          if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_PRINT_HEADER) ||
              SparkSQLEnv.sqlContext.conf.cliPrintHeader) {
            // Print the column names.
            Option(driver.getSchema.getFieldSchemas).foreach { fields =>
              out.println(fields.asScala.map(_.getName).mkString("\\t"))
            }
          }

          // Stream result batches to the console, counting emitted rows.
          var counter = 0
          try {
            while (!out.checkError() && driver.getResults(res)) {
              res.asScala.foreach { l =>
                counter += 1
                out.println(l)
              }
              res.clear()
            }
          } catch {
            case e: IOException =>
              console.printError(
                s"""Failed with exception ${e.getClass.getName}: ${e.getMessage}
                   |${org.apache.hadoop.util.StringUtils.stringifyException(e)}
                 """.stripMargin)
              ret = 1
          }

          // A failed close() only matters if the query itself succeeded.
          val cret = driver.close()
          if (ret == 0) {
            ret = cret
          }

          var responseMsg = s"Time taken: $timeTaken seconds"
          if (counter != 0) {
            responseMsg += s", Fetched $counter row(s)"
          }
          console.printInfo(responseMsg, null)
          // Destroy the driver to release all the locks.
          driver.destroy()
        } else {
          // Non-query processors (e.g. DFS commands) run through Hive directly.
          if (sessionState.getIsVerbose) {
            sessionState.out.println(tokens(0) + " " + cmd_1)
          }
          ret = proc.run(cmd_1).getResponseCode
        }
        // scalastyle:on println
      }
      ret
    }
  }

  // Adapted processLine from Hive 2.3's CliDriver.processLine.
  // Splits `line` on unquoted semicolons and runs each command via processCmd.
  // When `allowInterrupting` is true, a temporary Ctrl+C handler is installed
  // for the duration of the line: first Ctrl+C interrupts the statement,
  // second one kills the JVM. Returns the last command's response code, or the
  // first non-zero code when CLIIGNOREERRORS is unset.
  override def processLine(line: String, allowInterrupting: Boolean): Int = {
    var oldSignal: SignalHandler = null
    var interruptSignal: Signal = null

    if (allowInterrupting) {
      // Remember all threads that were running at the time we started line processing.
      // Hook up the custom Ctrl+C handler while processing this line
      interruptSignal = new Signal("INT")
      oldSignal = Signal.handle(interruptSignal, new SignalHandler() {
        private var interruptRequested: Boolean = false

        override def handle(signal: Signal): Unit = {
          val initialRequest = !interruptRequested
          interruptRequested = true

          // Kill the VM on second ctrl+c
          if (!initialRequest) {
            console.printInfo("Exiting the JVM")
            System.exit(127)
          }

          // Interrupt the CLI thread to stop the current statement and return
          // to prompt
          console.printInfo("Interrupting... Be patient, this might take some time.")
          console.printInfo("Press Ctrl+C again to kill JVM")

          HiveInterruptUtils.interrupt()
        }
      })
    }

    try {
      var lastRet: Int = 0

      // we can not use "split" function directly as ";" may be quoted
      val commands = splitSemiColon(line).asScala
      var command: String = ""
      for (oneCmd <- commands) {
        // A trailing backslash continues the command into the next fragment,
        // re-inserting the semicolon that splitSemiColon removed.
        if (StringUtils.endsWith(oneCmd, "\\\\")) {
          command += StringUtils.chop(oneCmd) + ";"
        } else {
          command += oneCmd
          if (!StringUtils.isBlank(command)) {
            val ret = processCmd(command)
            command = ""
            lastRet = ret
            val ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS)
            if (ret != 0 && !ignoreErrors) {
              CommandProcessorFactory.clean(conf.asInstanceOf[HiveConf])
              return ret
            }
          }
        }
      }
      CommandProcessorFactory.clean(conf.asInstanceOf[HiveConf])
      lastRet
    } finally {
      // Once we are done processing the line, restore the old handler
      if (oldSignal != null && interruptSignal != null) {
        Signal.handle(interruptSignal, oldSignal)
      }
    }
  }

  // Adapted splitSemiColon from Hive 2.3's CliDriver.splitSemiColon.
  // Note: [SPARK-31595] if there is a `'` in a double quoted string, or a `"` in a single quoted
  // string, the origin implementation from Hive will not drop the trailing semicolon as expected,
  // hence we refined this function a little bit.
  // Note: [SPARK-33100] Ignore a semicolon inside a bracketed comment in spark-sql.
  // Single-pass state machine tracking quoting, escapes, `--` line comments and
  // nested /* */ bracketed comments; a `;` only splits when outside all of them.
  private def splitSemiColon(line: String): JList[String] = {
    var insideSingleQuote = false
    var insideDoubleQuote = false
    var insideSimpleComment = false
    var bracketedCommentLevel = 0
    var escape = false
    // Start of the statement currently being scanned.
    var beginIndex = 0
    var leavingBracketedComment = false
    // True once the current fragment contains any non-whitespace outside comments.
    var isStatement = false
    val ret = new JArrayList[String]

    def insideBracketedComment: Boolean = bracketedCommentLevel > 0
    def insideComment: Boolean = insideSimpleComment || insideBracketedComment
    def statementInProgress(index: Int): Boolean = isStatement || (!insideComment &&
      index > beginIndex && !s"${line.charAt(index)}".trim.isEmpty)

    for (index <- 0 until line.length) {
      // Checks if we need to decrement a bracketed comment level; the last character '/' of
      // bracketed comments is still inside the comment, so `insideBracketedComment` must keep true
      // in the previous loop and we decrement the level here if needed.
      if (leavingBracketedComment) {
        bracketedCommentLevel -= 1
        leavingBracketedComment = false
      }

      if (line.charAt(index) == '\\'' && !insideComment) {
        // take a look to see if it is escaped
        // See the comment above about SPARK-31595
        if (!escape && !insideDoubleQuote) {
          // flip the boolean variable
          insideSingleQuote = !insideSingleQuote
        }
      } else if (line.charAt(index) == '\\"' && !insideComment) {
        // take a look to see if it is escaped
        // See the comment above about SPARK-31595
        if (!escape && !insideSingleQuote) {
          // flip the boolean variable
          insideDoubleQuote = !insideDoubleQuote
        }
      } else if (line.charAt(index) == '-') {
        val hasNext = index + 1 < line.length
        if (insideDoubleQuote || insideSingleQuote || insideComment) {
          // Ignores '-' in any case of quotes or comment.
          // Avoids to start a comment(--) within a quoted segment or already in a comment.
          // Sample query: select "quoted value --"
          //                                    ^^ avoids starting a comment if it's inside quotes.
        } else if (hasNext && line.charAt(index + 1) == '-') {
          // ignore quotes and ; in simple comment
          insideSimpleComment = true
        }
      } else if (line.charAt(index) == ';') {
        if (insideSingleQuote || insideDoubleQuote || insideComment) {
          // do not split
        } else {
          if (isStatement) {
            // split, do not include ; itself
            ret.add(line.substring(beginIndex, index))
          }
          beginIndex = index + 1
          isStatement = false
        }
      } else if (line.charAt(index) == '\\n') {
        // with a new line the inline simple comment should end.
        if (!escape) {
          insideSimpleComment = false
        }
      } else if (line.charAt(index) == '/' && !insideSimpleComment) {
        val hasNext = index + 1 < line.length
        if (insideSingleQuote || insideDoubleQuote) {
          // Ignores '/' in any case of quotes
        } else if (insideBracketedComment && line.charAt(index - 1) == '*' ) {
          // Decrements `bracketedCommentLevel` at the beginning of the next loop
          leavingBracketedComment = true
        } else if (hasNext && line.charAt(index + 1) == '*') {
          bracketedCommentLevel += 1
        }
      }
      // set the escape
      if (escape) {
        escape = false
      } else if (line.charAt(index) == '\\\\') {
        escape = true
      }

      isStatement = statementInProgress(index)
    }
    // Flush whatever remains after the last semicolon.
    if (beginIndex < line.length()) {
      ret.add(line.substring(beginIndex))
    }
    ret
  }
}
| holdenk/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | Scala | apache-2.0 | 23,882 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package models.navigation
/**
* @author hiral
*/
/**
 * Factory for the navigation menus rendered by the UI: a per-cluster menu set
 * and the top-level index menu. Route targets come from the implicit string
 * extensions in [[models.navigation.QuickRoutes]].
 */
object Menus {
  import models.navigation.QuickRoutes._

  /** Menus shown while a specific cluster is selected. */
  def clusterMenus(cluster: String) : IndexedSeq[Menu] = {
    // Cluster-level actions: current cluster summary plus global list/add.
    val clusterSection = Menu("Cluster", IndexedSeq(
      "Summary".clusterRouteMenuItem(cluster),
      "List".baseRouteMenuItem,
      "Add Cluster".baseRouteMenuItem), None)
    // Topic actions scoped to the selected cluster.
    val topicSection = Menu("Topic", IndexedSeq(
      "List".clusterRouteMenuItem(cluster),
      "Create".clusterRouteMenuItem(cluster)), None)
    IndexedSeq(
      clusterSection,
      "Brokers".clusterMenu(cluster),
      topicSection,
      "Preferred Replica Election".clusterMenu(cluster),
      "Reassign Partitions".clusterMenu(cluster))
  }

  /** Menus shown on the landing page, before any cluster is chosen. */
  def indexMenu : IndexedSeq[Menu] = IndexedSeq(
    Menu("Cluster", IndexedSeq(
      "List".baseRouteMenuItem,
      "Add Cluster".baseRouteMenuItem), None))
}
| NerdWallet/kafka-manager | app/models/navigation/Menus.scala | Scala | apache-2.0 | 905 |
package spire
package math
import spire.algebra.{Eq, EuclideanRing, Field, PartialOrder, Order, Ring, Signed}
/** Adapts a spire [[Eq]] instance into a `scala.math.Equiv`. */
private[spire] trait ScalaEquivWrapper[A] extends scala.math.Equiv[A] {
  // Underlying spire equality instance, supplied by the concrete wrapper.
  def eq: Eq[A]

  def equiv(x:A, y:A): Boolean = eq.eqv(x, y)
}
/**
 * Adapts a spire [[PartialOrder]] instance into a `scala.math.PartialOrdering`.
 * All comparison methods delegate directly to the spire instance.
 */
private[spire] trait ScalaPartialOrderingWrapper[A] extends scala.math.PartialOrdering[A] {
  // Underlying spire partial order, supplied by the concrete wrapper.
  def partialOrder: PartialOrder[A]

  def tryCompare(x:A, y:A): Option[Int] = partialOrder.tryCompare(x, y)

  override def equiv(x:A, y:A): Boolean = partialOrder.eqv(x, y)
  override def gt(x:A, y:A): Boolean = partialOrder.gt(x, y)
  override def gteq(x:A, y:A): Boolean = partialOrder.gteqv(x, y)
  override def lt(x:A, y:A): Boolean = partialOrder.lt(x, y)
  override def lteq(x:A, y:A): Boolean = partialOrder.lteqv(x, y)
}
/**
 * Adapts a spire [[Order]] instance into a Scala `Ordering` (via the
 * version-compatibility shim in `spire.scalacompat`). All comparisons delegate
 * to the spire instance.
 */
private[spire] trait ScalaOrderingWrapper[A] extends spire.scalacompat.ScalaOrderingWrapperCompat[A] {
  // Underlying spire total order, supplied by the concrete wrapper.
  def order: Order[A]

  def compare(x:A, y:A): Int = order.compare(x, y)

  override def equiv(x:A, y:A): Boolean = order.eqv(x, y)
  override def gt(x:A, y:A): Boolean = order.gt(x, y)
  override def gteq(x:A, y:A): Boolean = order.gteqv(x, y)
  override def lt(x:A, y:A): Boolean = order.lt(x, y)
  override def lteq(x:A, y:A): Boolean = order.lteqv(x, y)
}
/**
 * Adapts spire's Ring/ConvertableFrom/Signed/Order instances for `A` into a
 * `scala.math.Numeric[A]`. Each `Numeric` operation delegates to the matching
 * spire typeclass method.
 */
private[spire] trait ScalaNumericWrapper[A] extends scala.math.Numeric[A] with ScalaOrderingWrapper[A] {
  // Spire instances backing the scala.math.Numeric operations.
  def structure: Ring[A]
  def conversions: ConvertableFrom[A]
  def signed: Signed[A]
  def order: Order[A]

  // Ring operations.
  def fromInt(x: Int): A = structure.fromInt(x)
  def negate(x:A): A = structure.negate(x)
  def minus(x:A, y:A): A = structure.minus(x, y)
  def plus(x:A, y:A): A = structure.plus(x, y)
  def times(x:A, y:A): A = structure.times(x, y)
  override def zero: A = structure.zero
  override def one: A = structure.one

  // Numeric conversions.
  def toDouble(x: A): Double = conversions.toDouble(x)
  def toFloat(x: A): Float = conversions.toFloat(x)
  def toInt(x: A): Int = conversions.toInt(x)
  def toLong(x: A): Long = conversions.toLong(x)

  // Sign operations.
  override def signum(x:A): Int = signed.signum(x)
  override def abs(x: A): A = signed.abs(x)

  // this is an abstract method starting in scala 2.13
  // Parsing is intentionally unsupported here, so this always returns None.
  def parseString(str: String): Option[A] = None
}
/**
 * Extends [[ScalaNumericWrapper]] to a `scala.math.Fractional` by narrowing
 * the backing structure to a spire [[Field]] and delegating division to it.
 */
private[spire] trait ScalaFractionalWrapper[A] extends ScalaNumericWrapper[A] with scala.math.Fractional[A] {
  def structure: Field[A]

  def div(x:A, y:A): A = structure.div(x, y)
}
/**
 * Extends [[ScalaNumericWrapper]] to a `scala.math.Integral` by narrowing the
 * backing structure to a spire [[EuclideanRing]]: `quot`/`rem` map to
 * Euclidean quotient (`equot`) and remainder (`emod`).
 */
private[spire] trait ScalaIntegralWrapper[A] extends ScalaNumericWrapper[A] with scala.math.Integral[A]{
  def structure: EuclideanRing[A]

  def quot(x:A, y:A): A = structure.equot(x, y)
  def rem(x:A, y:A): A = structure.emod(x, y)
}
| non/spire | core/src/main/scala/spire/math/ScalaWrappers.scala | Scala | mit | 2,597 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.matryoshka
import matryoshka._
import matryoshka.implicits._
import scalaz._
/**
 * Helpers for deriving a scalaz `Order` over recursive structures `T` with
 * pattern functor `F`, given an order for `F` parameterised by the order of
 * its recursive positions (`Delay[Order, F]`).
 */
object OrderR {
  // Compares two values by unfolding one layer (`project`) and applying the
  // F-layer order, which recurses through `orderR`.
  def order[T, F[_]: Functor](a: T, b: T)(implicit T: Recursive.Aux[T, F], O: Delay[Order, F]): Ordering =
    O(orderR[T, F](O)).order(a.project, b.project)

  // Ties the knot: an Order[T] defined in terms of `order`, passing the
  // F-layer order explicitly so the recursion uses the same instance.
  def orderR[T, F[_]: Functor](ordF: Delay[Order, F])(implicit T: Recursive.Aux[T, F]): Order[T] =
    Order.order[T](order[T, F](_, _)(Functor[F], T, ordF))
}
| djspiewak/quasar | foundation/src/main/scala/quasar/contrib/matryoshka/OrderR.scala | Scala | apache-2.0 | 1,033 |
package kr.ne.outsider
import org.springframework.scala.context.function.FunctionalConfigApplicationContext
/**
* Copyright (c) 2013 JeongHoon Byun aka "Outsider", <http://blog.outsider.ne.kr/>
* Licensed under the MIT license.
* <http://outsider.mit-license.org/>
*
* Author: Outsider
* Date: 13. 9. 29.
* Time: 오후 5:50
*/
/**
 * Example entry point: boots a Spring functional-configuration context,
 * resolves the HelloService bean and prints its greeting.
 */
object HelloApp extends App {
  // Build the application context from the Scala FunctionalConfiguration.
  val context = FunctionalConfigApplicationContext[ScalaExampleConfiguration]
  // Alternative registration styles, kept for reference:
  // context.registerClass[ScalaExampleConfiguration]
  // context.registerClasses(classOf[MyConfiguration])
  // context.registerConfigurations(new MyConfiguration)
  val helloService = context.getBean(classOf[HelloService])
  println(helloService.showSpringCamp)
}
| outsideris/spring-scala-examples | beans-with-spring-scala-using-functionalconfiguration/src/main/scala/kr/ne/outsider/HelloApp.scala | Scala | mit | 710 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.util.Properties
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.Partition
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row, SQLContext, SaveMode}
/**
 * Instructions on how to partition the table among workers.
 *
 * `column` is an integral-typed column used to range-partition the table;
 * `lowerBound`/`upperBound` are advisory min/max values of that column and
 * `numPartitions` is the number of partitions to generate.
 */
private[sql] case class JDBCPartitioningInfo(
    column: String,
    lowerBound: Long,
    upperBound: Long,
    numPartitions: Int)
private[sql] object JDBCRelation {
  /**
   * Given a partitioning schematic (a column of integral type, a number of
   * partitions, and upper and lower bounds on the column's value), generate
   * WHERE clauses for each partition so that each row in the table appears
   * exactly once.  The parameters minValue and maxValue are advisory in that
   * incorrect values may cause the partitioning to be poor, but no data
   * will fail to be represented.
   */
  def columnPartition(partitioning: JDBCPartitioningInfo): Array[Partition] = {
    if (partitioning == null || partitioning.numPartitions == 1) {
      // No partitioning info, or a single partition: one full-table scan
      // with no WHERE clause.
      Array[Partition](JDBCPartition(null, 0))
    } else {
      val column = partitioning.column
      val numPartitions = partitioning.numPartitions
      // Overflow and silliness can happen if you subtract then divide.
      // Here we get a little roundoff, but that's (hopefully) OK.
      val stride: Long =
        partitioning.upperBound / numPartitions - partitioning.lowerBound / numPartitions
      (0 until numPartitions).map { i =>
        // Closed-form bounds for partition i; equivalent to accumulating
        // `stride` per iteration since Long arithmetic wraps identically.
        val lowerValue = partitioning.lowerBound + i * stride
        val upperValue = lowerValue + stride
        // First partition is unbounded below and last unbounded above, so
        // rows outside [lowerBound, upperBound] are still covered.
        val lowerClause = if (i != 0) s"$column >= $lowerValue" else null
        val upperClause = if (i != numPartitions - 1) s"$column < $upperValue" else null
        val whereClause =
          if (upperClause == null) lowerClause
          else if (lowerClause == null) upperClause
          else s"$lowerClause AND $upperClause"
        JDBCPartition(whereClause, i): Partition
      }.toArray
    }
  }
}
/**
 * A JDBC-backed relation over `table` at `url`, split into the given `parts`
 * partitions. Supports column-pruned, filter-pushed scans and inserts via the
 * DataFrame writer.
 */
private[sql] case class JDBCRelation(
    url: String,
    table: String,
    parts: Array[Partition],
    properties: Properties = new Properties())(@transient val sqlContext: SQLContext)
  extends BaseRelation
  with PrunedFilteredScan
  with InsertableRelation {

  // Rows are already produced in Spark's internal representation, so no
  // Row conversion is needed (see the erasure cast in buildScan).
  override val needConversion: Boolean = false

  // Schema is resolved eagerly at construction via JDBCRDD.resolveTable.
  override val schema: StructType = JDBCRDD.resolveTable(url, table, properties)

  // Builds the scan RDD, pushing required columns and filters down to JDBC.
  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
    val driver: String = DriverRegistry.getDriverClassName(url)
    // Rely on a type erasure hack to pass RDD[InternalRow] back as RDD[Row]
    JDBCRDD.scanTable(
      sqlContext.sparkContext,
      schema,
      driver,
      url,
      properties,
      table,
      requiredColumns,
      filters,
      parts).asInstanceOf[RDD[Row]]
  }

  // Writes `data` into the JDBC table, overwriting or appending.
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
    data.write
      .mode(if (overwrite) SaveMode.Overwrite else SaveMode.Append)
      .jdbc(url, table, properties)
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala | Scala | apache-2.0 | 4,066 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.ArrayBuffer
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfter
import org.apache.spark._
import org.apache.spark.executor.{Executor, TaskMetrics, TaskMetricsSuite}
import org.apache.spark.internal.config.METRICS_CONF
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.metrics.source.JvmSource
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.util._
class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSparkContext {
  // Metrics sources registered via a metrics properties file must be visible
  // to tasks through TaskContext.getMetricsSources.
  test("provide metrics sources") {
    val filePath = getClass.getClassLoader.getResource("test_metrics_config.properties").getFile
    val conf = new SparkConf(loadDefaults = false)
      .set(METRICS_CONF, filePath)
    sc = new SparkContext("local", "test", conf)
    val rdd = sc.makeRDD(1 to 1)
    val result = sc.runJob(rdd, (tc: TaskContext, it: Iterator[Int]) => {
      tc.getMetricsSources("jvm").count {
        case source: JvmSource => true
        case _ => false
      }
    }).sum
    assert(result > 0)
  }

  // Completion listeners must still fire when the task body throws.
  test("calls TaskCompletionListener after failure") {
    TaskContextSuite.completed = false
    sc = new SparkContext("local", "test")
    val rdd = new RDD[String](sc, List()) {
      override def getPartitions = Array[Partition](StubPartition(0))
      override def compute(split: Partition, context: TaskContext) = {
        context.addTaskCompletionListener(new TaskCompletionListener {
          override def onTaskCompletion(context: TaskContext): Unit =
            TaskContextSuite.completed = true
        })
        sys.error("failed")
      }
    }
    val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
    val func = (c: TaskContext, i: Iterator[String]) => i.next()
    val taskBinary = sc.broadcast(JavaUtils.bufferToArray(closureSerializer.serialize((rdd, func))))
    // Run the task directly (not through the scheduler) so the failure is
    // observed synchronously here.
    val task = new ResultTask[String, String](
      0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties,
      closureSerializer.serialize(TaskMetrics.registered).array())
    intercept[RuntimeException] {
      task.run(0, 0, null, 1, null, Option.empty)
    }
    assert(TaskContextSuite.completed)
  }

  // Failure listeners must receive the error raised by the task body.
  test("calls TaskFailureListeners after failure") {
    TaskContextSuite.lastError = null
    sc = new SparkContext("local", "test")
    val rdd = new RDD[String](sc, List()) {
      override def getPartitions = Array[Partition](StubPartition(0))
      override def compute(split: Partition, context: TaskContext) = {
        context.addTaskFailureListener((context, error) => TaskContextSuite.lastError = error)
        sys.error("damn error")
      }
    }
    val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
    val func = (c: TaskContext, i: Iterator[String]) => i.next()
    val taskBinary = sc.broadcast(JavaUtils.bufferToArray(closureSerializer.serialize((rdd, func))))
    val task = new ResultTask[String, String](
      0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties,
      closureSerializer.serialize(TaskMetrics.registered).array())
    intercept[RuntimeException] {
      task.run(0, 0, null, 1, null, Option.empty)
    }
    assert(TaskContextSuite.lastError.getMessage == "damn error")
  }

  // A throwing completion listener must not prevent the others from running.
  test("all TaskCompletionListeners should be called even if some fail") {
    val context = TaskContext.empty()
    val listener = mock(classOf[TaskCompletionListener])
    context.addTaskCompletionListener(new TaskCompletionListener {
      override def onTaskCompletion(context: TaskContext): Unit = throw new Exception("blah")
    })
    context.addTaskCompletionListener(listener)
    context.addTaskCompletionListener(new TaskCompletionListener {
      override def onTaskCompletion(context: TaskContext): Unit = throw new Exception("blah")
    })

    intercept[TaskCompletionListenerException] {
      context.markTaskCompleted(None)
    }

    // The mocked (second) listener must still have been invoked once.
    verify(listener, times(1)).onTaskCompletion(any())
  }
  // A throwing failure listener must not mask either the task's error or the
  // other listeners' errors.
  test("all TaskFailureListeners should be called even if some fail") {
    val context = TaskContext.empty()
    val listener = mock(classOf[TaskFailureListener])
    context.addTaskFailureListener(new TaskFailureListener {
      override def onTaskFailure(context: TaskContext, error: Throwable): Unit =
        throw new Exception("exception in listener1")
    })
    context.addTaskFailureListener(listener)
    context.addTaskFailureListener(new TaskFailureListener {
      override def onTaskFailure(context: TaskContext, error: Throwable): Unit =
        throw new Exception("exception in listener3")
    })

    val e = intercept[TaskCompletionListenerException] {
      context.markTaskFailed(new Exception("exception in task"))
    }

    // Make sure listener 2 was called.
    verify(listener, times(1)).onTaskFailure(any(), any())

    // also need to check failure in TaskFailureListener does not mask earlier exception
    assert(e.getMessage.contains("exception in listener1"))
    assert(e.getMessage.contains("exception in listener3"))
    assert(e.getMessage.contains("exception in task"))
  }

  // attemptNumber must restart at 0 for each task and increment per retry,
  // independent of the global task id.
  test("TaskContext.attemptNumber should return attempt number, not task id (SPARK-4014)") {
    sc = new SparkContext("local[1,2]", "test")  // use maxRetries = 2 because we test failed tasks
    // Check that attemptIds are 0 for all tasks' initial attempts
    val attemptIds = sc.parallelize(Seq(1, 2), 2).mapPartitions { iter =>
      Seq(TaskContext.get().attemptNumber).iterator
    }.collect()
    assert(attemptIds.toSet === Set(0))

    // Test a job with failed tasks
    val attemptIdsWithFailedTask = sc.parallelize(Seq(1, 2), 2).mapPartitions { iter =>
      val attemptId = TaskContext.get().attemptNumber
      if (iter.next() == 1 && attemptId == 0) {
        throw new Exception("First execution of task failed")
      }
      Seq(attemptId).iterator
    }.collect()
    assert(attemptIdsWithFailedTask.toSet === Set(0, 1))
  }

  // stageAttemptNumber must increment when a stage is resubmitted after a
  // fetch failure (ordinary task retries stay within the same stage attempt).
  test("TaskContext.stageAttemptNumber getter") {
    sc = new SparkContext("local[1,2]", "test")

    // Check stageAttemptNumbers are 0 for initial stage
    val stageAttemptNumbers = sc.parallelize(Seq(1, 2), 2).mapPartitions { _ =>
      Seq(TaskContext.get().stageAttemptNumber()).iterator
    }.collect()
    assert(stageAttemptNumbers.toSet === Set(0))

    // Check stageAttemptNumbers that are resubmitted when tasks have FetchFailedException
    val stageAttemptNumbersWithFailedStage =
      sc.parallelize(Seq(1, 2, 3, 4), 4).repartition(1).mapPartitions { _ =>
        val stageAttemptNumber = TaskContext.get().stageAttemptNumber()
        if (stageAttemptNumber < 2) {
          // Throw FetchFailedException to explicitly trigger stage resubmission. A normal exception
          // will only trigger task resubmission in the same stage.
          throw new FetchFailedException(null, 0, 0L, 0, 0, "Fake")
        }
        Seq(stageAttemptNumber).iterator
      }.collect()

    assert(stageAttemptNumbersWithFailedStage.toSet === Set(2))
  }

  // Accumulators flagged countFailedValues=true must include failed attempts;
  // others must only reflect the successful runs.
  test("accumulators are updated on exception failures") {
    // This means use 1 core and 4 max task failures
    sc = new SparkContext("local[1,4]", "test")
    // Create 2 accumulators, one that counts failed values and another that doesn't
    val acc1 = AccumulatorSuite.createLongAccum("x", true)
    val acc2 = AccumulatorSuite.createLongAccum("y", false)
    // Fail first 3 attempts of every task. This means each task should be run 4 times.
    sc.parallelize(1 to 10, 10).map { i =>
      acc1.add(1)
      acc2.add(1)
      if (TaskContext.get.attemptNumber() <= 2) {
        throw new Exception("you did something wrong")
      } else {
        0
      }
    }.count()
    // The one that counts failed values should be 4x the one that didn't,
    // since we ran each task 4 times
    assert(AccumulatorContext.get(acc1.id).get.value === 40L)
    assert(AccumulatorContext.get(acc2.id).get.value === 10L)
  }
  // On task failure, only accumulators with countFailedValues=true should be
  // reported back to the driver.
  test("failed tasks collect only accumulators whose values count during failures") {
    sc = new SparkContext("local", "test")
    val acc1 = AccumulatorSuite.createLongAccum("x", false)
    val acc2 = AccumulatorSuite.createLongAccum("y", true)
    acc1.add(1)
    acc2.add(1)
    // Create a dummy task. We won't end up running this; we just want to collect
    // accumulator updates from it.
    val taskMetrics = TaskMetrics.empty
    val task = new Task[Int](0, 0, 0) {
      context = new TaskContextImpl(0, 0, 0, 0L, 0,
        new TaskMemoryManager(SparkEnv.get.memoryManager, 0L),
        new Properties,
        SparkEnv.get.metricsSystem,
        taskMetrics)

      taskMetrics.registerAccumulator(acc1)
      taskMetrics.registerAccumulator(acc2)
      override def runTask(tc: TaskContext): Int = 0
    }
    // First, simulate task success. This should give us all the accumulators.
    val accumUpdates1 = task.collectAccumulatorUpdates(taskFailed = false)
    TaskMetricsSuite.assertUpdatesEquals(accumUpdates1.takeRight(2), Seq(acc1, acc2))
    // Now, simulate task failures. This should give us only the accums that count failed values.
    val accumUpdates2 = task.collectAccumulatorUpdates(taskFailed = true)
    TaskMetricsSuite.assertUpdatesEquals(accumUpdates2.takeRight(1), Seq(acc2))
  }

  // Only internal accumulators that were actually updated (plus the
  // always-sent RESULT_SIZE) should travel back to the driver.
  test("only updated internal accumulators will be sent back to driver") {
    sc = new SparkContext("local", "test")
    // Create a dummy task. We won't end up running this; we just want to collect
    // accumulator updates from it.
    val taskMetrics = TaskMetrics.registered
    val task = new Task[Int](0, 0, 0) {
      context = new TaskContextImpl(0, 0, 0, 0L, 0,
        new TaskMemoryManager(SparkEnv.get.memoryManager, 0L),
        new Properties,
        SparkEnv.get.metricsSystem,
        taskMetrics)

      taskMetrics.incMemoryBytesSpilled(10)
      override def runTask(tc: TaskContext): Int = 0
    }
    val updatedAccums = task.collectAccumulatorUpdates()
    assert(updatedAccums.length == 2)
    // the RESULT_SIZE accumulator will be sent back anyway.
    assert(updatedAccums(0).name == Some(InternalAccumulator.RESULT_SIZE))
    assert(updatedAccums(0).value == 0)
    assert(updatedAccums(1).name == Some(InternalAccumulator.MEMORY_BYTES_SPILLED))
    assert(updatedAccums(1).value == 10)
  }

  // Driver-side local properties must be visible both via TaskContext and via
  // the deserialization-time properties on executors.
  test("localProperties are propagated to executors correctly") {
    sc = new SparkContext("local", "test")
    sc.setLocalProperty("testPropKey", "testPropValue")
    val res = sc.parallelize(Array(1), 1).map(i => i).map(i => {
      val inTask = TaskContext.get().getLocalProperty("testPropKey")
      val inDeser = Executor.taskDeserializationProps.get().getProperty("testPropKey")
      s"$inTask,$inDeser"
    }).collect()
    assert(res === Array("testPropValue,testPropValue"))
  }

  // Listeners added after completion must run immediately, exactly once.
  test("immediately call a completion listener if the context is completed") {
    var invocations = 0
    val context = TaskContext.empty()
    context.markTaskCompleted(None)
    context.addTaskCompletionListener(new TaskCompletionListener {
      override def onTaskCompletion(context: TaskContext): Unit =
        invocations += 1
    })
    assert(invocations == 1)
    context.markTaskCompleted(None)
    assert(invocations == 1)
  }
test("immediately call a failure listener if the context has failed") {
var invocations = 0
var lastError: Throwable = null
val error = new RuntimeException
val context = TaskContext.empty()
context.markTaskFailed(error)
context.addTaskFailureListener(new TaskFailureListener {
override def onTaskFailure(context: TaskContext, e: Throwable): Unit = {
lastError = e
invocations += 1
}
})
assert(lastError == error)
assert(invocations == 1)
context.markTaskFailed(error)
assert(lastError == error)
assert(invocations == 1)
}
test("TaskCompletionListenerException.getMessage should include previousError") {
val listenerErrorMessage = "exception in listener"
val taskErrorMessage = "exception in task"
val e = new TaskCompletionListenerException(
Seq(listenerErrorMessage),
Some(new RuntimeException(taskErrorMessage)))
assert(e.getMessage.contains(listenerErrorMessage) && e.getMessage.contains(taskErrorMessage))
}
test("all TaskCompletionListeners should be called even if some fail or a task") {
  val context = TaskContext.empty()
  val listener = mock(classOf[TaskCompletionListener])
  // Listener 1 (registered first, runs last): always throws.
  context.addTaskCompletionListener(new TaskCompletionListener {
    override def onTaskCompletion(context: TaskContext): Unit =
      throw new Exception("exception in listener1")
  })
  // Listener 2: a mock that records whether it was invoked.
  context.addTaskCompletionListener(listener)
  // Listener 3 (registered last, runs first): also throws.
  context.addTaskCompletionListener(new TaskCompletionListener {
    override def onTaskCompletion(context: TaskContext): Unit =
      throw new Exception("exception in listener3")
  })
  // Completion carries its own task error; listener failures are aggregated
  // into a single TaskCompletionListenerException.
  val e = intercept[TaskCompletionListenerException] {
    context.markTaskCompleted(Some(new Exception("exception in task")))
  }
  // Make sure listener 2 was called.
  verify(listener, times(1)).onTaskCompletion(any())
  // also need to check failure in TaskCompletionListener does not mask earlier exception
  assert(e.getMessage.contains("exception in listener1"))
  assert(e.getMessage.contains("exception in listener3"))
  assert(e.getMessage.contains("exception in task"))
}
test("listener registers another listener (reentrancy)") {
  val ctx = TaskContext.empty()
  var fired = 0
  val inner = new TaskCompletionListener {
    override def onTaskCompletion(tc: TaskContext): Unit = { fired += 1 }
  }
  // The outer listener registers `inner` while completion callbacks are already
  // running; since the task is complete, `inner` must run immediately.
  val outer = new TaskCompletionListener {
    override def onTaskCompletion(tc: TaskContext): Unit = {
      ctx.addTaskCompletionListener(inner)
      fired += 1
    }
  }
  ctx.addTaskCompletionListener(outer)
  assert(fired == 0)
  // Must not deadlock even though a callback re-enters the listener registry.
  ctx.markTaskCompleted(None)
  assert(fired == 2)
}
test("listener registers another listener using a second thread") {
  val context = TaskContext.empty()
  val invocations = new AtomicInteger(0)
  val simpleListener = new TaskCompletionListener {
    override def onTaskCompletion(context: TaskContext): Unit = {
      invocations.getAndIncrement()
    }
  }
  // Create a listener that registers another listener using a second thread.
  val multithreadedListener = new TaskCompletionListener {
    override def onTaskCompletion(context: TaskContext): Unit = {
      val thread = new Thread(new Runnable {
        override def run(): Unit = {
          context.addTaskCompletionListener(simpleListener)
        }
      })
      thread.start()
      invocations.getAndIncrement()
      // Joining inside the callback would deadlock if registration from the
      // other thread blocked on a lock held while listeners are running.
      thread.join()
    }
  }
  context.addTaskCompletionListener(multithreadedListener)
  // Ensure the listener can execute without encountering deadlock.
  assert(invocations.get() == 0)
  context.markTaskCompleted(None)
  // Both the outer listener and the one it registered must have run.
  assert(invocations.get() == 2)
}
test("listeners registered from different threads are called sequentially") {
  val context = TaskContext.empty()
  val invocations = new AtomicInteger(0)
  val numRunningListeners = new AtomicInteger(0)
  // Create a listener that will throw if more than one instance is running at the same time.
  val registerExclusiveListener = new Runnable {
    override def run(): Unit = {
      context.addTaskCompletionListener(new TaskCompletionListener {
        override def onTaskCompletion(context: TaskContext): Unit = {
          // Entry/exit counters: any overlap between two invocations makes one
          // of these checks throw, so `invocations` would come out wrong below.
          if (numRunningListeners.getAndIncrement() != 0) throw new Exception()
          Thread.sleep(100)
          if (numRunningListeners.decrementAndGet() != 0) throw new Exception()
          invocations.getAndIncrement()
        }
      })
    }
  }
  // Register it multiple times from different threads before and after the task completes.
  assert(invocations.get() == 0)
  assert(numRunningListeners.get() == 0)
  val thread1 = new Thread(registerExclusiveListener)
  val thread2 = new Thread(registerExclusiveListener)
  thread1.start()
  thread2.start()
  thread1.join()
  thread2.join()
  // Nothing runs until the task completes.
  assert(invocations.get() == 0)
  context.markTaskCompleted(None)
  assert(invocations.get() == 2)
  // Listeners added after completion run immediately on registration, but must
  // still be mutually exclusive with each other.
  val thread3 = new Thread(registerExclusiveListener)
  val thread4 = new Thread(registerExclusiveListener)
  thread3.start()
  thread4.start()
  thread3.join()
  thread4.join()
  assert(invocations.get() == 4)
  assert(numRunningListeners.get() == 0)
}
test("listeners registered from same thread are called in reverse order") {
  val ctx = TaskContext.empty()
  val callLog = ArrayBuffer.empty[String]
  // Each listener appends its id to `callLog` when invoked.
  def loggingListener(id: String): TaskCompletionListener = new TaskCompletionListener {
    override def onTaskCompletion(tc: TaskContext): Unit = { callLog += id }
  }
  Seq("A", "B", "C").foreach(id => ctx.addTaskCompletionListener(loggingListener(id)))
  assert(callLog === Seq.empty)
  // Registration order A, B, C must be replayed LIFO: C, B, A.
  ctx.markTaskCompleted(None)
  assert(callLog === Seq("C", "B", "A"))
  // After completion a newly added listener runs immediately, so it logs in
  // registration order instead.
  ctx.addTaskCompletionListener(loggingListener("D"))
  assert(callLog === Seq("C", "B", "A", "D"))
}
}
// Shared mutable state observed across task boundaries — presumably written
// from task closures in tests earlier in this suite (not visible in this
// chunk); volatile so driver-side asserts see task-thread writes. TODO confirm.
private object TaskContextSuite {
  @volatile var completed = false
  @volatile var lastError: Throwable = _
}
private case class StubPartition(index: Int) extends Partition
| ueshin/apache-spark | core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala | Scala | apache-2.0 | 18,943 |
package scalajs.antdesign
import japgolly.scalajs.react.{CallbackTo, React, ReactComponentU_, ReactNode}
import moment.Date
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
import scala.scalajs.js.{Dynamic, Object}
/**
* @see https://ant.design/components/calendar/#API
* @param value set date
* @param defaultValue set default date
* @param mode can be set to month or year
* @param fullScreen to set whether full-screen display
* @param dateCellRender customizes how a date cell is rendered
* @param monthCellRender customizes how a month cell is rendered
* @param locale set locale
* @param onPanelChange the callback when panel change
*/
case class Calendar(value: js.UndefOr[Date] = js.undefined,
                    defaultValue: js.UndefOr[Date] = js.undefined,
                    mode: js.UndefOr[Calendar.Mode] = js.undefined,
                    fullScreen: js.UndefOr[Boolean] = js.undefined,
                    dateCellRender: js.UndefOr[Date => CallbackTo[ReactNode]] = js.undefined,
                    monthCellRender: js.UndefOr[Date => CallbackTo[ReactNode]] = js.undefined,
                    locale: js.UndefOr[Calendar.Locale] = js.undefined,
                    onPanelChange: js.UndefOr[(Date, Calendar.Mode) => CallbackTo[Unit]] = js.undefined) {

  /**
   * Converts the defined props into a plain JS object understood by antd's
   * Calendar component; undefined props are simply omitted.
   */
  def toJS: Object with Dynamic = {
    val p = js.Dynamic.literal()
    value.foreach { x =>
      p.updateDynamic("value")(x)
    }
    defaultValue.foreach { x =>
      p.updateDynamic("defaultValue")(x)
    }
    mode.foreach { x =>
      p.updateDynamic("mode")(x.id)
    }
    fullScreen.foreach { x =>
      p.updateDynamic("fullScreen")(x)
    }
    dateCellRender.foreach { x =>
      p.updateDynamic("dateCellRender")(
        (v1: Date) => x(v1).runNow()
      )
    }
    monthCellRender.foreach { x =>
      // Bug fix: this previously wrote to the "dateCellRender" key, which both
      // clobbered any dateCellRender prop and never passed monthCellRender on.
      p.updateDynamic("monthCellRender")(
        (v1: Date) => x(v1).runNow()
      )
    }
    locale.foreach { x =>
      p.updateDynamic("locale")(x.toJS)
    }
    onPanelChange.foreach { x =>
      p.updateDynamic("onPanelChange")(
        (v1: Date, v2: Calendar.Mode) => x(v1, v2).runNow()
      )
    }
    p
  }

  /** Instantiates the antd Calendar React component with these props and children. */
  def apply(children: ReactNode*): ReactComponentU_ = {
    val f =
      React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.antd.Calendar)
    f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_]
  }
}
object Calendar {

  /** Panel display mode; `id` is the string value antd expects. */
  sealed abstract class Mode(val id: String)

  object Mode {
    case object Month extends Mode("month")
    case object Year extends Mode("year")
  }

  /** Localized UI strings for the calendar; mirrors antd's calendar locale object. */
  case class Locale(
      today: String,
      now: String,
      backToToday: String,
      ok: String,
      clear: String,
      month: String,
      year: String,
      timeSelect: String,
      dateSelect: String,
      monthSelect: String,
      yearSelect: String,
      decadeSelect: String,
      yearFormat: String,
      dateFormat: String,
      dayFormat: String,
      dateTimeFormat: String,
      monthFormat: String,
      monthBeforeYear: js.UndefOr[Boolean],
      previousMonth: String,
      nextMonth: String,
      previousYear: String,
      nextYear: String,
      previousDecade: String,
      nextDecade: String,
      previousCentury: String,
      nextCentury: String
  ) {

    /** Copies every field into a plain JS object; `monthBeforeYear` only when defined. */
    def toJS: Object with Dynamic = {
      val p = js.Dynamic.literal()
      p.updateDynamic("today")(today)
      p.updateDynamic("now")(now)
      p.updateDynamic("backToToday")(backToToday)
      p.updateDynamic("ok")(ok)
      p.updateDynamic("clear")(clear)
      p.updateDynamic("month")(month)
      p.updateDynamic("year")(year)
      p.updateDynamic("timeSelect")(timeSelect)
      p.updateDynamic("dateSelect")(dateSelect)
      p.updateDynamic("monthSelect")(monthSelect)
      p.updateDynamic("yearSelect")(yearSelect)
      p.updateDynamic("decadeSelect")(decadeSelect)
      p.updateDynamic("yearFormat")(yearFormat)
      p.updateDynamic("dateFormat")(dateFormat)
      p.updateDynamic("dayFormat")(dayFormat)
      p.updateDynamic("dateTimeFormat")(dateTimeFormat)
      p.updateDynamic("monthFormat")(monthFormat)
      monthBeforeYear.foreach { x =>
        p.updateDynamic("monthBeforeYear")(x)
      }
      p.updateDynamic("previousMonth")(previousMonth)
      p.updateDynamic("nextMonth")(nextMonth)
      p.updateDynamic("previousYear")(previousYear)
      p.updateDynamic("nextYear")(nextYear)
      p.updateDynamic("previousDecade")(previousDecade)
      p.updateDynamic("nextDecade")(nextDecade)
      p.updateDynamic("previousCentury")(previousCentury)
      p.updateDynamic("nextCentury")(nextCentury)
      p
    }

    // NOTE(review): this mirrors Calendar#apply and instantiates the antd
    // Calendar component using a Locale's fields as props — presumably a
    // copy-paste leftover; confirm it is intentional.
    def apply(children: ReactNode*): ReactComponentU_ = {
      val f =
        React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.antd.Calendar)
      f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_]
    }
  }

  /** Native facade over the raw locale objects shipped with antd (see @JSImport below). */
  @js.native
  private trait LocaleReference extends js.Any {
    val today: String = js.native
    val now: String = js.native
    val backToToday: String = js.native
    val ok: String = js.native
    val clear: String = js.native
    val month: String = js.native
    val year: String = js.native
    val timeSelect: String = js.native
    val dateSelect: String = js.native
    val monthSelect: String = js.native
    val yearSelect: String = js.native
    val decadeSelect: String = js.native
    val yearFormat: String = js.native
    val dateFormat: String = js.native
    val dayFormat: String = js.native
    val dateTimeFormat: String = js.native
    val monthFormat: String = js.native
    val monthBeforeYear: js.UndefOr[Boolean] = js.native
    val previousMonth: String = js.native
    val nextMonth: String = js.native
    val previousYear: String = js.native
    val nextYear: String = js.native
    val previousDecade: String = js.native
    val nextDecade: String = js.native
    val previousCentury: String = js.native
    val nextCentury: String = js.native
  }

  /** Converts a raw imported locale object into the typed Locale case class. */
  private def toLocale(localeReference: LocaleReference): Locale = Locale(
    localeReference.today,
    localeReference.now,
    localeReference.backToToday,
    localeReference.ok,
    localeReference.clear,
    localeReference.month,
    localeReference.year,
    localeReference.timeSelect,
    localeReference.dateSelect,
    localeReference.monthSelect,
    localeReference.yearSelect,
    localeReference.decadeSelect,
    localeReference.yearFormat,
    localeReference.dateFormat,
    localeReference.dayFormat,
    localeReference.dateTimeFormat,
    localeReference.monthFormat,
    localeReference.monthBeforeYear,
    localeReference.previousMonth,
    localeReference.nextMonth,
    localeReference.previousYear,
    localeReference.nextYear,
    localeReference.previousDecade,
    localeReference.nextDecade,
    localeReference.previousCentury,
    localeReference.nextCentury
  )

  // Raw locale bundles imported directly from antd's published calendar files.
  @js.native
  @JSImport("lib/calendar/locale/cs_CZ.js", JSImport.Default)
  private object _cs_CZ extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/da_DK.js", JSImport.Default)
  private object _da_DK extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/de_DE.js", JSImport.Default)
  private object _de_DE extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/en_US.js", JSImport.Default)
  private object _en_US extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/fr_BE.js", JSImport.Default)
  private object _fr_BE extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/ja_JP.js", JSImport.Default)
  private object _ja_JP extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/ko_KR.js", JSImport.Default)
  private object _ko_KR extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/pl_PL.js", JSImport.Default)
  private object _pl_PL extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/pt_BR.js", JSImport.Default)
  private object _pt_BR extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/ru_RU.js", JSImport.Default)
  private object _ru_RU extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/sv_SE.js", JSImport.Default)
  private object _sv_SE extends LocaleReference

  @js.native
  @JSImport("lib/calendar/locale/zh_CN.js", JSImport.Default)
  private object _zh_CN extends LocaleReference

  /** Typed locale instances for every language bundle listed above. */
  object Locale {
    val cs_CZ: Locale = toLocale(_cs_CZ)
    val da_DK: Locale = toLocale(_da_DK)
    val de_DE: Locale = toLocale(_de_DE)
    val en_US: Locale = toLocale(_en_US)
    val fr_BE: Locale = toLocale(_fr_BE)
    val ja_JP: Locale = toLocale(_ja_JP)
    val ko_KR: Locale = toLocale(_ko_KR)
    val pl_PL: Locale = toLocale(_pl_PL)
    val pt_BR: Locale = toLocale(_pt_BR)
    val ru_RU: Locale = toLocale(_ru_RU)
    val sv_SE: Locale = toLocale(_sv_SE)
    val zh_CN: Locale = toLocale(_zh_CN)
  }
}
| mdedetrich/scalajs-antdesign | src/main/scala/scalajs/antdesign/Calendar.scala | Scala | bsd-3-clause | 9,289 |
package actors
import language.postfixOps
import scala.concurrent.duration._
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.RootActorPath
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.CurrentClusterState
import akka.cluster.ClusterEvent.MemberUp
import akka.cluster.Member
import akka.cluster.MemberStatus
import com.typesafe.config.ConfigFactory
//#backend
class TransformationBackend extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  // Subscribe to MemberUp cluster events; re-subscribe on restart.
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case TransformationJob(text) =>
      log.info("Received: {}", text)
      sender ! TransformationResult(text.toUpperCase)
    case state: CurrentClusterState =>
      // On (re)subscription we receive the full cluster state: register with
      // every frontend node that is already up.
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(member) =>
      register(member)
  }

  // Announce this backend to any cluster member carrying the "frontend" role.
  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") !
        BackendRegistration
}
//#backend
object TransformationBackend {
  def main(args: Array[String]): Unit = {
    // Port "0" lets the OS pick a free port unless one is given as the first argument.
    val port = args.headOption.getOrElse("0")
    val config = ConfigFactory
      .parseString(s"akka.remote.netty.tcp.port=$port")
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load())
    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[TransformationBackend], name = "backend")
  }
}
| luzhuomi/playakkahbase | scala/playAkka/app/actors/TransformationBackend.scala | Scala | gpl-2.0 | 1,842 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.tools.collection
import scala.collection.mutable.Set
import scala.collection.mutable.WeakHashMap
// TODO: reimplement as a true weak hash set
/**
 * A mutable Set whose elements are held weakly, backed by a [[WeakHashMap]]
 * whose values are a dummy marker. Elements may disappear when the garbage
 * collector reclaims them.
 */
class WeakHashSet[A] extends Set[A] {

  // Backing map: keys are the set's elements (weakly referenced); the value
  // (scala.None used as an arbitrary AnyRef) is just a presence marker.
  val _values = new WeakHashMap[A, AnyRef]

  override def contains(key: A): Boolean = _values.contains(key)

  override def iterator: Iterator[A] = _values.keysIterator

  override def +=(elt: A): this.type = { _values(elt) = None; this }

  override def -=(elt: A): this.type = { _values -= elt; this }

  // Bug fix: the previous implementation evaluated `_values.empty` (which only
  // builds and discards a new map) and returned `this`, so `empty` yielded the
  // same, still-populated set. Per the collections contract it must return a
  // fresh empty collection of the same type.
  override def empty = new WeakHashSet[A]

  override def size = _values.size
}
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.tools/src/main/scala/org/openmole/core/tools/collection/WeakHashSet.scala | Scala | agpl-3.0 | 1,320 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{File, ObjectInputStream}
import java.net.URL
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._
import scala.util.control.NonFatal
import com.google.common.util.concurrent.MoreExecutors
import org.mockito.ArgumentCaptor
import org.mockito.ArgumentMatchers.{any, anyLong}
import org.mockito.Mockito.{spy, times, verify}
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.TestUtils.JavaSourceFromString
import org.apache.spark.internal.config.Network.RPC_MESSAGE_MAX_SIZE
import org.apache.spark.storage.TaskResultBlockId
import org.apache.spark.util.{MutableURLClassLoader, RpcUtils, Utils}
/**
* Removes the TaskResult from the BlockManager before delegating to a normal TaskResultGetter.
*
* Used to test the case where a BlockManager evicts the task result (or dies) before the
* TaskResult is retrieved.
*/
private class ResultDeletingTaskResultGetter(sparkEnv: SparkEnv, scheduler: TaskSchedulerImpl)
  extends TaskResultGetter(sparkEnv, scheduler) {

  // True once we have sabotaged a result; only the first task is affected.
  var removedResult = false
  // Whether the block was observed gone before the fetch; read by the test thread.
  @volatile var removeBlockSuccessfully = false

  override def enqueueSuccessfulTask(
    taskSetManager: TaskSetManager, tid: Long, serializedData: ByteBuffer): Unit = {
    if (!removedResult) {
      // Only remove the result once, since we'd like to test the case where the task eventually
      // succeeds.
      serializer.get().deserialize[TaskResult[_]](serializedData) match {
        case IndirectTaskResult(blockId, _) =>
          sparkEnv.blockManager.master.removeBlock(blockId)
          // removeBlock is asynchronous. Need to wait it's removed successfully
          try {
            eventually(timeout(3.seconds), interval(200.milliseconds)) {
              assert(!sparkEnv.blockManager.master.contains(blockId))
            }
            removeBlockSuccessfully = true
          } catch {
            case NonFatal(e) => removeBlockSuccessfully = false
          }
        case _: DirectTaskResult[_] =>
          taskSetManager.abort("Internal error: expect only indirect results")
      }
      // Rewind so the real getter can deserialize the same bytes again below.
      serializedData.rewind()
      removedResult = true
    }
    super.enqueueSuccessfulTask(taskSetManager, tid, serializedData)
  }
}
// TaskSchedulerImpl whose failed-task handling is a no-op, so tests can drive
// TaskResultGetter directly and observe handleFailedTask calls via a spy.
private class DummyTaskSchedulerImpl(sc: SparkContext)
  extends TaskSchedulerImpl(sc, 1, true) {
  override def handleFailedTask(
    taskSetManager: TaskSetManager,
    tid: Long,
    taskState: TaskState,
    reason: TaskFailedReason): Unit = {
    // do nothing
  }
}
/**
* A [[TaskResultGetter]] that stores the [[DirectTaskResult]]s it receives from executors
* _before_ modifying the results in any way.
*/
private class MyTaskResultGetter(env: SparkEnv, scheduler: TaskSchedulerImpl)
  extends TaskResultGetter(env, scheduler) {

  // Use the current thread so we can access its results synchronously
  protected override val getTaskResultExecutor = MoreExecutors.sameThreadExecutor()

  // DirectTaskResults that we receive from the executors, captured BEFORE the
  // superclass mutates them (e.g. before the result size is filled in).
  private val _taskResults = new ArrayBuffer[DirectTaskResult[_]]

  def taskResults: Seq[DirectTaskResult[_]] = _taskResults

  override def enqueueSuccessfulTask(tsm: TaskSetManager, tid: Long, data: ByteBuffer): Unit = {
    // work on a copy since the super class still needs to use the buffer
    val newBuffer = data.duplicate()
    _taskResults += env.closureSerializer.newInstance().deserialize[DirectTaskResult[_]](newBuffer)
    super.enqueueSuccessfulTask(tsm, tid, data)
  }
}
/**
* Tests related to handling task results (both direct and indirect).
*/
class TaskResultGetterSuite extends SparkFunSuite with BeforeAndAfter with LocalSparkContext {

  // Set the RPC message size to be as small as possible (it must be an integer, so 1 is as small
  // as we can make it) so the tests don't take too long.
  def conf: SparkConf = new SparkConf().set(RPC_MESSAGE_MAX_SIZE, 1)

  test("handling results smaller than max RPC message size") {
    sc = new SparkContext("local", "test", conf)
    val result = sc.parallelize(Seq(1), 1).map(x => 2 * x).reduce((x, y) => x)
    assert(result === 2)
  }

  test("handling results larger than max RPC message size") {
    sc = new SparkContext("local", "test", conf)
    val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
    // A result this large exceeds the RPC limit, so it travels through the
    // block manager as an IndirectTaskResult.
    val result =
      sc.parallelize(Seq(1), 1).map(x => 1.to(maxRpcMessageSize).toArray).reduce((x, y) => x)
    assert(result === 1.to(maxRpcMessageSize).toArray)
    val RESULT_BLOCK_ID = TaskResultBlockId(0)
    assert(sc.env.blockManager.master.getLocations(RESULT_BLOCK_ID).size === 0,
      "Expect result to be removed from the block manager.")
  }

  test("handling total size of results larger than maxResultSize") {
    sc = new SparkContext("local", "test", conf)
    val scheduler = new DummyTaskSchedulerImpl(sc)
    val spyScheduler = spy(scheduler)
    val resultGetter = new TaskResultGetter(sc.env, spyScheduler)
    scheduler.taskResultGetter = resultGetter
    val myTsm = new TaskSetManager(spyScheduler, FakeTask.createTaskSet(2), 1) {
      // always returns false
      override def canFetchMoreResults(size: Long): Boolean = false
    }
    val indirectTaskResult = IndirectTaskResult(TaskResultBlockId(0), 0)
    val directTaskResult = new DirectTaskResult(ByteBuffer.allocate(0), Nil, Array())
    val ser = sc.env.closureSerializer.newInstance()
    val serializedIndirect = ser.serialize(indirectTaskResult)
    val serializedDirect = ser.serialize(directTaskResult)
    resultGetter.enqueueSuccessfulTask(myTsm, 0, serializedDirect)
    resultGetter.enqueueSuccessfulTask(myTsm, 1, serializedIndirect)
    // Both the direct and indirect result paths must kill the task when
    // canFetchMoreResults says the size budget is exhausted.
    eventually(timeout(1.second)) {
      verify(spyScheduler, times(1)).handleFailedTask(
        myTsm, 0, TaskState.KILLED, TaskKilled("Tasks result size has exceeded maxResultSize"))
      verify(spyScheduler, times(1)).handleFailedTask(
        myTsm, 1, TaskState.KILLED, TaskKilled("Tasks result size has exceeded maxResultSize"))
    }
  }

  test("task retried if result missing from block manager") {
    // Set the maximum number of task failures to > 0, so that the task set isn't aborted
    // after the result is missing.
    sc = new SparkContext("local[1,2]", "test", conf)
    // If this test hangs, it's probably because no resource offers were made after the task
    // failed.
    val scheduler: TaskSchedulerImpl = sc.taskScheduler match {
      case taskScheduler: TaskSchedulerImpl =>
        taskScheduler
      case _ =>
        assert(false, "Expect local cluster to use TaskSchedulerImpl")
        throw new ClassCastException
    }
    // Sabotage the first task's result block so the fetch fails once.
    val resultGetter = new ResultDeletingTaskResultGetter(sc.env, scheduler)
    scheduler.taskResultGetter = resultGetter
    val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
    val result =
      sc.parallelize(Seq(1), 1).map(x => 1.to(maxRpcMessageSize).toArray).reduce((x, y) => x)
    assert(resultGetter.removeBlockSuccessfully)
    assert(result === 1.to(maxRpcMessageSize).toArray)
    // Make sure two tasks were run (one failed one, and a second retried one).
    assert(scheduler.nextTaskId.get() === 2)
  }

  /**
   * Make sure we are using the context classloader when deserializing failed TaskResults instead
   * of the Spark classloader.
   * This test compiles a jar containing an exception and tests that when it is thrown on the
   * executor, enqueueFailedTask can correctly deserialize the failure and identify the thrown
   * exception as the cause.
   * Before this fix, enqueueFailedTask would throw a ClassNotFoundException when deserializing
   * the exception, resulting in an UnknownReason for the TaskEndResult.
   */
  test("failed task deserialized with the correct classloader (SPARK-11195)") {
    // compile a small jar containing an exception that will be thrown on an executor.
    val tempDir = Utils.createTempDir()
    val srcDir = new File(tempDir, "repro/")
    srcDir.mkdirs()
    val excSource = new JavaSourceFromString(new File(srcDir, "MyException").toURI.getPath,
      """package repro;
        |
        |public class MyException extends Exception {
        |}
      """.stripMargin)
    val excFile = TestUtils.createCompiledClass("MyException", srcDir, excSource, Seq.empty)
    val jarFile = new File(tempDir, "testJar-%s.jar".format(System.currentTimeMillis()))
    TestUtils.createJar(Seq(excFile), jarFile, directoryPrefix = Some("repro"))
    // ensure we reset the classloader after the test completes
    val originalClassLoader = Thread.currentThread.getContextClassLoader
    val loader = new MutableURLClassLoader(new Array[URL](0), originalClassLoader)
    Utils.tryWithSafeFinally {
      // load the exception from the jar
      loader.addURL(jarFile.toURI.toURL)
      Thread.currentThread().setContextClassLoader(loader)
      val excClass: Class[_] = Utils.classForName("repro.MyException")
      // NOTE: we must run the cluster with "local" so that the executor can load the compiled
      // jar.
      sc = new SparkContext("local", "test", conf)
      val rdd = sc.parallelize(Seq(1), 1).map { _ =>
        val exc = excClass.getConstructor().newInstance().asInstanceOf[Exception]
        throw exc
      }
      // the driver should not have any problems resolving the exception class and determining
      // why the task failed.
      val exceptionMessage = intercept[SparkException] {
        rdd.collect()
      }.getMessage
      val expectedFailure = """(?s).*Lost task.*: repro.MyException.*""".r
      val unknownFailure = """(?s).*Lost task.*: UnknownReason.*""".r
      assert(expectedFailure.findFirstMatchIn(exceptionMessage).isDefined)
      assert(unknownFailure.findFirstMatchIn(exceptionMessage).isEmpty)
    } {
      Thread.currentThread.setContextClassLoader(originalClassLoader)
      loader.close()
    }
  }

  test("task result size is set on the driver, not the executors") {
    import InternalAccumulator._
    // Set up custom TaskResultGetter and TaskSchedulerImpl spy
    sc = new SparkContext("local", "test", conf)
    val scheduler = sc.taskScheduler.asInstanceOf[TaskSchedulerImpl]
    val spyScheduler = spy(scheduler)
    val resultGetter = new MyTaskResultGetter(sc.env, spyScheduler)
    val newDAGScheduler = new DAGScheduler(sc, spyScheduler)
    scheduler.taskResultGetter = resultGetter
    sc.dagScheduler = newDAGScheduler
    sc.taskScheduler = spyScheduler
    sc.taskScheduler.setDAGScheduler(newDAGScheduler)
    // Just run 1 task and capture the corresponding DirectTaskResult
    sc.parallelize(1 to 1, 1).count()
    val captor = ArgumentCaptor.forClass(classOf[DirectTaskResult[_]])
    verify(spyScheduler, times(1)).handleSuccessfulTask(any(), anyLong(), captor.capture())
    // When a task finishes, the executor sends a serialized DirectTaskResult to the driver
    // without setting the result size so as to avoid serializing the result again. Instead,
    // the result size is set later in TaskResultGetter on the driver before passing the
    // DirectTaskResult on to TaskSchedulerImpl. In this test, we capture the DirectTaskResult
    // before and after the result size is set.
    assert(resultGetter.taskResults.size === 1)
    val resBefore = resultGetter.taskResults.head
    val resAfter = captor.getValue
    val resSizeBefore = resBefore.accumUpdates.find(_.name == Some(RESULT_SIZE)).map(_.value)
    val resSizeAfter = resAfter.accumUpdates.find(_.name == Some(RESULT_SIZE)).map(_.value)
    assert(resSizeBefore.exists(_ == 0L))
    assert(resSizeAfter.exists(_.toString.toLong > 0L))
  }

  test("failed task is handled when error occurs deserializing the reason") {
    sc = new SparkContext("local", "test", conf)
    val rdd = sc.parallelize(Seq(1), 1).map { _ =>
      throw new UndeserializableException
    }
    val message = intercept[SparkException] {
      rdd.collect()
    }.getMessage
    // Job failed, even though the failure reason is unknown.
    val unknownFailure = """(?s).*Lost task.*: UnknownReason.*""".r
    assert(unknownFailure.findFirstMatchIn(message).isDefined)
  }
}
// Exception whose Java deserialization always fails, used to exercise the
// driver's handling of task-failure reasons that cannot be deserialized.
private class UndeserializableException extends Exception {
  private def readObject(in: ObjectInputStream): Unit = {
    // scalastyle:off throwerror
    throw new NoClassDefFoundError()
    // scalastyle:on throwerror
  }
}
| caneGuy/spark | core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala | Scala | apache-2.0 | 13,225 |
/*
* Copyright (c) 2010-2015 SAP SE.
* 2016-2018 The University of Sheffield.
*
* All rights reserved. This program and the accompanying materials
* This program and the accompanying materials are made
* available under the terms of the Eclipse Public License 2.0
* which is available at https://www.eclipse.org/legal/epl-2.0/
*
* SPDX-License-Identifier: EPL-2.0
*/
package com.logicalhacking.dasca.crosslanguage.builder.algorithms
import scala.collection.JavaConverters.asScalaIteratorConverter
import scala.collection.JavaConverters.collectionAsScalaIterableConverter
import org.slf4j.LoggerFactory
import com.ibm.wala.ipa.callgraph.CGNode
import com.ibm.wala.ipa.callgraph.CallGraph
import com.ibm.wala.ipa.cfg.BasicBlockInContext
import com.ibm.wala.shrikeBT.IConditionalBranchInstruction
import com.ibm.wala.ssa.SSAAbstractInvokeInstruction
import com.ibm.wala.ssa.SSAConditionalBranchInstruction
import com.ibm.wala.ssa.analysis.ExplodedControlFlowGraph
import com.ibm.wala.ssa.analysis.IExplodedBasicBlock
import com.typesafe.scalalogging.Logger
import scala.{ Option => ? }
/**
 * Reachability checker specialized to a Cordova plugin's `execute` method:
 * control-flow edges guarded by a comparison of the `action` parameter against
 * a string constant are only considered passable on the branch consistent with
 * the configured `action`.
 */
class ExecuteActionBasedChecker(val cg: CallGraph,
                                val keep: CGNode => Boolean,
                                val action: String,
                                val execNode: CGNode) extends ReachabilityChecker(cg, keep) {

  // Symbol table of the execute method under analysis (constants, value numbers).
  val symbolTable = execNode.getIR.getSymbolTable

  override val logger = Logger(LoggerFactory.getLogger(getClass.toString))

  override def canPassThrough(toBB: IExplodedBasicBlock,
                              predBB: IExplodedBasicBlock): Boolean = {
    // Assuming both basic blocks are from the execNode
    predBB.getLastInstruction match {
      case cond: SSAConditionalBranchInstruction if cond.getOperator == IConditionalBranchInstruction.Operator.EQ => {
        val vl = cond.getUse(0)
        val vr = cond.getUse(1)
        // The right side (vr) is apparently always either one or zero in conditional eq instructions
        // We are only looking for equal calls on the second method parameter of the cordova execute method,
        // which is "action"
        val nonActionUseInEquals = isEqualsAndDependsOnActionParam(vl)
        if (nonActionUseInEquals != -1 && symbolTable.isConstant(nonActionUseInEquals) &&
          symbolTable.isStringConstant(nonActionUseInEquals)) {
          val actionString = symbolTable.getStringValue(nonActionUseInEquals)
          // Block only the branch that contradicts the configured action:
          // matching action but taking the false branch, or vice versa.
          if (actionString.equals(action) && !isTrueBranch(vr, toBB, predBB)) {
            return false
          } else if (!actionString.equals(action) && isTrueBranch(vr, toBB, predBB)) {
            return false
          } else {
            return true
          }
        } else {
          // Not a constant comparison against the action parameter: keep the edge.
          return true
        }
      }
      case _ => true
    }
  }

  /**
   * If value `v` is defined by a call to an "equal"-named method involving the
   * `action` parameter (value number 2, i.e. the second method argument),
   * returns the value number of the other operand; otherwise -1.
   */
  def isEqualsAndDependsOnActionParam(v: Int): Int = {
    for (
      invoke <- ?(execNode.getDU.getDef(v)).collect({ case i: SSAAbstractInvokeInstruction => i });
      if (invoke.getDeclaredTarget.toString().contains("equal"))
    ) {
      if (invoke.getUse(0) == 2) {
        return invoke.getUse(1)
      } else if (invoke.getUse(1) == 2) {
        return invoke.getUse(0)
      }
    }
    return -1
  }

  /**
   * Whether the edge predBB -> toBB is the branch taken when the comparison
   * result is true. Relies on the fall-through successor being the block
   * whose last instruction index immediately follows predBB's — NOTE(review):
   * confirm this index arithmetic against the exploded-CFG layout.
   */
  def isTrueBranch(v: Int,
                   toBB: IExplodedBasicBlock,
                   predBB: IExplodedBasicBlock): Boolean = {
    if (symbolTable.isOneOrTrue(v) && predBB.getLastInstructionIndex + 1 != toBB.getLastInstructionIndex) {
      return true
    } else if (symbolTable.isZeroOrFalse(v) && predBB.getLastInstructionIndex + 1 == toBB.getLastInstructionIndex) {
      return true
    } else {
      return false
    }
  }

  /**
   * For a Runnable.run method, also treats the declaring class's constructor
   * as a predecessor, so reachability flows through the site that created the
   * Runnable.
   */
  def extraPredNodes(node: CGNode): List[CGNode] = {
    if (node.getMethod.getName.toString().equals("run")) {
      for (
        interf <- node.getMethod.getDeclaringClass.getAllImplementedInterfaces.asScala;
        if (interf.getName().toString().equals("Ljava/lang/Runnable"));
        n <- cg.iterator().asScala;
        if (n.getMethod.getDeclaringClass == node.getMethod.getDeclaringClass);
        if (n.getMethod.getName.toString() == "<init>")
      ) {
        return List(n)
      }
    }
    return List.empty
  }
}
| DASPA/DASCA | com.logicalhacking.dasca.crosslanguage/src/main/scala/com/logicalhacking/dasca/crosslanguage/builder/algorithms/ExecuteActionBasedChecker.scala | Scala | epl-1.0 | 4,191 |
package models.translations
import java.util._;
import javax.persistence._;
import javax.validation.constraints._;
import com.avaje.ebean.Model;
import play.data.format._;
import play.data.validation._;
import models.user.UserSession
import play.api.libs.json.Json
import play.api.libs.json._
import models.PaymentMethod
/**
 * Ebean entity holding one translated value: the translation of `field` on
 * the object identified by `objectId` (scoped by `objectType` — presumably
 * the object's kind/table; confirm) into the given `language`.
 * Mutable, default-initialised fields are required by the Ebean ORM.
 */
@Entity
class TranslationValue extends Model {

  @Id
  var id: Int = _

  // Identifier of the translated object.
  @NotNull
  var objectId: Int = _

  // Name of the translated field on the target object.
  @NotNull
  var field: String = _

  // Language code of this translation.
  @NotNull
  var language: String = _

  // The translated text itself; TEXT column to accommodate long values.
  @Column(columnDefinition = "TEXT")
  var value: String = _

  // Kind of object this translation belongs to.
  @ManyToOne()
  var objectType: TranslationObject = _
}
object TranslationValue {

  /**
   * JSON (de)serialisation for [[TranslationValue]]. Only the `language` and
   * `value` fields take part in the JSON representation.
   */
  implicit object TranslationValueFormat extends Format[TranslationValue] {

    def writes(translationValue: TranslationValue): JsValue = {
      // Renamed from `invoiceSeq` (copy-paste leftover from another model).
      val translationSeq = Seq(
        "language" -> Json.toJson(translationValue.language),
        "value" -> JsString(translationValue.value)
      )
      JsObject(translationSeq)
    }

    /**
     * Populates the two serialised fields when present. The previous
     * implementation ignored the JSON entirely and always returned an empty
     * entity; like before, this never fails, so existing callers are safe.
     */
    def reads(json: JsValue): JsResult[TranslationValue] = {
      val translationValue = new TranslationValue()
      (json \ "language").asOpt[String].foreach(translationValue.language = _)
      (json \ "value").asOpt[String].foreach(translationValue.value = _)
      JsSuccess(translationValue)
    }
  }

  /** Ebean finder for querying TranslationValue rows. */
  def finder: Model.Finder[Long, TranslationValue] = new Model.Finder[Long, TranslationValue](classOf[TranslationValue])
}
} | marcin-lawrowski/felicia | app/models/translations/TranslationValue.scala | Scala | gpl-3.0 | 1,196 |
package shield.actors.config.upstream
import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.{MustMatchers, WordSpecLike}
import shield.actors.config.{ServiceDetails, UpstreamAggregatorMsgs}
import shield.config.{HttpServiceLocation, ServiceLocation, Swagger1ServiceType, Swagger2ServiceType}
import spray.http.Uri
import shield.actors.config.{ChangedContents, ServiceDetails, UpstreamAggregatorMsgs}
import shield.config.{ServiceLocation, Swagger1ServiceType, Swagger2ServiceType}
/**
 * Tests that JsonUpstreamUpdater notifies its parent with discovered
 * upstreams only when the configuration contents are well-formed.
 */
class S3UpstreamUpdaterSpec extends TestKit(ActorSystem("testSystem"))
  with WordSpecLike
  with MustMatchers {

  /**
   * Spawns a fresh JsonUpstreamUpdater whose supervisor is a probe, delivers
   * the raw config `contents` to it and returns the probe for assertions.
   * Extracted because all six tests repeated this setup verbatim.
   */
  private def deliverContents(contents: String): TestProbe = {
    val parent = TestProbe()
    val actorRef = TestActorRef(Props(new Actor with JsonUpstreamUpdater), parent.ref, "UnderTestActor")
    actorRef ! ChangedContents(contents)
    parent
  }

  "S3UpstreamUpdater" must {
    "update parent when parsing expected json" in {
      val parent = deliverContents(
        """[
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5000"},
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5001"},
          | {"serviceType": "swagger2", "serviceLocation": "http://localhost:5002"},
          | {"serviceType": "swagger2", "serviceLocation": "https://test.org"}
          |]
          |""".stripMargin)
      parent.expectMsg(UpstreamAggregatorMsgs.DiscoveredUpstreams(
        Map(
          HttpServiceLocation("http://localhost:5000") -> ServiceDetails(Swagger1ServiceType, 1),
          HttpServiceLocation("http://localhost:5001") -> ServiceDetails(Swagger1ServiceType, 1),
          HttpServiceLocation("http://localhost:5002") -> ServiceDetails(Swagger2ServiceType, 1),
          HttpServiceLocation("https://test.org") -> ServiceDetails(Swagger2ServiceType, 1)
        )
      ))
    }

    "parse weights from json" in {
      // A missing "weight" defaults to 1.
      val parent = deliverContents(
        """[
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5000", "weight": 0},
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5001", "weight": 1},
          | {"serviceType": "swagger2", "serviceLocation": "http://localhost:5002", "weight": 2},
          | {"serviceType": "swagger2", "serviceLocation": "https://test.org"}
          |]
          |""".stripMargin)
      parent.expectMsg(UpstreamAggregatorMsgs.DiscoveredUpstreams(
        Map(
          HttpServiceLocation("http://localhost:5000") -> ServiceDetails(Swagger1ServiceType, 0),
          HttpServiceLocation("http://localhost:5001") -> ServiceDetails(Swagger1ServiceType, 1),
          HttpServiceLocation("http://localhost:5002") -> ServiceDetails(Swagger2ServiceType, 2),
          HttpServiceLocation("https://test.org") -> ServiceDetails(Swagger2ServiceType, 1)
        )
      ))
    }

    "not update parent when parsing unexpected json" in {
      val parent = deliverContents(
        """{
          | "swagger1": {"service": "pyapi", "baseUrl": "http://localhost:5001"},
          | "swagger2": [
          | {"service": "javaapi", "baseUrl": "http://localhost:5000"},
          | {"service": "javaapi", "baseUrl": "http://test.org"}
          | ]
          |}""".stripMargin)
      parent.msgAvailable mustBe false
    }

    "not update parent when parsing malformed weights" in {
      // Weight must be an integer, not a string.
      val parent = deliverContents(
        """[
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5000"},
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5001"},
          | {"serviceType": "swagger2", "serviceLocation": "http://localhost:5002", "weight": "0.1"},
          | {"serviceType": "swagger2", "serviceLocation": "https://test.org"}
          |]
          |""".stripMargin)
      parent.msgAvailable mustBe false
    }

    "not update parent when parsing negative weights" in {
      val parent = deliverContents(
        """[
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5000"},
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5001"},
          | {"serviceType": "swagger2", "serviceLocation": "http://localhost:5002", "weight": -10},
          | {"serviceType": "swagger2", "serviceLocation": "https://test.org"}
          |]
          |""".stripMargin)
      parent.msgAvailable mustBe false
    }

    "not update parent when there's an unrecognized serviceType" in {
      val parent = deliverContents(
        """[
          | {"serviceType": "swagger1", "serviceLocation": "http://localhost:5000"},
          | {"serviceType": "swagger2", "serviceLocation": "http://localhost:5002"},
          | {"serviceType": "foobar", "serviceLocation": "http://localhost:5002"}
          |]
          |""".stripMargin)
      parent.msgAvailable mustBe false
    }
  }
}
| RetailMeNot/shield | src/test/scala/shield/actors/config/upstream/S3UpstreamUpdaterSpec.scala | Scala | mit | 5,468 |
package tastytest
object TestMatchTypes {
  // NOTE(review): this file lives under test/tasty/neg with a *_fail name, so
  // these uses of Scala 3 match types appear intended to be REJECTED when
  // consumed from Scala 2 — do not "correct" them.
  def test1: MatchTypes.Elem[List[String]] = "hello"
  def test2: String = new MatchTypes.Foo[List[String], "hello"].foo("hello")
}
| scala/scala | test/tasty/neg/src-2/TestMatchTypes_fail.scala | Scala | apache-2.0 | 177 |
package com.emotioncity.soriento.testmodels
import javax.persistence.Id
import com.emotioncity.soriento.ODocumentReader
import com.emotioncity.soriento.RichODocumentImpl._
import com.orientechnologies.orient.core.id.ORID
import com.orientechnologies.orient.core.record.impl.ODocument
/**
 * Created by stream on 08.09.15.
 */
// Test model: a message whose optional primary key is an OrientDB record id
// (`@Id` marks the ORID field for Soriento's document mapping).
case class LinkedMessage(text: String, @Id id: Option[ORID] = None)
| dimparf/Soriento | src/test/scala/com/emotioncity/soriento/testmodels/LinkedMessage.scala | Scala | apache-2.0 | 397 |
package lila.socket
package actorApi
import play.api.libs.json.JsObject
import akka.actor.ActorRef
// Reply to a joining member, carrying the enumerator that feeds its channel.
case class Connected[M <: SocketMember](
  enumerator: JsEnumerator,
  member: M)

case class Sync(uid: String, friends: List[String])

// Client pings; PingVersion additionally reports the client's event version.
case class Ping(uid: String)
case class PingVersion(uid: String, version: Int)

// Periodic housekeeping tick (presumably used to sweep stale members — confirm in handlers).
case object Broom

case class Quit(uid: String)

case class SocketEnter[M <: SocketMember](uid: String, member: M)
case class SocketLeave[M <: SocketMember](uid: String, member: M)

case class Resync(uid: String)

case object GetVersion

// Deliver `message` to members registered under the given flag.
case class SendToFlag(flag: String, message: JsObject)

// Population (member-count) query / publish messages.
case object PopulationGet
case object PopulationTell
case class NbMembers(nb: Int, pong: JsObject)

case class StartWatching(uid: String, member: SocketMember, gameIds: Set[String])
| Happy0/lila | modules/socket/src/main/actorApi.scala | Scala | mit | 789 |
package utils
import animatedPotato.protocol.protocol._
import play.api.libs.json.Json
// Thin wrapper around an identifier, with an implicit play-json Format so it
// can be (de)serialised directly in request/response bodies.
case class ID(id: IdType)

object ID {
  implicit val IDFormat = Json.format[ID]
}
| fikrimuhal/animated-potato | backend/AnimatedPotatoBackend/app/utils/ID.scala | Scala | gpl-3.0 | 172 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.IOException
import java.net.Socket
import java.util.Collections
import org.apache.kafka.common.protocol.{ApiKeys, Errors, SecurityProtocol}
import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse}
import org.apache.kafka.common.requests.SaslHandshakeRequest
import org.apache.kafka.common.requests.SaslHandshakeResponse
import org.junit.Test
import org.junit.Assert._
import kafka.api.SaslTestHarness
/**
 * Integration tests for ApiVersionsRequest handling on a SASL_PLAINTEXT
 * listener, exercising its interaction with the SASL handshake.
 */
class SaslApiVersionsRequestTest extends BaseRequestTest with SaslTestHarness {
  // Whole suite runs against a single broker over SASL_PLAINTEXT / PLAIN.
  override protected def securityProtocol = SecurityProtocol.SASL_PLAINTEXT
  override protected val kafkaClientSaslMechanism = "PLAIN"
  override protected val kafkaServerSaslMechanisms = List("PLAIN")
  override protected val saslProperties = Some(kafkaSaslProperties(kafkaClientSaslMechanism, Some(kafkaServerSaslMechanisms)))
  override protected val zkSaslEnabled = false
  override def numBrokers = 1

  // An ApiVersionsRequest is accepted before the SASL handshake, and the
  // handshake still succeeds afterwards on the same connection.
  @Test
  def testApiVersionsRequestBeforeSaslHandshakeRequest() {
    val plaintextSocket = connect(protocol = securityProtocol)
    try {
      val apiVersionsResponse = sendApiVersionsRequest(plaintextSocket, new ApiVersionsRequest, 0)
      ApiVersionsRequestTest.validateApiVersionsResponse(apiVersionsResponse)
      sendSaslHandshakeRequestValidateResponse(plaintextSocket)
    } finally {
      plaintextSocket.close()
    }
  }

  // Once the handshake has started, a further ApiVersionsRequest on the same
  // connection is expected to fail with an IOException.
  @Test
  def testApiVersionsRequestAfterSaslHandshakeRequest() {
    val plaintextSocket = connect(protocol = securityProtocol)
    try {
      sendSaslHandshakeRequestValidateResponse(plaintextSocket)
      try {
        sendApiVersionsRequest(plaintextSocket, new ApiVersionsRequest, 0)
        fail("Versions Request during Sasl handshake did not fail")
      } catch {
        case _: IOException => // expected exception
      }
    } finally {
      plaintextSocket.close()
    }
  }

  // An unsupported version yields UNSUPPORTED_VERSION but leaves the
  // connection usable for a correctly-versioned retry and the handshake.
  @Test
  def testApiVersionsRequestWithUnsupportedVersion() {
    val plaintextSocket = connect(protocol = securityProtocol)
    try {
      val apiVersionsResponse = sendApiVersionsRequest(plaintextSocket, new ApiVersionsRequest, Short.MaxValue)
      assertEquals(Errors.UNSUPPORTED_VERSION.code(), apiVersionsResponse.errorCode)
      val apiVersionsResponse2 = sendApiVersionsRequest(plaintextSocket, new ApiVersionsRequest, 0)
      ApiVersionsRequestTest.validateApiVersionsResponse(apiVersionsResponse2)
      sendSaslHandshakeRequestValidateResponse(plaintextSocket)
    } finally {
      plaintextSocket.close()
    }
  }

  // Sends an ApiVersionsRequest at the given wire version and parses the reply.
  private def sendApiVersionsRequest(socket: Socket, request: ApiVersionsRequest, version: Short): ApiVersionsResponse = {
    val response = send(request, ApiKeys.API_VERSIONS, version, socket)
    ApiVersionsResponse.parse(response)
  }

  // Performs a PLAIN handshake and asserts the broker accepts the mechanism.
  private def sendSaslHandshakeRequestValidateResponse(socket: Socket) {
    val response = send(new SaslHandshakeRequest("PLAIN"), ApiKeys.SASL_HANDSHAKE, 0.toShort, socket)
    val handshakeResponse = SaslHandshakeResponse.parse(response)
    assertEquals(Errors.NONE.code, handshakeResponse.errorCode())
    assertEquals(Collections.singletonList("PLAIN"), handshakeResponse.enabledMechanisms())
  }
}
| eribeiro/kafka | core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala | Scala | apache-2.0 | 3,974 |
import java.util.concurrent.{ThreadFactory, Executors}
import com.typesafe.config.Config
import org.http4s.{Request, Response}
import org.joda.time.{ReadablePeriod, Interval, ReadableInstant}
import techex.web.WebSocket
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
import scalaz.{-\\/, \\/-, Validation}
import scalaz.concurrent.Task
package object techex {
  // A handler maps (some) HTTP requests to asynchronous responses/WebSockets.
  type WebHandler = PartialFunction[Request, Task[Response]]
  type WSHandler = PartialFunction[Request,Task[WebSocket]]
  // Validation with String error messages.
  type Val[A] = Validation[String, A]

  trait UntilBuilder{
    def until(to:ReadableInstant):Interval
  }

  // Lets a joda Period be used where a Duration is expected.
  implicit def toDuration(period:ReadablePeriod) = period.toPeriod.toStandardDuration

  // Enables `someInstant until otherInstant` syntax for building Intervals.
  implicit def toUntilBuilder(inst:ReadableInstant) = new UntilBuilder{
    def until(to:ReadableInstant) = new Interval(inst,to)
  }

  /** A Task that immediately succeeds with `a`. */
  def succ[A](a: A): Task[A] =
    Task.now(a)

  /** A Task that immediately fails with an Exception carrying `failMsg`. */
  def fail[A](failMsg: String): Task[A] =
    Task.fail(new Exception(failMsg))

  /** Bridges a scala Future into a scalaz Task via the async callback. */
  def asTask[T](fut: Future[T])(implicit ec: ExecutionContext): Task[T] = {
    Task.async {
      register =>
        fut.onComplete {
          case Success(v) => register(\/-(v))
          case Failure(ex) => register(-\/(ex))
        }
    }
  }

  // Turns apostrophes into double quotes so inline pseudo-JSON can be written
  // without escaping.
  def toJsonQuotes(str: String) =
    str.replace("'", "\"")

  /** None for null or empty strings, Some otherwise. */
  def nonEmpty(str: String): Option[String] =
    if (str == null || str.length == 0)
      None
    else
      Some(str)

  // Order-insensitive duration between two instants.
  def durationBetween(from:ReadableInstant,to:ReadableInstant) =
    if(from.isBefore(to))
      new Interval(from,to).toDuration
    else
      new Interval(to,from).toDuration

  /** Deferred println wrapped in a Task (runs when the Task runs). */
  def prln(txt:String) = Task{
    println(txt)
  }

  // Single-thread executors whose worker thread carries the given name,
  // which aids thread-dump debugging.
  def namedSingleThreadExecutor(name:String) = Executors.newSingleThreadExecutor(new ThreadFactory {
    override def newThread(r: Runnable): Thread = {
      new Thread(r,name)
    }
  })

  def namedSingleThreadScheduler(name:String) = Executors.newSingleThreadScheduledExecutor(new ThreadFactory {
    override def newThread(r: Runnable): Thread = {
      new Thread(r,name)
    }
  })

  /** Config lookup with a default when the key is absent. */
  def getStringOr(cfg:Config,key:String,defValue:String)=
    if(cfg.hasPath(key)) cfg.getString(key) else defValue
}
| kantega/tech-ex-2015 | backend/src/main/scala/techex/package.scala | Scala | mit | 2,152 |
package com.theseventhsense.utils.collections
/** Receiver for element-consumption notifications. */
trait Counter {
  def inc(): Unit
}

/**
 * Iterator decorator that notifies `counter` once per element actually
 * consumed via `next()`.
 *
 * Fix: the counter is now incremented only after the underlying `next()`
 * succeeds. Previously it was bumped first, so calling `next()` on an
 * exhausted iterator threw NoSuchElementException but still inflated the
 * count.
 */
class ObservableIterator[T](iterator: Iterator[T], counter: Counter) extends Iterator[T] {
  override def hasNext: Boolean = iterator.hasNext

  override def next(): T = {
    val element = iterator.next()
    counter.inc()
    element
  }
}
| 7thsense/utils-collections | core/src/main/scala/com/theseventhsense/utils/collections/ObservableIterator.scala | Scala | mit | 300 |
package autowire
import io.circe.Json
import io.circe.Decoder
import io.circe.Encoder
import io.circe.generic.auto._
import io.circe.parser._
import io.circe.syntax._
trait JsonSerializers extends autowire.Serializers[Json, Decoder, Encoder] {

  /** Encodes any value that has a circe Encoder into Json. */
  override def write[AnyClassToWrite: Encoder](obj: AnyClassToWrite): Json = obj.asJson

  /**
   * Decodes `json` into the requested type. On failure throws an Exception
   * wrapping the circe DecodingFailure, as before — but via `fold` instead of
   * the partial `left.get`/`right.get` pair, which is deprecated and
   * inspected the Either twice.
   */
  override def read[AnyClassToRead](json: Json)(implicit ev: Decoder[AnyClassToRead]): AnyClassToRead =
    ev.decodeJson(json).fold(failure => throw new Exception(failure), identity)
}
// Server-side autowire endpoint using circe Json as the wire format.
object AutoWireServer extends autowire.Server[Json, Decoder, Encoder] with JsonSerializers
| aholland/autowire-circe | server/app/autowire/AutoWireServer.scala | Scala | unlicense | 693 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.hvd
import org.scalatest.{MustMatchers, WordSpec}
import org.scalatestplus.play.guice.GuiceOneAppPerTest
import play.api.mvc.ControllerComponents
/** Verifies the user-facing label produced by each SalesChannel value. */
class SalesChannelSpec extends WordSpec with MustMatchers with GuiceOneAppPerTest {

  import play.api.i18n._

  implicit val lang = Lang("en-US")

  /**
   * Messages for the current test application. Every test previously repeated
   * these two lines verbatim; `app` is provided per-test by
   * GuiceOneAppPerTest, so this must stay a def (not a val).
   */
  private def preferredMessages: Messages = {
    val cc = app.injector.instanceOf[ControllerComponents]
    cc.messagesApi.preferred(Seq(lang))
  }

  "getMessage" must {
    "return correct text for Retail" in {
      implicit val messages: Messages = preferredMessages
      Retail.getMessage must be("Retail")
    }

    "return correct text for Wholesale" in {
      implicit val messages: Messages = preferredMessages
      Wholesale.getMessage must be("Wholesale")
    }

    "return correct text for Auction" in {
      implicit val messages: Messages = preferredMessages
      Auction.getMessage must be("Auction")
    }
  }
}
| hmrc/amls-frontend | test/models/hvd/SalesChannelSpec.scala | Scala | apache-2.0 | 1,638 |
package amailp.intellij.robot.extensions
import com.intellij.lang.PsiStructureViewFactory
import com.intellij.psi.PsiFile
import com.intellij.ide.structureView._
import com.intellij.openapi.fileEditor.FileEditor
import com.intellij.openapi.project.Project
import amailp.intellij.robot.psi.RobotPsiFile
import amailp.intellij.robot.structureView.RobotTreeBasedStructureViewBuilder
/** IDE extension point supplying the Structure View for Robot Framework files. */
class RobotPsiStructureViewFactory extends PsiStructureViewFactory {
  def getStructureViewBuilder(psiFile: PsiFile): StructureViewBuilder = new StructureViewBuilder {
    // Delegates to the tree-based builder; the cast assumes this factory is
    // only ever registered for RobotPsiFile instances — confirm in plugin.xml.
    def createStructureView(fileEditor: FileEditor, project: Project): StructureView =
      new RobotTreeBasedStructureViewBuilder(psiFile.asInstanceOf[RobotPsiFile]).createStructureView(fileEditor, project)
  }
}
| puhnastik/robot-plugin | src/main/scala/amailp/intellij/robot/extensions/RobotPsiStructureViewFactory.scala | Scala | gpl-3.0 | 769 |
package fpinscala.testing
import fpinscala.laziness.{Stream, Cons, Empty}
import fpinscala.state._
import fpinscala.parallelism._
import fpinscala.parallelism.Par.Par
import scala.util.Try
import Gen._
import Prop._
import java.util.concurrent.{Executors,ExecutorService}
/*
The library developed in this chapter goes through several iterations. This file is just the
shell, which you can fill in and modify while working through the chapter.
*/
// Outcome tag of a successful property run (see Prop.run for how each is
// produced and reported).
trait Status
// Test budget exhausted without covering the whole (finite) domain.
case object Exhausted extends Status
// Every value of the finite exhaustive domain was checked.
case object Proven extends Status
// No counterexample found by random testing.
case object Unfalsified extends Status
/**
 * A testable property: given a max size, a test-case budget and an RNG,
 * produces either a failure description (Left) or a success (Right).
 */
case class Prop(run : (MaxSize, TestCases, RNG) => Result) {

  /** Conjunction: fails as soon as `this` fails, otherwise defers to `p`. */
  def &&(p : Prop) : Prop = Prop { (m, t, r) =>
    // Evaluate once; the original ran run(m, t, r) twice per invocation.
    val result = run(m, t, r)
    if (result.isLeft) result else p.run(m, t, r)
  }

  /** Disjunction: succeeds as soon as `this` succeeds, otherwise defers to `p`. */
  def ||(p : Prop) : Prop = Prop { (m, t, r) =>
    val result = run(m, t, r)
    if (result.isRight) result else p.run(m, t, r)
  }

  /** Prefixes any failure message with `msg` to aid diagnosis. */
  def tag(msg : String) : Prop =
    Prop { (m, t, r) =>
      run(m, t, r) match {
        case Left(s) => Left(msg + s)
        case x => x
      }
    }
}
object Prop {
  type TestCases = Int
  type MaxSize = Int
  type Size = Int
  type FailedCase = String
  type SuccessCount = Int
  // Left: description of the failing case; Right: status plus how many
  // cases succeeded.
  type Result = Either[FailedCase, (Status, SuccessCount)]

  /**
   * Checks `f` first against a prefix of the generator's exhaustive domain
   * (a third of the budget), then falls back to random sampling for the rest
   * when the domain was not fully covered.
   */
  def forAll[A](gen: Gen[A])(f: A => Boolean): Prop = Prop {
    (max, n,rng) => {
      // Runs cases i until j from the stream; `onEnd` decides the verdict
      // when the stream ends before the budget does. A None element means the
      // domain is not enumerable, so we stop with Unfalsified.
      def go(i: Int, j: Int, s: Stream[Option[A]], onEnd : Int => Result): Result =
        if (i == j) Right((Unfalsified, i))
        else s match {
          case Cons(h,t) => h() match {
            case Some(h) =>
              try {
                if (f(h)) go(i+1,j,t(),onEnd)
                else Left(h.toString) }
              catch { case e: Exception => Left(buildMsg(h, e)) }
            case None => Right((Unfalsified,i))
          }
          case _ => onEnd(i)
        }
      // Finishing the exhaustive stream within budget proves the property;
      // hitting the size ceiling only exhausts it.
      val endResult : Int => Result =
        if (n < max) i => Right((Proven,i))
        else i => Right((Exhausted, i))
      go(0, Math.min(n, max)/3, gen.exhaustive, endResult) match {
        case Right((Unfalsified,_)) =>
          // Not yet proven: spend the remaining budget on random values.
          val rands = randomStream(gen)(rng).map(Some(_))
          go(n/3, n, rands, i => Right((Unfalsified, i)))
        case s => s // If proven or failed, stop immediately
      }
    }
  }

  /** Sized variant: spreads the test budget over sizes 0..max. */
  def forAll[A](g: Int => Gen[A])(f: A => Boolean): Prop = Prop {
    (max, n, rng) => {
      val casesPerSize = n / max + 1
      val props : List[Prop] =
        Stream.from(0).take(max + 1).map(i => forAll(g(i))(f)).toList
      props.map(p => Prop((max, n, rng) => p.run(max, casesPerSize, rng)))
        .reduceLeft(_ && _).run(max, n, rng)
    }
  }

  def forAll[A](g : Sized[A])(f : A => Boolean) : Prop =
    forAll(g.forSize)(f)

  /** Failure message for a test case that raised an exception. */
  def buildMsg[A](s : A, e : Exception) : String =
    "test case " + s + "\n" +
    "generated an exception" + e.getMessage + "\n" +
    "stack trace:\n" + e.getStackTrace.mkString("\n")

  /** Failure message for a test case that simply returned false. */
  def buildMsg[A](s : A) : String =
    "test case " + s + "\n" +
    "failed to pass the prop"

  /** Runs a property and reports the outcome on stdout. */
  def run(p : Prop,
          maxSize : Int = 100,
          testCases : Int = 100,
          rng : RNG = RNG.Simple(System.currentTimeMillis)) : Unit = {
    p.run(maxSize, testCases, rng) match {
      case Left(msg) => println("! test failed: \n" + msg)
      case Right((Unfalsified, i)) => println("+ Property Unfalsified, ran "+ i + " tests" )
      case Right((Proven, i)) => println("+ Property Proven, ran "+ i + " tests")
      case Right((Exhausted, i)) => println("+ Property Unfalsified up to max size, ran "+ i + " tests")
    }
  }
}
/**
 * A generator of As: `sample` draws random values by threading an RNG, and
 * `exhaustive` enumerates the whole domain when finite (a None element marks
 * a domain that cannot be enumerated).
 */
case class Gen[+A](sample: State[RNG, A], exhaustive : Stream[Option[A]]) {
  def map[B](f: A => B): Gen[B] = Gen(sample.map(f), exhaustive.map(o => o.map(f)))

  def map2[B, C](g : Gen[B])(f : (A, B) => C) : Gen[C] = Gen(
    this.sample.map2(g.sample)(f),
    // Pairs the two exhaustive streams positionally; a None on either side
    // makes the combined element None.
    this.exhaustive.zipWith(g.exhaustive)((oa, ob) =>
      for {
        a <- oa
        b <- ob
      } yield f(a,b)
    )
  )

  def flatMap[B](f: A => Gen[B]): Gen[B] = Gen(
    sample.flatMap(a => f(a).sample),
    // Exhaustive enumeration through flatMap is not implemented (see the
    // commented-out attempt below); the empty stream stands in for "unknown".
    Stream.empty
    /*exhaustive.flatMap {
      case None => Stream(None)
      case Some(a) => f(a).exhaustive
    }*/
  )

  def unsized : SGen[A] = Unsized(this)

  /* A method alias for the function we wrote earlier. */
  def listOfN(size: Int): Gen[List[A]] =
    Gen.listOfN(size, this)

  /** Product of two generators. */
  def **[B](g: Gen[B]): Gen[(A,B)] =
    (this map2 g)((_,_))
}
object Gen {
  /** Integers in [start, stopExclusive): random draw plus full enumeration. */
  def choose(start: Int, stopExclusive : Int) : Gen[Int] =
    Gen(State(RNG.nonNegativeInt).map(x => start + x % (stopExclusive - start)),
      Stream.unfold(start)(x => if (x < stopExclusive) Some((Some(x), x+1)) else None)
    )

  // Doubles in [i, j); a continuous domain has no exhaustive enumeration.
  def choose(i : Double, j : Double) : Gen[Double] =
    Gen(State(RNG.double).map(d => i + d % (j-i)),
      Stream.empty
    )

  def chooseViaMap(i : Double, j : Double) : Gen[Double] =
    uniform.map(d => i + d % (j-i))

  /** Constant generator; its domain is the single value `a`. */
  def unit[A](a: => A): Gen[A] = Gen(State.unit(a), Stream(Some(a)))

  def boolean : Gen[Boolean] = Gen(State(RNG.map(RNG.int)(x => x % 2 == 0)), Stream(Some(true), Some(false)))

  /** Uniform doubles from the underlying RNG. */
  def uniform : Gen[Double] = Gen(State(RNG.double), Stream.empty)

  def listOfN[A](n : Int, g : Gen[A]) : Gen[List[A]] =
    g.flatMap((a : A) => if (n <= 0) Gen.unit(List()) else listOfN(n-1, g).map((l : List[A]) => a :: l))

  // Pairs of equal parity drawn from [from, to); when parities differ the
  // second component is decremented. NOTE(review): e-1 can fall below `from`
  // at the lower boundary — confirm whether callers care.
  def sameParity(from : Int, to : Int) : Gen[(Int, Int)] =
    choose(from, to).map2(choose(from, to))((_, _)).map(t =>
      t match {
        case (d, e) if (d % 2 == 0 && e % 2 == 1) => (d, e-1)
        case (d, e) if (d % 2 == 1 && e % 2 == 0) => (d, e-1)
        case _ => t
      })

  /** Picks each generator with equal probability. */
  def union[A](g1 : Gen[A], g2 : Gen[A]) : Gen[A] =
    boolean.flatMap(b => if (b) g1 else g2)

  /** Picks g1 with probability proportional to its absolute weight. */
  def weighted[A](g1 : (Gen[A], Double), g2 : (Gen[A], Double)) : Gen[A] = {
    val g1Threshold = g1._2.abs / (g1._2.abs + g2._2.abs)
    uniform.flatMap(d => if (d < g1Threshold ) g1._1 else g2._1)
  }

  /** Infinite stream of samples from `gen`, threading the RNG state. */
  def randomStream[A](gen : Gen[A])(rng : RNG) : Stream[A] =
    Stream.unfold(rng)(rng => Some(gen.sample.run(rng)))

  def listOf[A](g: Gen[A]): Sized[List[A]] =
    Sized(n => g.listOfN(n))

  // Non-empty variant: size 0 is bumped to 1, negative sizes use their magnitude.
  def listOf1[A](g : Gen[A]) : Sized[List[A]] =
    Sized(n => if (n == 0) g.listOfN(n+1) else g.listOfN(n.abs))
}
// A generator that may (Sized) or may not (Unsized) depend on a size parameter.
trait SGen[+A]
case class Sized[+A](forSize : Size => Gen[A]) extends SGen[A]
case class Unsized[+A](get : Gen[A]) extends SGen[A]

object SGen {
  /** Size-indexed lists: at size n, lists of exactly n elements of `g`. */
  def listOfN[A](g : Gen[A]) : SGen[List[A]] =
    Sized(size => Gen.listOfN(size, g))
}
| benkio/fpinscala | exercises/src/main/scala/fpinscala/testing/Gen.scala | Scala | mit | 6,406 |
// Prints the same greeting via Scala's Predef (Console.out defaults to System.out).
println("here I am!")
| raymondpoling/bandit-script | resources/scala/script1.scala | Scala | epl-1.0 | 33 |
package controllers
import javax.inject.{Inject, Named}
import akka.actor.ActorRef
import models.actors.analyses.AnalysesActor
import models.db._
import models.form.AnalysesForm
import play.api.data.Form
import play.api.data.Forms._
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc.Controller
import scala.concurrent.{ExecutionContext, Future}
/**
 * Controller for the analyses page: renders the year/month selection form
 * and kicks off an asynchronous analysis run via the analyses actor.
 */
class AnalysesController @Inject()(@Named("analyses-actor") val analysesActor: ActorRef,
                                   val dataImportRepository: DataImportRepository,
                                   val messagesApi: MessagesApi)(implicit ec: ExecutionContext)
  extends Controller with I18nSupport with UserInfo {

  // Single required field: the year/month the analysis should cover.
  val analysesForm: Form[AnalysesForm] = Form {
    mapping(
      "yearMonth" -> nonEmptyText
    )(AnalysesForm.apply)(AnalysesForm.unapply)
  }

  /** Renders the analyses page with the years derived from past imports. */
  def analysesPage = SecureRequest.async { implicit request =>
    dataImportRepository.getAll map { imports =>
      val years = importsToYearsForView(imports)
      Ok(views.html.analyses(analysesForm, years))
    }
  }

  /** Validates the form and, on success, tells the actor to start the analysis. */
  def doAnalyses = SecureRequest.async { implicit request =>
    analysesForm.bindFromRequest.fold(
      // Future.successful: the redirect is already computed, no need to
      // schedule work on the execution context (previously Future { ... }).
      _ => Future.successful(
        Redirect(routes.AnalysesController.analysesPage()).flashing("error" ->
          "Falha ao realizar análise.")
      ),
      form => {
        // Fire-and-forget: the actor performs the analysis in the background.
        analysesActor ! AnalysesActor.StartAnalyses(request.userEmail, form.yearMonth)
        Future.successful(
          Redirect(routes.AnalysesController.analysesPage()).flashing("success" ->
            s"Análises para ${form.yearMonth} sendo realizadas. O processo será concluído em breve.")
        )
      }
    )
  }
}
| LeonardoZ/SAEB | app/controllers/AnalysesController.scala | Scala | mit | 1,722 |
package views.vrm_assign
object Timeout {
  // Element id used by the vrm_assign timeout view — presumably the exit
  // link/button; confirm against the corresponding template.
  final val ExitId = "exit"
}
| dvla/vrm-assign-online | app/views/vrm_assign/Timeout.scala | Scala | mit | 74 |
import leon.lang._
object DaysToYears {
  // Leon program-repair benchmark: the defect flagged by the FIXME below is
  // intentional — it is what the repair tool must find. Do not fix manually.
  val base : Int = 1980

  // Deliberately naive leap-year test (ignores the 100/400-year rules).
  def isLeapYear(y : Int): Boolean = y % 4 == 0

  /** Year reached after `days` days counted from `base`. */
  def daysToYears(days : Int): Int = {
    require(days > 0)
    daysToYears1(base, days)._1
  }

  // Peels off whole years from `days`; returns (year, remaining days).
  def daysToYears1(year : Int, days : Int): (Int, Int) = {
    require(year >= base && days > 0)
    if (days > 366 && isLeapYear(year))
      daysToYears1(year + 1, days - 366) // TODO this branch cannot be solved although it is correct because it depends on the erroneous branch
    else if (days > 365 && !isLeapYear(year))
      daysToYears1(year + 1, days - 365)
    else (year + 1, days) // FIXME +1
  } ensuring { res =>
    // The `passes` examples encode the intended behaviour the repair should
    // restore; the `+1` above makes them fail.
    res._2 <= 366 &&
    res._2 > 0 &&
    res._1 >= base &&
    (((year,days), res) passes {
      case (1980, 366 ) => (1980, 366)
      case (1980, 1000) => (1982, 269)
    })
  }

  def main(args : Array[String]) = {
    println(daysToYears1(base, 10593 ))
    println(daysToYears1(base, 366 ))
    println(daysToYears1(base, 1000 ))
  }
}
| ericpony/scala-examples | testcases/repair/DaysToYears/DaysToYears2.scala | Scala | mit | 998 |
package proofpeer.proofscript.frontend
import proofpeer.general.StringUtils
import proofpeer.indent.{ParseTree => IndentParseTree, _}
import proofpeer.indent.regex._
import proofpeer.indent.{Constraint => CS}
import ParseTree._
import proofpeer.proofscript.logic.{Preterm, Syntax, Namespace}
class ProofScriptGrammar(annotate : (Any, Option[Span]) => Any) {
def lex(terminal : String, expr : RegularExpr, prio : Option[Int] = None) : Grammar =
proofpeer.indent.rule(terminal, expr, prio, "\\\\root")
var keywords : Set[String] = Set()
def keyword(terminal : String, kw : String) : Grammar = {
keywords += kw
lex(terminal, string(kw), Some(2))
}
def freshkeyword(terminal : String, kw : String) : Grammar = {
keywords += kw
lex(terminal, string(kw), Some(3))
}
/*
≤
LESS-THAN OR EQUAL TO
Unicode: U+2264, UTF-8: E2 89 A4
≥
GREATER-THAN OR EQUAL TO
Unicode: U+2265, UTF-8: E2 89 A5
⇒
RIGHTWARDS DOUBLE ARROW
Unicode: U+21D2, UTF-8: E2 87 92
≔
COLON EQUALS
Unicode: U+2254, UTF-8: E2 89 94
'
APOSTROPHE
Unicode: U+0027, UTF-8: 27
*/
val hexdigit = alt(chars('a', 'f'), chars('A', 'F'), chars('0', '9'))
def g_literals =
lex("HexDigit", hexdigit) ++
lex("QuotationMark", char('"')) ++
lex("StringLiteralToken", REPEAT1(alt(
string("\\\\n"),
string("\\\\\\\\"),
string("\\\\\\""),
seq(char('\\\\'), char('u'), hexdigit, hexdigit, hexdigit, hexdigit),
seq(char('\\\\'), char('U'), hexdigit, hexdigit, hexdigit, hexdigit, hexdigit, hexdigit),
char(0x21),
chars(0x23, 0x5B),
chars(0x5D, 0x7E),
chars(0xA0, Int.MaxValue)))) ++
lex("Hash", char('#')) ++
lex("AnyToken", REPEAT1(CHAR(Range.universal))) ++
lex("Plus", char('+')) ++
lex("Minus", char('-')) ++
lex("Times", char('*')) ++
lex("Slash", char('/')) ++
lex("Le", char('<')) ++
lex("Gr", char('>')) ++
lex("Leq", ALT(char(0x2264), string("<="))) ++
lex("Geq", ALT(char(0x2265), string(">="))) ++
lex("QuestionMark", char('?')) ++
lex("ExclamationMark", char('!')) ++
lex("SquareBracketOpen", char('[')) ++
lex("SquareBracketClose", char(']')) ++
lex("ScriptMapsTo", ALT(char(0x21A6), string("=>"))) ++
lex("DoubleArrow", ALT(char(0x21D2), string("=>"))) ++
lex("SingleArrow", ALT(char(0x2192), string("->"))) ++
lex("ScriptEq", string("==")) ++
lex("ScriptNotEq", ALT(char(0x2260), string("<>"))) ++
lex("Apostrophe", char(0x27)) ++
lex("Prepend", string("<+")) ++
lex("Append", string("+>")) ++
lex("Concat", string("++")) ++
lex("MinusMinus", string("--")) ++
keyword("Val", "val") ++
freshkeyword("FreshVal", "val") ++
freshkeyword("FreshAssign", "=") ++
keyword("Def", "def") ++
keyword("Table", "table") ++
keyword("Datatype", "datatype") ++
keyword("Mod", "mod") ++
keyword("ScriptOr", "or") ++
keyword("ScriptAnd", "and") ++
keyword("ScriptNot", "not") ++
keyword("ScriptTrue", "true") ++
keyword("ScriptFalse", "false") ++
keyword("Lazy", "lazy") ++
keyword("If", "if") ++
keyword("Then", "then") ++
keyword("Else", "else") ++
keyword("While", "while") ++
keyword("Do", "do") ++
keyword("For", "for") ++
keyword("In", "in") ++
keyword("Match", "match") ++
keyword("Case", "case") ++
keyword("Timeit", "timeit") ++
keyword("Return", "return") ++
keyword("Assume", "assume") ++
keyword("Let", "let") ++
keyword("Choose", "choose") ++
keyword("Theory", "theory") ++
keyword("Extends", "extends") ++
keyword("Context", "context") ++
keyword("Literalcontext", "literalcontext") ++
keyword("InContext", "incontext") ++
keyword("InLiteralcontext", "inliteralcontext") ++
keyword("Show", "show") ++
keyword("Fail", "fail") ++
keyword("Nil", "nil") ++
keyword("To", "to") ++
keyword("Downto", "downto") ++
keyword("Theorem", "theorem") ++
keyword("Assert", "assert") ++
keyword("Failure", "failure") ++
keyword("As", "as") ++
keyword("By", "by") ++
keyword("TyNil", "Nil") ++
keyword("TyTheorem", "Theorem") ++
keyword("TyTerm", "Term") ++
keyword("TyString", "String") ++
keyword("TyType", "Type") ++
keyword("TyContext", "Context") ++
keyword("TyBoolean", "Boolean") ++
keyword("TyInteger", "Integer") ++
keyword("TyFunction", "Function") ++
keyword("TyTuple", "Tuple") ++
keyword("TyMap", "Map") ++
keyword("TySet", "Set")
def optspan(span : Span) : Option[Span] = {
if (span == null) None else Some(span)
}
def arule(n : String, rhs : String, constraint : Constraint,
action : ParseContext => Any) : Grammar =
{
def annotatedAction(c : ParseContext) : Any = {
annotate(action(c), optspan(c.span))
}
val (r, params, result) = string2rhs(rhs)
Grammar(ParseRule(n, r, params, constraint, result, annotatedAction))
}
def annotateUnop(b : UnaryOperator, span : Span) : UnaryOperator =
annotate(b, optspan(span)).asInstanceOf[UnaryOperator]
def annotateBinop(b : BinaryOperator, span : Span) : BinaryOperator =
annotate(b, optspan(span)).asInstanceOf[BinaryOperator]
def arule(n : String, rhs : String, action : ParseContext => Any) : Grammar =
arule(n, rhs, Constraint.unconstrained, action)
def mkTuple(elements : Vector[Expr], collapse : Boolean) : Expr = {
if (collapse && elements.size == 1)
elements.head
else
Tuple(elements)
}
def mkTuplePattern(elements : Vector[Pattern], collapse : Boolean) : Pattern = {
if (collapse && elements.size == 1)
elements.head
else
PTuple(elements)
}
def mkNamePattern(nametext : String, arg : Option[Pattern]) : Pattern = {
val name = Syntax.parseName(nametext)
if (StringUtils.isASCIIUpperLetter(name.name.name(0)))
PConstr(name, arg)
else {
if (name.namespace.isDefined)
PError("unqualified identifier expected")
else {
arg match {
case None => PId(name.toString)
case Some(arg) => PDestruct(name.toString, arg)
}
}
}
}
def mkTyCustom(text : String) : TyCustom = {
val name = Syntax.parseName(text)
TyCustom(name.namespace, name.name.toString)
}
def mkStringLiteral(c : ParseContext, quot1 : Span, quot2 : Span) : StringLiteral =
{
import proofpeer.general.StringUtils
val len = quot2.lastIndexExcl - quot1.firstIndexIncl
val s = c.document.getText(quot1.firstIndexIncl, len)
mkStringLiteralFromCodes(StringUtils.codePoints(s.substring(1, s.length - 1)))
}
def mkStringLiteralFromCodes(escapedCodes : Vector[Int]) : StringLiteral =
{
def readInt(i : Int, j : Int) : Int = {
var v = 0
for (k <- i until j) {
v = v * 16
val c = escapedCodes(k)
if (c >= '0' && c <= '9') v = v + (c - '0')
else if (c >= 'a' && c <= 'z') v = v + (c - 'a' + 10)
else if (c >= 'A' && c <= 'Z') v = v + (c - 'A' + 10)
else throw new RuntimeException("hex digit expected, but found "+c)
}
v
}
var codes : Vector[Int] = Vector()
var i = 0
val len = escapedCodes.size
while (i < len) {
val c = escapedCodes(i)
if (c == 0x5C) {
val d = escapedCodes(i + 1)
i = i + 2
d match {
case 'n' => codes = codes :+ 0x0A
case '"' => codes = codes :+ 0x22
case '\\\\' => codes = codes :+ 0x5C
case 'u' =>
codes = codes :+ readInt(i, i + 4)
i = i + 4
case 'U' =>
codes = codes :+ readInt(i, i + 8)
i = i + 8
case _ => throw new RuntimeException ("internal error: unexpected escape character code "+d)
}
} else {
codes = codes :+ c
i = i + 1
}
}
StringLiteral(codes)
}
def Subalign(a : String, b : String) = CS.or(CS.Indent(a, b), CS.Align(a, b))
  // Grammar for expressions. Precedence is encoded in the nonterminal chain:
  // Expr > FunExpr > TypedExpr > LazyExpr > OrExpr > AndExpr > NotExpr >
  // CmpExpr > concat/arith layers > AppExpr > BangExpr > PrimitiveExpr.
  val g_expr =
    arule("StringLiteral", "", c => null) ++
    arule("StringLiteral", "StringLiteral StringLiteralToken", c => null) ++
    arule("PrimitiveExpr", "Name", c => mkId(Syntax.parseName(c.text))) ++
    arule("Int", "Digits", c => Integer(BigInt(c.text, 10))) ++
    arule("Int", "Minus Digits", c => Integer(-BigInt(c.text("Digits"), 10))) ++
    arule("PrimitiveExpr", "Digits", c => Integer(BigInt(c.text("Digits"), 10))) ++
    arule("PrimitiveExpr", "RoundBracketOpen ExprList RoundBracketClose", c => mkTuple(c.ExprList, true)) ++
    arule("PrimitiveExpr", "SquareBracketOpen ExprList SquareBracketClose", c => mkTuple(c.ExprList, false)) ++
    arule("PrimitiveExpr", "CurlyBracketOpen ExprSetList1 CurlyBracketClose", c => SetLiteral(c.ExprSetList1)) ++
    arule("PrimitiveExpr", "CurlyBracketOpen ExprMapList1 CurlyBracketClose", c => MapLiteral(c.ExprMapList1)) ++
    arule("PrimitiveExpr", "CurlyBracketOpen CurlyBracketClose", c => SetLiteral(Vector())) ++
    arule("PrimitiveExpr", "CurlyBracketOpen SingleArrow CurlyBracketClose", c => MapLiteral(Vector())) ++
    arule("PrimitiveExpr", "ScriptTrue", c => Bool(true)) ++
    arule("PrimitiveExpr", "ScriptFalse", c => Bool(false)) ++
    arule("PrimitiveExpr", "Nil", c => NilExpr) ++
    arule("PrimitiveExpr", "Literalcontext", c => LiteralcontextExpr) ++
    arule("PrimitiveExpr", "Apostrophe ValueTerm Apostrophe", c => LogicTerm(c.ValueTerm)) ++
    arule("PrimitiveExpr", "Apostrophe Colon ValueType Apostrophe", c => LogicType(c.ValueType)) ++
    arule("PrimitiveExpr", "QuotationMark_1 StringLiteral QuotationMark_2", c => mkStringLiteral(c, c.span("QuotationMark_1"), c.span("QuotationMark_2"))) ++
    arule("OrExpr", "OrExpr ScriptOr AndExpr",
      c => BinaryOperation(annotateBinop(Or, c.span("ScriptOr")), c.OrExpr, c.AndExpr)) ++
    arule("OrExpr", "AndExpr", _.AndExpr[Any]) ++
    arule("AndExpr", "AndExpr ScriptAnd NotExpr",
      c => BinaryOperation(annotateBinop(And, c.span("ScriptAnd")), c.AndExpr, c.NotExpr)) ++
    arule("AndExpr", "NotExpr", _.NotExpr[Any]) ++
    arule("NotExpr", "ScriptNot NotExpr",
      c => UnaryOperation(annotateUnop(Not, c.span("ScriptNot")), c.NotExpr)) ++
    arule("NotExpr", "CmpExpr", _.CmpExpr[Any]) ++
    // Comparison chains (a < b <= c) are flattened into one CmpOperation node.
    arule("CmpExpr", "CmpExpr CmpOp GeneralArithExpr", { c =>
      val operator : CmpOperator = c.CmpOp
      val operand : Expr = c.GeneralArithExpr
      val cmpExpr : Expr = c.CmpExpr
      cmpExpr match {
        case op : CmpOperation =>
          CmpOperation(op.operators :+ operator, op.operands :+ operand)
        case e =>
          CmpOperation(Vector(operator), Vector(e, operand))
      }
    }) ++
    arule("CmpExpr", "GeneralArithExpr", _.GeneralArithExpr[Any]) ++
    arule("CmpOp", "Le", c => Le) ++
    arule("CmpOp", "Gr", c => Gr) ++
    arule("CmpOp", "Leq", c => Leq) ++
    arule("CmpOp", "Geq", c => Geq) ++
    arule("CmpOp", "ScriptEq", c => Eq) ++
    arule("CmpOp", "ScriptNotEq", c => NEq) ++
    arule("GeneralArithExpr", "ConcatExpr", _.ConcatExpr[Any]) ++
    arule("ConcatExpr", "ConcatExpr MinusMinus ArithExpr", c => BinaryOperation(annotateBinop(Minus, c.span("MinusMinus")), c.ConcatExpr, c.ArithExpr)) ++
    arule("ConcatExpr", "PrependConcatExpr", _.PrependConcatExpr[Any]) ++
    arule("ConcatExpr", "ConcatExpr Append ArithExpr", c => BinaryOperation(annotateBinop(Append, c.span("Append")), c.ConcatExpr, c.ArithExpr)) ++
    arule("PrependConcatExpr", "PrependExpr", _.PrependExpr[Any]) ++
    arule("PrependConcatExpr", "PrependConcatExpr Concat ArithExpr", c => BinaryOperation(annotateBinop(Concat, c.span("Concat")), c.PrependConcatExpr, c.ArithExpr)) ++
    // Prepend is right-associative, unlike the other concatenation operators.
    arule("PrependExpr", "ArithExpr Prepend PrependExpr", c => BinaryOperation(annotateBinop(Prepend, c.span("Prepend")), c.ArithExpr, c.PrependExpr)) ++
    arule("PrependExpr", "ArithExpr", _.ArithExpr[Any]) ++
    arule("ArithExpr", "RangeExpr", _.RangeExpr[Any]) ++
    arule("RangeExpr", "AddExpr", _.AddExpr[Any]) ++
    arule("RangeExpr", "AddExpr_1 To AddExpr_2",
      c => BinaryOperation(annotateBinop(RangeTo, c.span("To")), c.AddExpr_1, c.AddExpr_2)) ++
    arule("RangeExpr", "AddExpr_1 Downto AddExpr_2",
      c => BinaryOperation(annotateBinop(RangeDownto, c.span("Downto")), c.AddExpr_1, c.AddExpr_2)) ++
    arule("AddExpr", "AddExpr Plus NegExpr",
      c => BinaryOperation(annotateBinop(Add, c.span("Plus")), c.AddExpr, c.NegExpr)) ++
    arule("AddExpr", "AddExpr Minus NegExpr",
      c => BinaryOperation(annotateBinop(Sub, c.span("Minus")), c.AddExpr, c.NegExpr)) ++
    arule("AddExpr", "NegExpr", _.NegExpr[Any]) ++
    arule("NegExpr", "Minus NegExpr",
      c => UnaryOperation(annotateUnop(Neg, c.span("Minus")), c.NegExpr)) ++
    arule("NegExpr", "MultExpr", _.MultExpr[Any]) ++
    arule("MultExpr", "MultExpr Times BasicExpr",
      c => BinaryOperation(annotateBinop(Mul, c.span("Times")), c.MultExpr, c.BasicExpr)) ++
    arule("MultExpr", "MultExpr Slash BasicExpr",
      c => BinaryOperation(annotateBinop(Div, c.span("Slash")), c.MultExpr, c.BasicExpr)) ++
    arule("MultExpr", "MultExpr Mod BasicExpr",
      c => BinaryOperation(annotateBinop(Mod, c.span("Mod")), c.MultExpr, c.BasicExpr)) ++
    arule("MultExpr", "BasicExpr", _.BasicExpr[Any]) ++
    arule("BasicExpr", "AppExpr", _.AppExpr[Any]) ++
    arule("AppExpr", "BangExpr", _.BangExpr[Any]) ++
    arule("AppExpr", "AppExpr BangExpr", c => App(c.AppExpr, c.BangExpr)) ++
    arule("BangExpr", "PrimitiveExpr", _.PrimitiveExpr[Any]) ++
    arule("BangExpr", "BangExpr ExclamationMark",
      c => UnaryOperation(annotateUnop(Bang, c.span("ExclamationMark")), c.BangExpr)) ++
    arule("LazyExpr", "OrExpr", _.OrExpr[Any]) ++
    arule("LazyExpr", "Lazy LazyExpr", c => Lazy(c.LazyExpr)) ++
    arule("FunExpr", "Pattern ScriptMapsTo Block", c => Fun(c.Pattern, c.Block)) ++
    arule("TypedExpr", "LazyExpr Colon ScriptValueType", c => TypeCast(c.LazyExpr, c.ScriptValueType)) ++
    arule("TypedExpr", "LazyExpr", _.LazyExpr[Any]) ++
    arule("FunExpr", "TypedExpr", _.TypedExpr[Any]) ++
    arule("Expr", "FunExpr", _.FunExpr[Any]) ++
    // Expression lists; omitted elements (bare commas) default to NilExpr.
    arule("ExprList", "", c => Vector[Expr]()) ++
    arule("ExprList", "ExprList1", _.ExprList1[Any]) ++
    arule("ExprList1", "PExpr", c => Vector[Expr](c.PExpr)) ++
    arule("ExprList1", "Comma PExpr", c => Vector[Expr](NilExpr, c.PExpr)) ++
    arule("ExprList1", "Comma", c => Vector[Expr](NilExpr, NilExpr)) ++
    arule("ExprList1", "ExprList1 Comma PExpr", c => c.ExprList1[Vector[Expr]] :+ c.PExpr) ++
    arule("ExprList1", "ExprList1 Comma", c => c.ExprList1[Vector[Expr]] :+ NilExpr) ++
    arule("ExprSetList1", "PExpr", c => Vector[Expr](c.PExpr)) ++
    arule("ExprSetList1", "ExprSetList1 Comma PExpr", c => c.ExprSetList1[Vector[Expr]] :+ c.PExpr) ++
    arule("ExprMapList1", "PExpr_1 SingleArrow PExpr_2", c => Vector[(Expr, Expr)]((c.PExpr_1, c.PExpr_2))) ++
    arule("ExprMapList1", "ExprMapList1 Comma PExpr_1 SingleArrow PExpr_2", c => c.ExprMapList1[Vector[(Expr, Expr)]] :+ (c.PExpr_1, c.PExpr_2)) ++
    arule("PExpr", "Expr", _.Expr[Expr]) ++
    arule("PExpr", "ControlFlowExpr", c => ControlFlowExpr(c.ControlFlowExpr)) ++
    arule("PExprOrFreshVar", "PExpr", c => c.PExpr[Any]) ++
    arule("PExprOrFreshVar", "FreshVal FreshId", c => FreshQuote(false, c.FreshId)) ++
    arule("PExprOrFreshVar", "FreshAssign FreshId", c => FreshQuote(true, c.FreshId)) ++
    arule("FreshId", "IndexedName", c => mkId(Syntax.parseName(c.text))
    )
  // `do` blocks; ST* variants carry layout constraints for statement position,
  // *Expr variants are the layout-free expression forms. The boolean on Do
  // distinguishes plain `do` (false) from `do times` (true).
  val g_do =
    arule("STDo", "Do Block",
      CS.Indent("Do", "Block"),
      c => Do(c.Block, false)) ++
    arule("DoExpr", "Do Block",
      c => Do(c.Block, false)) ++
    arule("STDo", "Do Times Block",
      CS.and(CS.Indent("Do", "Times"), CS.Indent("Do", "Block")),
      c => Do(c.Block, true)) ++
    arule("DoExpr", "Do Times Block",
      c => Do(c.Block, true))
  // `if/then/else`; a missing else-branch becomes an empty Block. The layout
  // constraints distinguish the `then`/`else` being on the same line as `if`
  // from the vertically aligned form.
  val g_if =
    arule("STIf", "If PExpr Then Block_1 Else Block_2",
      CS.and(
        CS.Indent("If", "PExpr"),
        CS.ifThenElse(CS.Line("If", "Then"),
          CS.and(
            CS.Indent("If", "Block_1"),
            Subalign("If", "Else")),
          CS.and(
            Subalign("If", "Then"),
            CS.Indent("Then", "Block_1"),
            CS.or(CS.Line("Then", "Else"), CS.Align("Then", "Else")))),
        CS.ifThenElse(CS.Line("If", "Else"),
          CS.Indent("If", "Block_2"),
          CS.ifThenElse(CS.Line("Then", "Else"),
            CS.Indent("Then", "Block_2"),
            CS.Indent("Else", "Block_2")))),
      c => If(c.PExpr, c.Block_1, c.Block_2)) ++
    arule("STIf", "If PExpr Then Block",
      CS.and(
        CS.Indent("If", "PExpr"),
        CS.ifThenElse(CS.Line("If", "Then"),
          CS.Indent("If", "Block"),
          CS.and(
            Subalign("If", "Then"),
            CS.Indent("Then", "Block")))),
      c => If(c.PExpr, c.Block, Block(Vector()))) ++
    arule("IfExpr", "If PExpr Then Block_1 Else Block_2",
      c => If(c.PExpr, c.Block_1, c.Block_2)) ++
    arule("IfExpr", "If PExpr Then Block",
      c => If(c.PExpr, c.Block, Block(Vector())))
  // `while ... do` loops (statement form with layout, plus expression form).
  val g_while =
    arule("STWhile", "While PExpr Do Block",
      CS.and(
        CS.Indent("While", "PExpr"),
        CS.ifThenElse(CS.Line("While", "Do"),
          CS.Indent("While", "Block"),
          CS.and(Subalign("While", "Do"), CS.Indent("Do", "Block")))),
      c => While(c.PExpr, c.Block)) ++
    arule("WhileExpr", "While PExpr Do Block",
      c => While(c.PExpr, c.Block))
  // `for <pattern> in <expr> do` loops.
  val g_for =
    arule("STFor", "For Pattern In PExpr Do Block",
      CS.and(
        CS.Indent("For", "Pattern"),
        CS.Indent("For", "In"),
        CS.Indent("For", "PExpr"),
        CS.ifThenElse(CS.Line("For", "Do"),
          CS.Indent("For", "Block"),
          CS.and(Subalign("For", "Do"), CS.Indent("Do", "Block")))),
      c => For(c.Pattern, c.PExpr, c.Block)) ++
    arule("ForExpr", "For Pattern In PExpr Do Block",
      c => For(c.Pattern, c.PExpr, c.Block))
  // `timeit` blocks.
  val g_timeit =
    arule("STTimeit", "Timeit Block",
      CS.Indent("Timeit", "Block"),
      c => Timeit(c.Block)) ++
    arule("TimeitExpr", "Timeit Block",
      c => Timeit(c.Block))
  // `match` with a (possibly empty) list of `case ... => ...` branches.
  val g_match =
    arule("STMatch", "Match PExpr STMatchCases",
      CS.and(
        CS.Indent("Match", "PExpr"),
        Subalign("Match", "STMatchCases")),
      c => Match(c.PExpr, c.STMatchCases)) ++
    arule("STMatchCases", "STMatchCases_0 STMatchCase",
      CS.or(CS.Align("STMatchCases_0", "STMatchCase"), CS.Line("STMatchCases_0", "STMatchCase")),
      c => c.STMatchCases_0[Vector[MatchCase]] :+ c.STMatchCase) ++
    arule("STMatchCases", "", c => Vector[MatchCase]()) ++
    arule("STMatchCase", "Case Pattern DoubleArrow Block",
      CS.and(
        CS.Indent("Case", "Pattern"),
        CS.SameLine("Pattern", "DoubleArrow"),
        CS.Indent("Case", "Block")),
      c => MatchCase(c.Pattern, c.Block)) ++
    arule("MatchExpr", "Match PExpr MatchCases",
      c => Match(c.PExpr, c.MatchCases)) ++
    arule("MatchCases", "MatchCases MatchCase",
      c => c.MatchCases[Vector[MatchCase]] :+ c.MatchCase) ++
    arule("MatchCases", "", c => Vector[MatchCase]()) ++
    arule("MatchCase", "Case Pattern DoubleArrow Block",
      c => MatchCase(c.Pattern, c.Block))
  // Produces the statement and expression rules for one context-like keyword
  // (`Context`, `InContext`, ...), mapping it to the given AST constructor.
  def contextrules(contextKeyword : String, f : (Option[Expr], Block) => ControlFlow) : Grammar =
    arule("STContext", contextKeyword + " OptContextParam Block",
      CS.and(
        CS.Indent(contextKeyword, "OptContextParam"),
        CS.Indent(contextKeyword, "Block")),
      c => f(c.OptContextParam, c.Block)) ++
    arule("ContextExpr", contextKeyword + " OptContextParam Block",
      c => f(c.OptContextParam, c.Block))
  // The three context keywords plus the optional `<expr>` context parameter.
  val g_context =
    contextrules("Context", ContextControl.apply _) ++
    contextrules("InContext", InContextControl.apply _) ++
    contextrules("InLiteralcontext", InLiteralcontextControl.apply _) ++
    arule("OptContextParam", "", c => None) ++
    arule("OptContextParam", "Le PExpr Gr", c => Some(c.PExpr[Any]))
  // Combines all control-flow sub-grammars and funnels their ST* / *Expr
  // nonterminals into STControlFlow / ControlFlowExpr respectively.
  val g_controlflow =
    g_do ++ g_if ++ g_while ++ g_for ++ g_match ++ g_context ++ g_timeit ++
    arule("STControlFlow", "STDo", _.STDo[Any]) ++
    arule("STControlFlow", "STIf", _.STIf[Any]) ++
    arule("STControlFlow", "STWhile", _.STWhile[Any]) ++
    arule("STControlFlow", "STFor", _.STFor[Any]) ++
    arule("STControlFlow", "STMatch", _.STMatch[Any]) ++
    arule("STControlFlow", "STContext", _.STContext[Any]) ++
    arule("STControlFlow", "STTimeit", _.STTimeit[Any]) ++
    arule("ControlFlowExpr", "DoExpr", _.DoExpr[Any]) ++
    arule("ControlFlowExpr", "IfExpr", _.IfExpr[Any]) ++
    arule("ControlFlowExpr", "WhileExpr", _.WhileExpr[Any]) ++
    arule("ControlFlowExpr", "ForExpr", _.ForExpr[Any]) ++
    arule("ControlFlowExpr", "MatchExpr", _.MatchExpr[Any]) ++
    arule("ControlFlowExpr", "ContextExpr", _.ContextExpr[Any]) ++
    arule("ControlFlowExpr", "TimeitExpr", _.TimeitExpr[Any])
  // Grammar for patterns (used by val/def/match/for) and for script value
  // types; pattern lists follow the same bare-comma = PNil convention as
  // expression lists.
  val g_pattern =
    arule("AtomicPattern", "Underscore", c => PAny) ++
    arule("AtomicPattern", "Nil", c => PNil) ++
    arule("AtomicPattern", "Nil ExclamationMark", c => PNilBang) ++
    arule("AtomicPattern", "Name", c => mkNamePattern(c.text("Name"), None)) ++
    arule("AtomicPattern", "Int", c => PInt(c.Int[Integer].value)) ++
    arule("AtomicPattern", "QuotationMark_1 StringLiteral QuotationMark_2",
      c => PString(mkStringLiteral(c, c.span("QuotationMark_1"), c.span("QuotationMark_2")).value)) ++
    arule("AtomicPattern", "ScriptTrue", c => PBool(true)) ++
    arule("AtomicPattern", "ScriptFalse", c => PBool(false)) ++
    arule("AtomicPattern", "Apostrophe PatternTerm Apostrophe", c => PLogicTerm(c.PatternTerm)) ++
    arule("AtomicPattern", "Apostrophe Colon PatternType Apostrophe", c => PLogicType(c.PatternType)) ++
    arule("AtomicPattern", "RoundBracketOpen PatternList RoundBracketClose", c => mkTuplePattern(c.PatternList, true)) ++
    arule("AtomicPattern", "SquareBracketOpen PatternList SquareBracketClose", c => mkTuplePattern(c.PatternList, false)) ++
    arule("ConstrPattern", "AtomicPattern", _.AtomicPattern[Any]) ++
    arule("ConstrPattern", "Name AtomicPattern", c => mkNamePattern(c.text("Name"), Some(c.AtomicPattern))) ++
    arule("PrependPattern", "ConstrPattern Prepend PrependPattern", c => PPrepend(c.ConstrPattern, c.PrependPattern)) ++
    arule("PrependPattern", "AppendPattern", _.AppendPattern[Any]) ++
    arule("AppendPattern", "AppendPattern Append ConstrPattern", c => PAppend(c.AppendPattern, c.ConstrPattern)) ++
    arule("AppendPattern", "ConstrPattern", _.ConstrPattern[Any]) ++
    // Primitive script value types, plus unions (|) and options (?).
    arule("ScriptValuePrimitiveType", "Underscore", c => TyAny) ++
    arule("ScriptValuePrimitiveType", "TyNil", c => TyNil) ++
    arule("ScriptValuePrimitiveType", "TyContext", c => TyContext) ++
    arule("ScriptValuePrimitiveType", "TyTheorem", c => TyTheorem) ++
    arule("ScriptValuePrimitiveType", "TyTerm", c => TyTerm) ++
    arule("ScriptValuePrimitiveType", "TyType", c => TyType) ++
    arule("ScriptValuePrimitiveType", "TyBoolean", c => TyBoolean) ++
    arule("ScriptValuePrimitiveType", "TyInteger", c => TyInteger) ++
    arule("ScriptValuePrimitiveType", "TyString", c => TyString) ++
    arule("ScriptValuePrimitiveType", "TyTuple", c => TyTuple) ++
    arule("ScriptValuePrimitiveType", "TyMap", c => TyMap) ++
    arule("ScriptValuePrimitiveType", "TySet", c => TySet) ++
    arule("ScriptValuePrimitiveType", "TyFunction", c => TyFunction) ++
    arule("ScriptValuePrimitiveType", "Name", c => mkTyCustom(c.text("Name"))) ++
    arule("ScriptValueType", "ScriptValuePrimitiveType", c => c.ScriptValuePrimitiveType[Any]) ++
    arule("ScriptValueType", "ScriptValueType Bar ScriptValuePrimitiveType", c => TyUnion(c.ScriptValueType, c.ScriptValuePrimitiveType)) ++
    arule("ScriptValueType", "ScriptValuePrimitiveType QuestionMark", c => TyOption(c.ScriptValuePrimitiveType)) ++
    arule("AsPattern", "PrependPattern", _.PrependPattern[Any]) ++
    arule("AsPattern", "Pattern As IndexedName", c => PAs(c.Pattern, c.text("IndexedName"))) ++
    arule("IfPattern", "AsPattern", _.AsPattern[Any]) ++
    arule("IfPattern", "Pattern If Expr", c => PIf(c.Pattern, c.Expr)) ++
    arule("ArgumentPattern", "IfPattern", _.IfPattern[Any]) ++
    arule("TypePattern", "ArgumentPattern", _.ArgumentPattern[Any]) ++
    arule("TypePattern", "TypePattern Colon ScriptValueType", c => PType(c.TypePattern, c.ScriptValueType)) ++
    arule("Pattern", "TypePattern", _.TypePattern[Any]) ++
    arule("OptPattern", "", c => None) ++
    arule("OptPattern", "Pattern", c => Some(c.Pattern[Any])) ++
    arule("PatternList", "", c => Vector[Pattern]()) ++
    arule("PatternList", "PatternList1", _.PatternList1[Any]) ++
    arule("PatternList1", "Comma Pattern", c => Vector[Pattern](PNil, c.Pattern)) ++
    arule("PatternList1", "Comma", c => Vector[Pattern](PNil, PNil)) ++
    arule("PatternList1", "Pattern", c => Vector[Pattern](c.Pattern)) ++
    arule("PatternList1", "PatternList1 Comma Pattern", c => c.PatternList1[Vector[Pattern]] :+ c.Pattern) ++
    arule("PatternList1", "PatternList1 Comma", c => c.PatternList1[Vector[Pattern]] :+ PNil)
  // `#` comments as statements; the comment body is kept as raw text.
  val g_comment =
    arule("Comment", "CommentText", c => Comment(c.text("CommentText"))) ++
    arule("CommentText", "Hash", c => null) ++
    arule("CommentText", "CommentText_0 AnyToken", CS.Indent("CommentText_0", "AnyToken"), c => null) ++
    arule("ST", "Comment", c => STComment(c.Comment))
  // `show <expr>` statement.
  val g_show =
    arule("ST", "Show PExpr",
      CS.Indent("Show", "PExpr"),
      c => STShow(c.PExpr))
  // `fail` statement with an optional message expression.
  val g_fail =
    arule("ST", "Fail",
      c => STFail(None)) ++
    arule("ST", "Fail PExpr",
      CS.Indent("Fail", "PExpr"),
      c => STFail(Some(c.PExpr[Expr])))
  // `val <pattern> = <block>` bindings and the declaration-only `val a b c`.
  val g_val =
    arule("ST", "Val Pattern Eq Block",
      CS.and(
        CS.Indent("Val", "Pattern"),
        CS.SameLine("Pattern", "Eq"),
        CS.or(CS.Line("Eq", "Block"), CS.Indent("Val", "Block"))),
      c => STVal(c.Pattern, c.Block)) ++
    arule("ST", "Val IdList",
      CS.Indent("Val", "IdList"),
      c => STValIntro(c.IdList)) ++
    arule("IdList", "IndexedName", c => List[Id](Id(c.text("IndexedName")))) ++
    arule("IdList", "IdList IndexedName", c => c.IdList[List[Id]] :+ Id(c.text("IndexedName")))
  // Assignment to an already-introduced binding: `<pattern> = <block>`.
  val g_assign =
    arule("ST", "Pattern Eq Block",
      CS.and(
        CS.SameLine("Pattern", "Eq"),
        CS.Protrude("Pattern"),
        CS.or(CS.Line("Eq", "Block"), CS.Indent("Pattern", "Block"))),
      c => STAssign(c.Pattern, c.Block))
def mkSTDef(cases : Vector[DefCase], memoize : Boolean, contextParam : Option[Expr]) : STDef = {
var result : Map[String, Vector[DefCase]] = Map()
for (c <- cases) {
result.get(c.name) match {
case None => result = result + (c.name -> Vector(c))
case Some(cs) => result = result + (c.name -> (cs :+ c))
}
}
STDef(result, memoize, contextParam)
}
  // `def` / `table` definitions: either a block of DefCases, or a single
  // inline case. `table` variants set the memoize flag on the STDef.
  val g_def =
    arule("ST", "Def OptContextParam DefCases",
      CS.and(
        CS.Indent("Def", "OptContextParam"),
        CS.Indent("Def", "DefCases")),
      c => mkSTDef(c.DefCases, false, c.OptContextParam)) ++
    arule("ST", "Def OptContextParam IndexedName ArgumentPattern DefType Eq Block",
      CS.and(
        CS.SameLine("Def", "IndexedName"),
        CS.Indent("Def", "OptContextParam"),
        CS.Indent("Def", "ArgumentPattern"),
        CS.Indent("Def", "DefType"),
        CS.Indent("Def", "Eq"),
        CS.Indent("Def", "Block"),
        CS.not(CS.NullSpan("Block")),
        CS.not(CS.SameLine("Def", "Block"))),
      c => mkSTDef(Vector(DefCase(c.text("IndexedName"), c.ArgumentPattern, c.DefType, c.Block)), false, c.OptContextParam)) ++
    arule("ST", "Table OptContextParam DefCases",
      CS.and(
        CS.Indent("Table", "OptContextParam"),
        CS.Indent("Table", "DefCases")),
      c => mkSTDef(c.DefCases, true, c.OptContextParam)) ++
    arule("ST", "Table OptContextParam IndexedName ArgumentPattern DefType Eq Block",
      CS.and(
        CS.SameLine("Table", "IndexedName"),
        CS.Indent("Table", "OptContextParam"),
        CS.Indent("Table", "ArgumentPattern"),
        CS.Indent("Table", "DefType"),
        CS.Indent("Table", "Eq"),
        CS.Indent("Table", "Block"),
        CS.not(CS.NullSpan("Block")),
        CS.not(CS.SameLine("Table", "Block"))),
      c => mkSTDef(Vector(DefCase(c.text("IndexedName"), c.ArgumentPattern, c.DefType, c.Block)), true, c.OptContextParam)) ++
    arule("DefCases", "", c => Vector[DefCase]()) ++
    arule("DefCases", "DefCases_0 DefCase",
      CS.Align("DefCases_0", "DefCase"),
      c => c.DefCases_0[Vector[DefCase]] :+ c.DefCase) ++
    arule("DefCase", "IndexedName ArgumentPattern DefType Eq Block",
      CS.and(
        CS.Indent("IndexedName", "ArgumentPattern"),
        CS.Indent("IndexedName", "DefType"),
        CS.Indent("IndexedName", "Eq"),
        CS.Indent("IndexedName", "Block")),
      c => DefCase(c.text("IndexedName"), c.ArgumentPattern, c.DefType, c.Block)) ++
    arule("DefType", "", c => None) ++
    arule("DefType", "Colon ScriptValueType", c => Some(c.ScriptValueType[Any]))
  // `datatype` declarations: one or more cases, each with constructors that
  // optionally carry an argument pattern.
  val g_datatype =
    arule("ST", "Datatype DatatypeCases",
      CS.Indent("Datatype", "DatatypeCases"),
      c => STDatatype(c.DatatypeCases)) ++
    arule("ST", "Datatype IndexedName DatatypeConstrs",
      CS.and(
        CS.SameLine("Datatype", "IndexedName"),
        CS.Indent("Datatype", "DatatypeConstrs")),
      c => STDatatype(Vector(DatatypeCase(c.text("IndexedName"), c.DatatypeConstrs)))) ++
    arule("DatatypeConstrs", "", c => Vector()) ++
    arule("DatatypeConstrs", "DatatypeConstrs_0 DatatypeConstr",
      CS.Align("DatatypeConstrs_0", "DatatypeConstr"),
      c => c.DatatypeConstrs_0[Vector[DatatypeConstr]] :+ c.DatatypeConstr[DatatypeConstr]) ++
    arule("DatatypeConstr", "IndexedName",
      c => DatatypeConstr(c.text("IndexedName"), None)) ++
    arule("DatatypeConstr", "IndexedName Pattern", CS.Indent("IndexedName", "Pattern"),
      c => DatatypeConstr(c.text("IndexedName"), Some(c.Pattern))) ++
    arule("DatatypeCases", "", c => Vector()) ++
    arule("DatatypeCases", "DatatypeCases_0 DatatypeCase",
      CS.Align("DatatypeCases_0", "DatatypeCase"),
      c => c.DatatypeCases_0[Vector[DatatypeCase]] :+ c.DatatypeCase[DatatypeCase]) ++
    arule("DatatypeCase", "IndexedName DatatypeConstrs",
      CS.Indent("IndexedName", "DatatypeConstrs"),
      c => DatatypeCase(c.text("IndexedName"), c.DatatypeConstrs))
  // `return` with an optional result expression.
  val g_return =
    arule("ST", "Return PExpr", CS.Indent("Return", "PExpr"),
      c => STReturn(Some(c.PExpr[Expr]))) ++
    arule("ST", "Return", c => STReturn(None))
  // `assume [name:] <expr>` logic statement.
  val g_assume =
    arule("ST", "Assume OptAssign PrimitiveExpr",
      CS.and(
        CS.Indent("Assume", "OptAssign"),
        CS.Indent("Assume", "PrimitiveExpr")),
      c => STAssume(c.OptAssign, c.PrimitiveExpr))
  // `let [name:] <expr>` logic statement.
  val g_let =
    arule("ST", "Let OptAssign PrimitiveExpr",
      CS.and(
        CS.Indent("Let", "OptAssign"),
        CS.Indent("Let", "PrimitiveExpr")),
      c => STLet(c.OptAssign, c.PrimitiveExpr))
  // `choose [name:] <expr> <block>` logic statement.
  val g_choose =
    arule("ST", "Choose OptAssign PrimitiveExpr Block",
      CS.and(
        CS.Indent("Choose", "OptAssign"),
        CS.Indent("Choose", "PrimitiveExpr"),
        CS.Indent("Choose", "Block")),
      c => STChoose(c.OptAssign,
                    c.PrimitiveExpr,
                    c.Block))
  // `theorem` statements: proposition + proof block, bare proof block,
  // `... by <expr>`, or a trailing-dot form with no explicit proof.
  val g_theorem =
    arule("TheoremOptAssign", "Theorem OptAssign", CS.Indent("Theorem", "OptAssign"), c => c.OptAssign[Any]) ++
    arule("ST", "TheoremOptAssign BangExpr Block",
      CS.and(
        CS.Indent("TheoremOptAssign", "BangExpr"),
        CS.Indent("TheoremOptAssign", "Block")),
      c => STTheorem(c.TheoremOptAssign,
                     c.BangExpr,
                     c.Block)) ++
    arule("ST", "TheoremOptAssign Block",
      CS.and(
        CS.Indent("TheoremOptAssign", "Block"),
        CS.not(CS.SameLine("TheoremOptAssign", "Block"))),
      c => STTheorem(c.TheoremOptAssign,
                     ParseTree.NilExpr,
                     c.Block)) ++
    arule("ST", "TheoremOptAssign PrimitiveExpr By PExpr",
      CS.and(
        CS.Indent("TheoremOptAssign", "PrimitiveExpr"),
        CS.ifThenElse(CS.Line("TheoremOptAssign", "By"),
          CS.Indent("TheoremOptAssign", "PExpr"),
          CS.and(Subalign("TheoremOptAssign", "By"), CS.Indent("By", "PExpr")))),
      c => STTheoremBy(c.TheoremOptAssign,
                       c.PrimitiveExpr,
                       c.PExpr)) ++
    arule("ST", "TheoremOptAssign PrimitiveExpr Dot",
      CS.and(
        CS.Indent("TheoremOptAssign", "PrimitiveExpr"),
        CS.SameLine("PrimitiveExpr", "Dot")),
      c => STTheoremBy(c.TheoremOptAssign,
                       c.PrimitiveExpr,
                       ParseTree.NilExpr))
  // Optional `name:` prefix shared by the logic statements, plus the
  // assume/let/choose/theorem grammars themselves.
  val g_logic_statements =
    arule("OptAssign", "", c => None) ++
    arule("OptAssign", "IndexedName Colon", c => Some(c.text("IndexedName"))) ++
    g_assume ++ g_let ++ g_choose ++ g_theorem
  // Test statements: `assert <expr>` and `failure <block>`.
  val g_test =
    arule("ST", "Assert PExpr", CS.Indent("Assert", "PExpr"), c => STAssert(c.PExpr)) ++
    arule("ST", "Failure Block", CS.Indent("Failure", "Block"), c => STFailure(c.Block))
  // All statement forms, statement sequences, and blocks.
  val g_statement =
    g_val ++ g_assign ++ g_def ++ g_datatype ++ g_return ++ g_show ++ g_fail ++
    g_logic_statements ++ g_comment ++ g_test ++
    arule("Statement", "Expr",
      CS.or(CS.Protrude("Expr"), CS.not(CS.First("Expr"))),
      c => STExpr(c.Expr)) ++
    arule("Statement", "ST", _.ST[Any]) ++
    arule("Statement", "STControlFlow", c => STControlFlow(c.STControlFlow)) ++
    arule("Statements", "", c => Vector[Statement]()) ++
    arule("Statements", "Statements_0 Statement", CS.Align("Statements_0", "Statement"),
      c => c.Statements_0[Vector[Statement]] :+ c.Statement) ++
    arule("Block", "Statements", c => Block(c.Statements))
  // Theory headers: `theory <ns> [aliases] [extends <namespaces>]`. Alias and
  // namespace lists are built in reverse and reversed in the STTheory action.
  val g_header =
    arule("ST", "Theory Namespace AliasList Extends NamespaceList",
      CS.and(
        CS.Indent("Theory", "Namespace"),
        CS.Indent("Theory", "AliasList"),
        CS.ifThenElse(CS.Line("Theory", "Extends"),
          CS.Indent("Theory", "NamespaceList"),
          CS.and(CS.Align("Theory", "Extends"), CS.Indent("Extends", "NamespaceList")))),
      c => STTheory(Namespace(c.text("Namespace")), c.AliasList[List[(Id, Namespace)]].reverse, c.NamespaceList[List[Namespace]].reverse)) ++
    arule("ST", "Theory Namespace AliasList",
      CS.and(CS.Indent("Theory", "Namespace"), CS.Indent("Theory", "AliasList")),
      c => STTheory(Namespace(c.text("Namespace")), c.AliasList[List[(Id, Namespace)]].reverse, List[Namespace]())) ++
    arule("NamespaceList", "", c => List[Namespace]()) ++
    arule("NamespaceList", "NamespaceList Namespace",
      c => Namespace(c.text("Namespace")) :: c.NamespaceList[List[Namespace]]) ++
    arule("AliasList", "", c => List[(Id, Namespace)]()) ++
    arule("AliasList", "AliasList_0 Alias",
      CS.Align("AliasList_0", "Alias"),
      c => c.Alias[(Id, Namespace)] :: c.AliasList_0[List[(Id, Namespace)]]) ++
    arule("Alias", "IndexedName Eq Namespace",
      c => (Id(c.text("IndexedName")), Namespace(c.text("Namespace"))))
  // The complete program grammar: all sub-grammars plus the quotation entry
  // points and the top-level `Prog -> Block` rule.
  val g_prog =
    Syntax.grammar ++
    g_literals ++
    g_pattern ++
    g_expr ++
    g_statement ++
    g_controlflow ++
    g_header ++
    arule("ValueQuotedType", "PExpr", _.PExpr[Any]) ++
    arule("PatternQuotedType", "Pattern", _.Pattern[Any]) ++
    arule("ValueQuotedTerm", "PExprOrFreshVar", _.PExprOrFreshVar[Any]) ++
    arule("PatternQuotedTerm", "Pattern", _.Pattern[Any]) ++
    arule("Prog", "Block", _.Block[Any])
} | proofpeer/proofpeer-proofscript | shared/src/main/scala/proofpeer/proofscript/frontend/ProofScriptGrammar.scala | Scala | mit | 34,657 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import minitest.SimpleTestSuite
import monix.execution.Scheduler.Implicits.global
import scala.concurrent.{Await, TimeoutException}
import scala.concurrent.duration._
object TaskBlockingSuite extends SimpleTestSuite {
  test("blocking on future should work") {
    // The second task is awaited from inside the first one's `map`,
    // exercising nested blocking on the global scheduler.
    val source1 = Task.evalAsync(100)
    val source2 = Task.evalAsync(200).onErrorHandleWith { case e: Exception => Task.raiseError(e) }
    val derived = source1.map(x => Await.result(source2.runToFuture, 10.seconds) + x)
    assertEquals(Await.result(derived.runToFuture, 10.seconds), 300)
  }

  test("blocking on async") {
    (0 until 1000).foreach { _ =>
      assertEquals(Task.evalAsync(1).runSyncUnsafe(Duration.Inf), 1)
    }
  }

  test("blocking on async.flatMap") {
    (0 until 1000).foreach { _ =>
      val composed = Task.evalAsync(1).flatMap(_ => Task.evalAsync(2))
      assertEquals(composed.runSyncUnsafe(Duration.Inf), 2)
    }
  }

  test("blocking on memoize") {
    (0 until 1000).foreach { _ =>
      val memoized = Task.evalAsync(1).flatMap(_ => Task.evalAsync(2)).memoize
      // Run twice: the second evaluation must serve the memoized result.
      assertEquals(memoized.runSyncUnsafe(Duration.Inf), 2)
      assertEquals(memoized.runSyncUnsafe(Duration.Inf), 2)
    }
  }

  test("timeout exception") {
    intercept[TimeoutException] {
      Task.never[Unit].runSyncUnsafe(100.millis)
    }
    ()
  }
}
| alexandru/monifu | monix-eval/jvm/src/test/scala/monix/eval/TaskBlockingSuite.scala | Scala | apache-2.0 | 2,067 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.dynamicpruning
import org.apache.spark.sql.catalyst.expressions.{DynamicPruning, PredicateHelper}
import org.apache.spark.sql.catalyst.expressions.Literal.TrueLiteral
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern._
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
/**
* Removes the filter nodes with dynamic pruning that were not pushed down to the scan.
* These nodes will not be pushed through projects and aggregates with non-deterministic
* expressions.
*/
object CleanupDynamicPruningFilters extends Rule[LogicalPlan] with PredicateHelper {

  override def apply(plan: LogicalPlan): LogicalPlan = {
    if (!conf.dynamicPartitionPruningEnabled) {
      plan
    } else {
      plan.transformWithPruning(
        // No-op for trees that do not contain dynamic pruning.
        _.containsAnyPattern(DYNAMIC_PRUNING_EXPRESSION, DYNAMIC_PRUNING_SUBQUERY)) {
        // Anything pushed down into a PhysicalOperation over a file relation
        // passes through untouched.
        case p @ PhysicalOperation(_, _, LogicalRelation(_: HadoopFsRelation, _, _, _)) => p
        // Remaining Filters still carrying DynamicPruning predicates have
        // those predicates replaced by `true`.
        case f @ Filter(condition, _) =>
          val rewritten = condition.transformWithPruning(
            _.containsAnyPattern(DYNAMIC_PRUNING_EXPRESSION, DYNAMIC_PRUNING_SUBQUERY)) {
            case _: DynamicPruning => TrueLiteral
          }
          f.copy(condition = rewritten)
      }
    }
  }
}
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/dynamicpruning/CleanupDynamicPruningFilters.scala | Scala | apache-2.0 | 2,495 |
package net.sansa_stack.inference.utils
/**
* @author Lorenz Buehmann
*/
object EntailmentRegimes {

  /** Marker type for the supported entailment regimes. */
  sealed abstract class EntailmentRegime

  /** The RDFS regime. */
  case object RDFS extends EntailmentRegime

  /** The OWL regime. */
  case object OWL extends EntailmentRegime
}
| SANSA-Stack/SANSA-RDF | sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/utils/EntailmentRegimes.scala | Scala | apache-2.0 | 247 |
package org.openeyes.api.services.workflow
import org.bson.types.ObjectId
import org.openeyes.api.utils.Date._
import org.openeyes.api.forms.workflow.TicketForm
import org.openeyes.api.models.Patient
import org.openeyes.api.models.workflow.Ticket
/**
* Created by stu on 23/09/2014.
*/
object TicketService {

  /**
   * Creates and persists a new Ticket for the form's patient and workflow.
   *
   * Fix: the patient lookup previously called Option.get, which threw an
   * uninformative NoSuchElementException when the patient id did not exist;
   * it now fails with an explicit error, matching the style used in
   * updateStepIndexOrComplete below.
   */
  def create(form: TicketForm) = {
    val patient = Patient.findOneById(new ObjectId(form.patientId)) match {
      case Some(p) => p
      case None => throw new RuntimeException("Patient not found")
    }
    val ticket = Ticket(new ObjectId, new ObjectId(form.workflowId), patient, createdAt = setTimestamp)
    Ticket.save(ticket)
    ticket
  }

  /** Looks up a single ticket by its (string) id. */
  def find(id: String) = {
    Ticket.findOneById(new ObjectId(id))
  }

  /** All tickets of a workflow, optionally restricted to one step index. */
  def findAllForWorkflow(workflowId: String, stepIndex: Option[Int], includeCompleted: Boolean = false): Seq[Ticket] = {
    Ticket.findAllForWorkflow(workflowId, stepIndex, includeCompleted)
  }

  /**
   * Advances the ticket to the next workflow step, or marks it completed
   * when the final step has been reached. Returns the updated ticket.
   */
  def updateStepIndexOrComplete(ticketId: ObjectId, currentStepIndex: Int) = {
    val ticket = Ticket.findOneById(ticketId) match {
      case Some(t) => t
      case None => throw new RuntimeException("Ticket not found")
    }

    val lastStepIndex = WorkflowService.find(ticket.workflowId.toString) match {
      case Some(workflow) => workflow.steps.length - 1
      case None => throw new RuntimeException("Workflow not found")
    }

    if (currentStepIndex < lastStepIndex) {
      // Advance to the next step.
      ticket.stepIndex = currentStepIndex + 1
    } else {
      // Final step reached: mark the ticket as done instead.
      ticket.completed = true
    }

    Ticket.update(ticket)

    // Return the ticket in case it's needed by the caller.
    ticket
  }
}
| openeyes/poc-backend | src/main/scala/org/openeyes/api/services/workflow/TicketService.scala | Scala | gpl-3.0 | 1,629 |
package colossus.examples
import colossus.core._
import colossus.protocols.websocket._
import colossus.streaming.PushResult
import subprotocols.rawstring._
import akka.actor._
import scala.concurrent.duration._
/**
 * Actor that hands out successive prime numbers by trial division.
 * Replies to each [[Next]] message with the next prime after the last one reported.
 */
class PrimeGenerator extends Actor {

  // Last prime sent out; starts at 1 so the first answer is 2.
  var lastPrime = 1

  def receive = {
    case Next => sender() ! nextPrime
  }

  /** Advances past `lastPrime` to the next prime, records and returns it. */
  def nextPrime = {
    var candidate = lastPrime
    do {
      candidate += 1
    } while (!isPrime(candidate))
    lastPrime = candidate
    candidate
  }

  // Trial division over 2..n-1; the empty range makes 2 prime.
  private def isPrime(n: Int): Boolean = (2 until n).forall(n % _ != 0)
}
case object Next
/** Demo server: a websocket endpoint that streams prime numbers on demand. */
object WebsocketExample {
  def start(port: Int)(implicit io: IOSystem) = {
    // Shared actor producing successive primes (see PrimeGenerator above).
    val generator = io.actorSystem.actorOf(Props[PrimeGenerator])
    WebsocketServer.start[RawString]("websocket", port) { worker =>
      new WebsocketInitializer[RawString](worker) {
        def provideCodec() = new RawStringCodec
        def onConnect =
          ctx =>
            new WebsocketServerHandler[RawString](ctx) with ProxyActor {
              // Whether this connection is currently subscribed to the prime stream.
              private var sending = false
              def shutdownRequest() {
                upstream.connection.disconnect()
              }
              override def onConnected() {
                send("HELLO THERE!")
              }
              override def onShutdown() {
                send("goodbye!")
              }
              // Dispatch on the raw text command received from the client.
              def handle = {
                case "START" => {
                  sending = true
                  generator ! Next
                }
                case "STOP" => {
                  sending = false
                }
                case "LARGE" => {
                  send((0 to 1000).mkString)
                }
                case "MANY" => {
                  //send one message per event loop iteration
                  def next(i: Int) {
                    if (i > 0) send(i.toString) match {
                      case PushResult.Ok => next(i - 1)
                      // Backpressure: resume when the output buffer signals readiness.
                      case PushResult.Full(signal) => signal.notify { next(i - 1) }
                      case _ => {}
                    }
                  }
                  next(1000)
                }
                case "EXIT" => {
                  // Client-requested disconnect.
                  upstream.connection.disconnect()
                }
                case other => {
                  send(s"unknown command: $other")
                }
              }
              def handleError(reason: Throwable) {}
              // Primes arrive from the generator actor; keep rescheduling while subscribed.
              def receive = {
                case prime: Integer => {
                  send(s"PRIME: $prime")
                  if (sending) {
                    import io.actorSystem.dispatcher
                    io.actorSystem.scheduler.scheduleOnce(100.milliseconds, generator, Next)
                  }
                }
              }
            }
      }
    }
  }
}
| tumblr/colossus | colossus-examples/src/main/scala/colossus.examples/WebsocketExample.scala | Scala | apache-2.0 | 2,964 |
package models.database
import models.ReportsModel
import ReportsModel._
import org.joda.time.LocalDate
import play.api.db.slick.Config.driver.simple._
object ReportsDatabase {
  // Entry point for Slick queries against the "report" table.
  val reports = TableQuery[Reports]
}
/** One row of the "report" table: per-world, per-day moderation counters. */
case class ReportEntity(world: String, date: LocalDate, detected: Int, banned: Int, deleted: Int)
class Reports(tag: Tag) extends Table[ReportEntity](tag, "report") {
  // Maps LocalDate <-> String using dateFormatter (imported via ReportsModel._).
  implicit val dateColumnType = MappedColumnType.base[LocalDate, String]({ dateFormatter.print(_) }, { dateFormatter.parseLocalDate })
  def world = column[String]("world", O.DBType("VARCHAR(5)"))
  def date = column[LocalDate]("date", O.DBType("DATE"))
  def detected = column[Int]("detected", O.Default(0))
  def banned = column[Int]("banned", O.Default(0))
  def deleted = column[Int]("deleted", O.Default(0))
  // Composite primary key: at most one row per (world, date).
  def pk = primaryKey("pk_entry", (world, date))
  def * = (world, date, detected, banned, deleted) <> (ReportEntity.tupled, ReportEntity.unapply)
}
| pvoznenko/play-slick-angular-test-example | app/models/database/ReportsDatabase.scala | Scala | mit | 948 |
package com.magmanics.licensing.client
import javax.ws.rs._
import javax.ws.rs.core.MediaType
import com.magmanics.auditing.model.{AuditCode, Audit}
import com.magmanics.auditing.service.AuditSearchDto
import org.jboss.resteasy.client.jaxrs.{BasicAuthentication, ResteasyClientBuilder}
/**
 * REST Client for Licensing audit endpoint
 *
 * @author James Baxter - 27/08/2014.
 */
@Consumes(Array(MediaType.APPLICATION_JSON, "application/*+json", "text/json"))
@Produces(Array(MediaType.APPLICATION_JSON, "application/*+json", "text/json"))
trait AuditClient {
  /** Persists a new audit entry. */
  @POST
  def create(audit: Audit)
  /**
   * @return a list of the distinct usernames for which there are audit entries
   */
  @GET
  @Path("/usernames")
  def getUsernames: Seq[String]
  /**
   * @return a sequence of the distinct audit codes for which there are audit entries
   */
  @GET
  @Path("/auditcodes")
  def getAuditCodes: Seq[AuditCode]
  /**
   * @return a sequence of [[com.magmanics.auditing.model.Audit Audits]] which conform to the given search criteria
   */
  @POST
  @Path("/messages")
  def getAuditMessages(auditSearch: AuditSearchDto): Seq[Audit]
}
/** Minimal cons-list ADT used by the demo below. */
abstract class List {
  // Right-associative prepend: `a :: list` desugars to `list.::(a)`.
  def ::(a: Any) = new ::(a, this)
}
/** The empty list. */
case object Nil extends List
/** A non-empty list: one element plus the rest. */
case class ::(head: Any, tail: List) extends List
/** Demo: builds a cons list, compares lists structurally, and prints the elements. */
object Main extends App {
  import scala.annotation.tailrec

  /** Prints each element of `lst` on its own line; stops at Nil. */
  @tailrec
  def show(lst: List): Unit = {   // was deprecated procedure syntax `def show(lst: List) { ... }`
    lst match {
      case ::(h, t) => { println(h); show(t) }
      case Nil =>
    }
  }

  val friends = "Fred" :: "Wilma" :: "Barney" :: Nil
  println(friends)
  // Case-class equality is structural and order-sensitive.
  println(friends == "Fred" :: "Wilma" :: "Barney" :: Nil)
  println(friends == "Fred" :: "Barney" :: "Wilma" :: Nil)
  show(friends)
}
| yeahnoob/scala-impatient-2e-code | src/ch14/sec13/List.scala | Scala | gpl-3.0 | 493 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.ejson
import slamdata.Predef.{Char => SChar, String}
import quasar.contrib.specs2.Spec
import matryoshka.data.Fix
import scalaz._, Scalaz._
/** Property tests for decoding EJson char values into Scala Char. */
class DecodeEJsonSpec extends Spec {
  // Fixed-point EJson tree used to construct test values.
  type J = Fix[EJson]
  val J = Fixed[J]
  "char" >> {
    // A native Extension.Char decodes directly to the same character.
    "Extension.Char" >> prop { c: SChar =>
      DecodeEJson[SChar].decode(J.char(c)).toDisjunction ≟ c.right
    }
    // A one-character string also decodes to that character.
    "single character Common.Str" >> prop { c: SChar =>
      DecodeEJson[SChar].decode(J.str(c.toString)).toDisjunction ≟ c.right
    }
    // Strings longer than one character must fail to decode.
    "fail for multicharacter Common.Str" >> prop {
      (s: String) => (s.length > 1) ==> {
        DecodeEJson[SChar].decode(J.str(s)).toDisjunction.isLeft
      }}
  }
}
| slamdata/slamengine | ejson/src/test/scala/quasar/ejson/DecodeEjsonSpec.scala | Scala | apache-2.0 | 1,285 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.psi.PsiElement
/**
 * PSI element for a Scala `if` expression.
 *
 * All parts are Options because the source being modelled may be incomplete.
 *
 * @author Alexander Podkhalyuzin
 * Date: 06.03.2008
 */
trait ScIf extends ScExpression {
  /** The condition expression, if present. */
  def condition: Option[ScExpression]
  /** The expression of the `then` branch, if present. */
  def thenExpression: Option[ScExpression]
  /** The `else` keyword element, if present. */
  def elseKeyword: Option[PsiElement]
  /** The expression of the `else` branch, if present. */
  def elseExpression: Option[ScExpression]
  /** The opening parenthesis around the condition, if present. */
  def leftParen: Option[PsiElement]
  /** The closing parenthesis around the condition, if present. */
  def rightParen: Option[PsiElement]
  override protected def acceptScala(visitor: ScalaElementVisitor): Unit = {
    visitor.visitIf(this)
  }
}
object ScIf {
  /** Extractor yielding (condition, then-branch, else-branch). */
  def unapply(ifStmt: ScIf): Some[(Option[ScExpression], Option[ScExpression], Option[ScExpression])] = Some(ifStmt.condition, ifStmt.thenExpression, ifStmt.elseExpression)
}
package org.apache.hadoop.mapred
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.util.ReflectionUtils
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.io.Text
import java.text.SimpleDateFormat
import java.text.NumberFormat
import java.io.IOException
import java.net.URI
import java.util.Date
import spark.Logging
import spark.SerializableWritable
/**
 * Internal helper class that saves an RDD using a Hadoop OutputFormat. This is only public
 * because we need to access this class from the `spark` package to use some package-private Hadoop
 * functions, but this class should not be used directly by users.
 *
 * Saves the RDD using a JobConf, which should contain an output key class, an output value class,
 * a filename to write to, etc, exactly like in a Hadoop MapReduce job.
 */
class HadoopWriter(@transient jobConf: JobConf) extends Logging with HadoopMapRedUtil with Serializable {
  // Creation timestamp, used to derive the job ID in setIDs().
  private val now = new Date()
  // JobConf wrapped for Java serialization so the writer can ship with tasks.
  private val conf = new SerializableWritable(jobConf)
  // Identifiers of the current job/split/attempt; populated by setIDs().
  private var jobID = 0
  private var splitID = 0
  private var attemptID = 0
  private var jID: SerializableWritable[JobID] = null
  private var taID: SerializableWritable[TaskAttemptID] = null
  // Per-attempt Hadoop objects; transient because they are recreated lazily on each executor.
  @transient private var writer: RecordWriter[AnyRef,AnyRef] = null
  @transient private var format: OutputFormat[AnyRef,AnyRef] = null
  @transient private var committer: OutputCommitter = null
  @transient private var jobContext: JobContext = null
  @transient private var taskContext: TaskAttemptContext = null
  // Driver-side: run the committer's job-level setup once, before any task writes.
  def preSetup() {
    setIDs(0, 0, 0)
    setConfParams()
    val jCtxt = getJobContext()
    getOutputCommitter().setupJob(jCtxt)
  }
  // Executor-side: record which job/split/attempt this writer instance serves.
  def setup(jobid: Int, splitid: Int, attemptid: Int) {
    setIDs(jobid, splitid, attemptid)
    setConfParams()
  }
  // Opens the RecordWriter for this split's "part-NNNNN" file after task-level setup.
  def open() {
    val numfmt = NumberFormat.getInstance()
    numfmt.setMinimumIntegerDigits(5)
    numfmt.setGroupingUsed(false)
    val outputName = "part-" + numfmt.format(splitID)
    val path = FileOutputFormat.getOutputPath(conf.value)
    val fs: FileSystem = {
      if (path != null) {
        path.getFileSystem(conf.value)
      } else {
        FileSystem.get(conf.value)
      }
    }
    getOutputCommitter().setupTask(getTaskContext())
    writer = getOutputFormat().getRecordWriter(
      fs, conf.value, outputName, Reporter.NULL)
  }
  // Writes one key/value pair; open() must have been called first.
  def write(key: AnyRef, value: AnyRef) {
    if (writer!=null) {
      //println (">>> Writing ("+key.toString+": " + key.getClass.toString + ", " + value.toString + ": " + value.getClass.toString + ")")
      writer.write(key, value)
    } else {
      throw new IOException("Writer is null, open() has not been called")
    }
  }
  def close() {
    writer.close(Reporter.NULL)
  }
  // Commits this task's output when the committer requires it; aborts the task on IO failure.
  def commit() {
    val taCtxt = getTaskContext()
    val cmtr = getOutputCommitter()
    if (cmtr.needsTaskCommit(taCtxt)) {
      try {
        cmtr.commitTask(taCtxt)
        logInfo (taID + ": Committed")
      } catch {
        case e: IOException => {
          logError("Error committing the output of task: " + taID.value, e)
          cmtr.abortTask(taCtxt)
          throw e
        }
      }
    } else {
      logWarning ("No need to commit output of task: " + taID.value)
    }
  }
  // Driver-side: job-level cleanup after all tasks have finished.
  def cleanup() {
    getOutputCommitter().cleanupJob(getJobContext())
  }
  // ********* Private Functions *********
  // The getters below lazily materialize the transient Hadoop objects on first use.
  private def getOutputFormat(): OutputFormat[AnyRef,AnyRef] = {
    if (format == null) {
      format = conf.value.getOutputFormat()
        .asInstanceOf[OutputFormat[AnyRef,AnyRef]]
    }
    return format
  }
  private def getOutputCommitter(): OutputCommitter = {
    if (committer == null) {
      committer = conf.value.getOutputCommitter
    }
    return committer
  }
  private def getJobContext(): JobContext = {
    if (jobContext == null) {
      jobContext = newJobContext(conf.value, jID.value)
    }
    return jobContext
  }
  private def getTaskContext(): TaskAttemptContext = {
    if (taskContext == null) {
      taskContext = newTaskAttemptContext(conf.value, taID.value)
    }
    return taskContext
  }
  private def setIDs(jobid: Int, splitid: Int, attemptid: Int) {
    jobID = jobid
    splitID = splitid
    attemptID = attemptid
    jID = new SerializableWritable[JobID](HadoopWriter.createJobID(now, jobid))
    taID = new SerializableWritable[TaskAttemptID](
      new TaskAttemptID(new TaskID(jID.value, true, splitID), attemptID))
  }
  // Mirror the IDs into the JobConf so OutputFormats relying on them behave as in MapReduce.
  private def setConfParams() {
    conf.value.set("mapred.job.id", jID.value.toString)
    conf.value.set("mapred.tip.id", taID.value.getTaskID.toString)
    conf.value.set("mapred.task.id", taID.value.toString)
    conf.value.setBoolean("mapred.task.is.map", true)
    conf.value.setInt("mapred.task.partition", splitID)
  }
}
object HadoopWriter {
  /**
   * Builds a JobID whose jobtracker identifier is derived from the given timestamp.
   *
   * Fix: previously the `time` parameter was ignored and `new Date()` was formatted
   * instead, so repeated calls for the same writer could produce inconsistent ids.
   */
  def createJobID(time: Date, id: Int): JobID = {
    val formatter = new SimpleDateFormat("yyyyMMddHHmm")
    val jobtrackerID = formatter.format(time)
    new JobID(jobtrackerID, id)
  }

  /**
   * Parses `path` into a fully-qualified Path using the filesystem resolved from `conf`.
   *
   * @throws IllegalArgumentException if `path` is null or no filesystem can be resolved
   */
  def createPathFromString(path: String, conf: JobConf): Path = {
    if (path == null) {
      throw new IllegalArgumentException("Output path is null")
    }
    // `new Path(path)` never returns null, so the old null-check on it was dead code.
    val outputPath = new Path(path)
    val fs = outputPath.getFileSystem(conf)
    if (fs == null) {
      throw new IllegalArgumentException("Incorrectly formatted output path")
    }
    outputPath.makeQualified(fs)
  }
}
| koeninger/spark | core/src/main/scala/spark/HadoopWriter.scala | Scala | bsd-3-clause | 5,482 |
package app
import org.scalatra._
import scalate.ScalateSupport
import org.json4s.{DefaultFormats, Formats}
import org.scalatra.json._
import controllers.MenuData
/**
 * Scalatra servlet intended to serve page data as JSON.
 *
 * NOTE(review): the GET route below only computes `menuName` — the actual lookup
 * is commented out, so the action currently returns Unit (an empty body).
 * Presumably unfinished; confirm the intended response.
 */
class pageApiServletJson extends ResumeAppStack with JacksonJsonSupport {
  // json4s formats required by JacksonJsonSupport for (de)serialization.
  protected implicit val jsonFormats: Formats = DefaultFormats
  // Every response from this servlet is JSON.
  before() {
    contentType = formats("json")
  }
  get("/:pageSlug.json") {
    // Slug defaults to "home" when the parameter is missing; lower-cased for lookup.
    val menuName:String = params.getOrElse("pageSlug", "home").toLowerCase
    //MenuData.getMenu(pageSlug)
  }
}
| gregoryboucher/resume-app | src/main/scala/app/pageApiServlet.scala | Scala | mit | 496 |
package intellij.haskell.runconfig.console
import java.util.concurrent.ConcurrentHashMap
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiFile
import scala.collection.concurrent
import scala.jdk.CollectionConverters._
/** Registry mapping console editors (and projects) to their Haskell console views. */
object HaskellConsoleViewMap {
  // Console editor -> its console view; concurrent because registration and
  // lookup may happen on different threads.
  private val consoleViews = new ConcurrentHashMap[Editor, HaskellConsoleView]().asScala
  /** Registers a console view under its console editor. */
  def addConsole(console: HaskellConsoleView): Unit = {
    consoleViews.put(console.getConsoleEditor, console)
  }
  /** Unregisters a console view. */
  def delConsole(console: HaskellConsoleView): Unit = {
    consoleViews.remove(console.getConsoleEditor)
  }
  /** Looks up the console view backing the given editor, if any. */
  def getConsole(editor: Editor): Option[HaskellConsoleView] = {
    consoleViews.get(editor)
  }
  // NOTE(review): the parameter is a Project despite being named `editor`;
  // renaming could break named-argument callers, so only flagging it here.
  // Returns the first visible console belonging to the project.
  def getConsole(editor: Project): Option[HaskellConsoleView] = {
    consoleViews.values.find(console => console.project == editor && console.isShowing)
  }
  // File is project file and not file which represents console
  val projectFileByConfigName: concurrent.Map[String, PsiFile] = new ConcurrentHashMap[String, PsiFile]().asScala
}
| rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/runconfig/console/HaskellConsoleViewMap.scala | Scala | apache-2.0 | 1,073 |
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.io
import java.net.DatagramSocket
import java.net.InetSocketAddress
import com.typesafe.config.Config
import scala.collection.immutable
import akka.io.Inet.{ SoJavaFactories, SocketOption }
import akka.util.Helpers.Requiring
import akka.util.ByteString
import akka.actor._
/**
 * UDP Extension for Akka’s IO layer.
 *
 * This extension implements the connectionless UDP protocol without
 * calling `connect` on the underlying sockets, i.e. without restricting
 * from whom data can be received. For “connected” UDP mode see [[UdpConnected]].
 *
 * For a full description of the design and philosophy behind this IO
 * implementation please refer to <a href="http://doc.akka.io/">the Akka online documentation</a>.
 *
 * The Java API for generating UDP commands is available at [[UdpMessage]].
 */
object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
  override def lookup = Udp
  override def createExtension(system: ExtendedActorSystem): UdpExt = new UdpExt(system)
  /**
   * Java API: retrieve the Udp extension for the given system.
   */
  override def get(system: ActorSystem): UdpExt = super.get(system)
  /**
   * The common interface for [[Command]] and [[Event]].
   */
  sealed trait Message
  /**
   * The common type of all commands supported by the UDP implementation.
   */
  trait Command extends SelectionHandler.HasFailureMessage with Message {
    // Sent back to the commander when this command cannot be executed.
    def failureMessage = CommandFailed(this)
  }
  /**
   * Each [[Send]] can optionally request a positive acknowledgment to be sent
   * to the commanding actor. If such notification is not desired the [[Send#ack]]
   * must be set to an instance of this class. The token contained within can be used
   * to recognize which write failed when receiving a [[CommandFailed]] message.
   */
  case class NoAck(token: Any) extends Event
  /**
   * Default [[NoAck]] instance which is used when no acknowledgment information is
   * explicitly provided. Its “token” is `null`.
   */
  object NoAck extends NoAck(null)
  /**
   * This message is understood by the “simple sender” which can be obtained by
   * sending the [[SimpleSender]] query to the [[UdpExt#manager]] as well as by
   * the listener actors which are created in response to [[Bind]]. It will send
   * the given payload data as one UDP datagram to the given target address. The
   * UDP actor will respond with [[CommandFailed]] if the send could not be
   * enqueued to the O/S kernel because the send buffer was full. If the given
   * `ack` is not of type [[NoAck]] the UDP actor will reply with the given
   * object as soon as the datagram has been successfully enqueued to the O/S
   * kernel.
   *
   * The sending UDP socket’s address belongs to the “simple sender” which does
   * not handle inbound datagrams and sends from an ephemeral port; therefore
   * sending using this mechanism is not suitable if replies are expected, use
   * [[Bind]] in that case.
   */
  final case class Send(payload: ByteString, target: InetSocketAddress, ack: Event) extends Command {
    require(ack != null, "ack must be non-null. Use NoAck if you don't want acks.")
    def wantsAck: Boolean = !ack.isInstanceOf[NoAck]
  }
  object Send {
    // Convenience constructor: send without requesting an acknowledgment.
    def apply(data: ByteString, target: InetSocketAddress): Send = Send(data, target, NoAck)
  }
  /**
   * Send this message to the [[UdpExt#manager]] in order to bind to the given
   * local port (or an automatically assigned one if the port number is zero).
   * The listener actor for the newly bound port will reply with a [[Bound]]
   * message, or the manager will reply with a [[CommandFailed]] message.
   */
  final case class Bind(handler: ActorRef,
                        localAddress: InetSocketAddress,
                        options: immutable.Traversable[SocketOption] = Nil) extends Command
  /**
   * Send this message to the listener actor that previously sent a [[Bound]]
   * message in order to close the listening socket. The recipient will reply
   * with an [[Unbound]] message.
   */
  case object Unbind extends Command
  /**
   * Retrieve a reference to a “simple sender” actor of the UDP extension.
   * The newly created “simple sender” will reply with the [[SimpleSenderReady]] notification.
   *
   * The “simple sender” is a convenient service for being able to send datagrams
   * when the originating address is meaningless, i.e. when no reply is expected.
   *
   * The “simple sender” will not stop itself, you will have to send it a [[akka.actor.PoisonPill]]
   * when you want to close the socket.
   */
  case class SimpleSender(options: immutable.Traversable[SocketOption] = Nil) extends Command
  object SimpleSender extends SimpleSender(Nil)
  /**
   * Send this message to a listener actor (which sent a [[Bound]] message) to
   * have it stop reading datagrams from the network. If the O/S kernel’s receive
   * buffer runs full then subsequent datagrams will be silently discarded.
   * Re-enable reading from the socket using the `ResumeReading` command.
   */
  case object SuspendReading extends Command
  /**
   * This message must be sent to the listener actor to re-enable reading from
   * the socket after a `SuspendReading` command.
   */
  case object ResumeReading extends Command
  /**
   * The common type of all events emitted by the UDP implementation.
   */
  trait Event extends Message
  /**
   * When a listener actor receives a datagram from its socket it will send
   * it to the handler designated in the [[Bind]] message using this message type.
   */
  final case class Received(data: ByteString, sender: InetSocketAddress) extends Event
  /**
   * When a command fails it will be replied to with this message type,
   * wrapping the failing command object.
   */
  final case class CommandFailed(cmd: Command) extends Event
  /**
   * This message is sent by the listener actor in response to a [[Bind]] command.
   * If the address to bind to specified a port number of zero, then this message
   * can be inspected to find out which port was automatically assigned.
   */
  final case class Bound(localAddress: InetSocketAddress) extends Event
  /**
   * The “simple sender” sends this message type in response to a [[SimpleSender]] query.
   */
  sealed trait SimpleSenderReady extends Event
  case object SimpleSenderReady extends SimpleSenderReady
  /**
   * This message is sent by the listener actor in response to an `Unbind` command
   * after the socket has been closed.
   */
  sealed trait Unbound
  case object Unbound extends Unbound
  /**
   * Scala API: This object provides access to all socket options applicable to UDP sockets.
   *
   * For the Java API see [[UdpSO]].
   */
  object SO extends Inet.SoForwarders {
    /**
     * [[akka.io.Inet.SocketOption]] to set the SO_BROADCAST option
     *
     * For more information see [[java.net.DatagramSocket#setBroadcast]]
     */
    final case class Broadcast(on: Boolean) extends SocketOption {
      override def beforeDatagramBind(s: DatagramSocket): Unit = s.setBroadcast(on)
    }
  }
  // Settings read from the `akka.io.udp` configuration section.
  private[io] class UdpSettings(_config: Config) extends SelectionHandlerSettings(_config) {
    import _config._
    // Number of selector actors to spread channels over; must be positive.
    val NrOfSelectors: Int = getInt("nr-of-selectors") requiring (_ > 0, "nr-of-selectors must be > 0")
    // Size of the direct ByteBuffers used for socket reads/writes.
    val DirectBufferSize: Int = getIntBytes("direct-buffer-size")
    val MaxDirectBufferPoolSize: Int = getInt("direct-buffer-pool-limit")
    // Maximum number of datagrams processed per selector wake-up.
    val BatchReceiveLimit: Int = getInt("receive-throughput")
    val ManagementDispatcher: String = getString("management-dispatcher")
    // Spread the overall channel limit evenly across selectors (-1 = unlimited).
    override val MaxChannelsPerSelector: Int = if (MaxChannels == -1) -1 else math.max(MaxChannels / NrOfSelectors, 1)
    // Reads a byte-size config value, guarding against Int overflow.
    private[this] def getIntBytes(path: String): Int = {
      val size = getBytes(path)
      require(size < Int.MaxValue, s"$path must be < 2 GiB")
      size.toInt
    }
  }
}
/** Extension instance holding the UDP settings, manager actor and buffer pool for one actor system. */
class UdpExt(system: ExtendedActorSystem) extends IO.Extension {
  import Udp.UdpSettings
  // Settings read from the `akka.io.udp` configuration section.
  val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp"))
  // The UDP manager actor; target for Bind / SimpleSender commands.
  val manager: ActorRef = {
    system.systemActorOf(
      props = Props(classOf[UdpManager], this).withDeploy(Deploy.local),
      name = "IO-UDP-FF")
  }
  /**
   * Java API: retrieve the UDP manager actor’s reference.
   */
  def getManager: ActorRef = manager
  /**
   * INTERNAL API
   */
  private[io] val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize)
}
/**
 * Java API: factory methods for the message types used when communicating with the Udp service.
 */
object UdpMessage {
  import Udp._
  // Java interop helpers (JIterable, asScala) for the factory methods below.
  import java.lang.{ Iterable ⇒ JIterable }
  import scala.collection.JavaConverters._
  import language.implicitConversions
  /**
   * Each [[Udp.Send]] can optionally request a positive acknowledgment to be sent
   * to the commanding actor. If such notification is not desired the [[Udp.Send#ack]]
   * must be set to an instance of this class. The token contained within can be used
   * to recognize which write failed when receiving a [[Udp.CommandFailed]] message.
   */
  def noAck(token: AnyRef): NoAck = NoAck(token)
  /**
   * Default [[Udp.NoAck]] instance which is used when no acknowledgment information is
   * explicitly provided. Its “token” is `null`.
   */
  def noAck: NoAck = NoAck
  /**
   * This message is understood by the “simple sender” which can be obtained by
   * sending the [[Udp.SimpleSender]] query to the [[UdpExt#manager]] as well as by
   * the listener actors which are created in response to [[Udp.Bind]]. It will send
   * the given payload data as one UDP datagram to the given target address. The
   * UDP actor will respond with [[Udp.CommandFailed]] if the send could not be
   * enqueued to the O/S kernel because the send buffer was full. If the given
   * `ack` is not of type [[Udp.NoAck]] the UDP actor will reply with the given
   * object as soon as the datagram has been successfully enqueued to the O/S
   * kernel.
   *
   * The sending UDP socket’s address belongs to the “simple sender” which does
   * not handle inbound datagrams and sends from an ephemeral port; therefore
   * sending using this mechanism is not suitable if replies are expected, use
   * [[Udp.Bind]] in that case.
   */
  def send(payload: ByteString, target: InetSocketAddress, ack: Event): Command = Send(payload, target, ack)
  /**
   * The same as `send(payload, target, noAck())`.
   */
  def send(payload: ByteString, target: InetSocketAddress): Command = Send(payload, target)
  /**
   * Send this message to the [[UdpExt#manager]] in order to bind to the given
   * local port (or an automatically assigned one if the port number is zero).
   * The listener actor for the newly bound port will reply with a [[Udp.Bound]]
   * message, or the manager will reply with a [[Udp.CommandFailed]] message.
   */
  def bind(handler: ActorRef, endpoint: InetSocketAddress, options: JIterable[SocketOption]): Command =
    Bind(handler, endpoint, options.asScala.to)
  /**
   * Bind without specifying options.
   */
  def bind(handler: ActorRef, endpoint: InetSocketAddress): Command = Bind(handler, endpoint, Nil)
  /**
   * Send this message to the listener actor that previously sent a [[Udp.Bound]]
   * message in order to close the listening socket. The recipient will reply
   * with an [[Udp.Unbound]] message.
   */
  def unbind: Command = Unbind
  /**
   * Retrieve a reference to a “simple sender” actor of the UDP extension.
   * The newly created “simple sender” will reply with the [[Udp.SimpleSenderReady]] notification.
   *
   * The “simple sender” is a convenient service for being able to send datagrams
   * when the originating address is meaningless, i.e. when no reply is expected.
   *
   * The “simple sender” will not stop itself, you will have to send it a [[akka.actor.PoisonPill]]
   * when you want to close the socket.
   */
  def simpleSender(options: JIterable[SocketOption]): Command = SimpleSender(options.asScala.to)
  /**
   * Retrieve a simple sender without specifying options.
   */
  def simpleSender: Command = SimpleSender
  /**
   * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to
   * have it stop reading datagrams from the network. If the O/S kernel’s receive
   * buffer runs full then subsequent datagrams will be silently discarded.
   * Re-enable reading from the socket using the `Udp.ResumeReading` command.
   */
  def suspendReading: Command = SuspendReading
  /**
   * This message must be sent to the listener actor to re-enable reading from
   * the socket after a `Udp.SuspendReading` command.
   */
  def resumeReading: Command = ResumeReading
}
/** Java API: factories for UDP-specific socket options (common ones come from SoJavaFactories). */
object UdpSO extends SoJavaFactories {
  import Udp.SO._
  /**
   * [[akka.io.Inet.SocketOption]] to set the SO_BROADCAST option
   *
   * For more information see [[java.net.DatagramSocket#setBroadcast]]
   */
  def broadcast(on: Boolean) = Broadcast(on)
}
| jmnarloch/akka.js | akka-js-actor/jvm/src/main/scala/akka/io/Udp.scala | Scala | bsd-3-clause | 13,086 |
import javax.inject.Inject
import play.api.OptionalDevContext
import play.api.http._
import play.api.mvc._
import play.api.mvc.request.RequestTarget
import play.api.routing.Router
import play.core.WebCommands
/**
 * Handles all requests.
 *
 * https://www.playframework.com/documentation/latest/ScalaHttpRequestHandlers#extending-the-default-request-handler
 */
class RequestHandler @Inject()(webCommands: WebCommands,
                               optDevContext: OptionalDevContext,
                               router: Router,
                               errorHandler: HttpErrorHandler,
                               configuration: HttpConfiguration,
                               filters: HttpFilters)
    extends DefaultHttpRequestHandler(webCommands,
                                      optDevContext,
                                      router,
                                      errorHandler,
                                      configuration,
                                      filters) {
  // Normalizes REST requests before delegating to the default routing logic.
  override def handlerForRequest(
      request: RequestHeader): (RequestHeader, Handler) = {
    super.handlerForRequest {
      // ensures that REST API does not need a trailing "/"
      if (isREST(request)) {
        addTrailingSlash(request)
      } else {
        request
      }
    }
  }
  // NOTE(review): treats any URI containing the substring "post" as a REST call,
  // which would also match unrelated paths — confirm this heuristic is intended.
  private def isREST(request: RequestHeader) = {
    request.uri match {
      case uri: String if uri.contains("post") => true
      case _ => false
    }
  }
  // Returns a copy of the request whose path ends in "/" (no-op if it already does).
  private def addTrailingSlash(origReq: RequestHeader): RequestHeader = {
    if (!origReq.path.endsWith("/")) {
      val path = origReq.path + "/"
      if (origReq.rawQueryString.isEmpty) {
        origReq.withTarget(
          RequestTarget(path = path, uriString = path, queryString = Map())
        )
      } else {
        // NOTE(review): here uriString keeps the original (slash-less) URI while
        // path gets the trailing slash — confirm this asymmetry is intentional.
        origReq.withTarget(
          RequestTarget(path = path,
                        uriString = origReq.uri,
                        queryString = origReq.queryString)
        )
      }
    } else {
      origReq
    }
  }
}
| gengstrand/clojure-news-feed | server/feed12/app/RequestHandler.scala | Scala | epl-1.0 | 2,067 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package compiler.lang.backend
import compiler.Common
import compiler.lang.core.Core
/** Backend-related (but backend-agnostic) transformations. */
private[compiler] trait Backend extends Common
  with Caching
  with Order
  with Specialization {
  self: Core =>
  /** Bundles the backend transformations exposed to the rest of the compiler. */
  object Backend {
    /** Delegates to [[Specialization.specialize]]. */
    def specialize(backend: BackendAPI) =
      Specialization.specialize(backend)
    /** Delegates to [[Caching.addCacheCalls]]. */
    lazy val addCacheCalls = Caching.addCacheCalls
  }
}
| aalexandrov/emma | emma-language/src/main/scala/org/emmalanguage/compiler/lang/backend/Backend.scala | Scala | apache-2.0 | 1,178 |
package org.bitcoins.commons.jsonmodels.cli
import org.bitcoins.commons.serializers.Picklers
import org.bitcoins.core.protocol.dlc.models.DLCPayoutCurve
import org.bitcoins.core.protocol.tlv.{
ContractDescriptorTLV,
ContractDescriptorV0TLV,
ContractDescriptorV1TLV,
DLCSerializationVersion,
DigitDecompositionEventDescriptorV0TLV,
OracleAnnouncementTLV,
RoundingIntervalsV0TLV,
TLVPoint
}
import ujson.{Arr, Bool, Null, Num, Obj, Str}
object ContractDescriptorParser {
  /**
   * Parses a contract descriptor supplied on the command line.
   *
   * Accepts either a JSON object (enumerated-outcome descriptor, v0) or a JSON
   * array of payout points (numeric descriptor, v1); the digit count for the v1
   * case is read from the oracle announcement. Any other JSON value is an error.
   */
  def parseCmdLine(
      value: ujson.Value,
      announcementTLV: OracleAnnouncementTLV): ContractDescriptorTLV = {
    value match {
      case obj: Obj =>
        upickle.default
          .read[ContractDescriptorV0TLV](obj)(Picklers.contractDescriptorV0)
      case arr: Arr =>
        //we read the number of digits from the announcement,
        //take in tlv points for the payout curve
        //and don't provide access to give a rounding mode as a parameter
        val payoutPoints: Vector[TLVPoint] = arr.value.toVector.map { pointJs =>
          upickle.default
            .read[TLVPoint](pointJs)(Picklers.tlvPointReader)
        }
        val payoutCurve = DLCPayoutCurve
          .fromPoints(payoutPoints,
                      serializationVersion = DLCSerializationVersion.Beta)
          .toTLV
        // NOTE(review): assumes the announcement's event is a digit-decomposition
        // event; any other descriptor type raises a ClassCastException here.
        val numDigits = announcementTLV.eventTLV.eventDescriptor
          .asInstanceOf[DigitDecompositionEventDescriptorV0TLV]
          .numDigits
          .toInt
        ContractDescriptorV1TLV(numDigits,
                                payoutCurve,
                                RoundingIntervalsV0TLV.noRounding)
      case fail @ (_: Num | _: Bool | Null | _: Str) =>
        sys.error(
          s"Cannot parse contract descriptor from $fail, expected json object or array")
    }
  }
}
| bitcoin-s/bitcoin-s | app-commons/src/main/scala/org/bitcoins/commons/jsonmodels/cli/ContractDescriptorParser.scala | Scala | mit | 1,805 |
package p04Euler
import p05Func.Prime
/** Prints the sum of all primes below 2,000,000 (Project Euler #10) and below 10. */
object SumOfPrime {
  def main(args: Array[String]): Unit = {
    val primes = Prime.listPrime
    // Sum of all primes strictly below `limit`.
    def sumOfPrimesBelow(limit: Int) = primes.takeWhile(_ < limit).toList.sum
    // NOTE(review): if Prime.listPrime yields Int, the 2,000,000 sum overflows
    // Int (true answer exceeds Int.MaxValue) — confirm the element type.
    println(sumOfPrimesBelow(2000000))
    println(sumOfPrimesBelow(10))
  }
}
| vkubicki/ScalaTest | src/main/scala/p04Euler/010 - SumOfPrime.scala | Scala | mit | 243 |
package vtpassim
import org.apache.spark.sql.SparkSession
/** One page of a digitized newspaper issue from the ONB dump. */
case class ONBRecord(id: String, issue: String, series: String, seq: Int,
date: String, text: String, page_access: String, book_access: String)
/** Spark job: parses per-issue ONB newspaper XML into one row per page and saves a DataFrame. */
object ONB {
  def main(args: Array[String]) {
    val spark = SparkSession.builder().appName("ONB import").getOrCreate()
    import spark.implicits._
    // Recurse into subdirectories when listing input files.
    spark.sparkContext.hadoopConfiguration
      .set("mapreduce.input.fileinputformat.input.dir.recursive", "true")
    // NOTE(review): inside triple quotes \\d matches a literal backslash followed
    // by 'd', not a digit — if dates never parse, \d was probably intended; confirm.
    val datePat = """(\\d{4})(\\d\\d)(\\d\\d)$""".r.unanchored
    spark.sparkContext.wholeTextFiles(args(0), spark.sparkContext.defaultParallelism)
      .filter(_._1.contains(".xml"))
      .flatMap { f =>
        val t = scala.xml.XML.loadString(f._2)
        val name = t \\ "newspaper" \\ "name"
        // Handles both attribute spellings seen in the data: anno_id and anno-id.
        val rawSeries = (name \\ "@anno_id").text + (name \\ "@anno-id").text
        val series = (if (rawSeries == "aid") "dea" else rawSeries)
        (t \\ "newspaper" \\ "issue").flatMap { issue =>
          val book_access = (issue \\ "path").text
          // Derive an ISO date from the YYYYMMDD suffix of the issue path; skip issues without one.
          val date = book_access match { case datePat(y,m,d) => s"$y-$m-$d" case _ => "" }
          if ( date == "" ) Nil
          else
            (issue \\ "pages" \\ "page").map { page =>
              // Missing page numbers default to sequence 0.
              val seqstr = (page \\ "number").text match { case "" => "0" case x => x }
              ONBRecord(s"$series/$date/$seqstr", s"$series/$date",
                series, seqstr.toInt, date,
                (page \\ "text").text,
                (page \\ "pagePath").text, book_access)
            }
        }
      }
      .toDF
      .dropDuplicates("id")
      .write.save(args(1))
    spark.stop()
  }
}
| ViralTexts/vt-passim | src/main/scala/ONB.scala | Scala | apache-2.0 | 1,589 |
package xyz.hyperreal.btree
import collection.SortedMap
import collection.mutable.{Map, MapLike, AbstractMap}
object MutableSortedMap {
  // Branching factor used for the backing B+ tree when none is supplied.
  private val DEFAULT_ORDER = 10
}
/**
 * A mutable sorted map backed by a [[BPlusTree]]. Exposes the immutable
 * `SortedMap` interface plus in-place `+=`/`-=` mutators.
 */
class MutableSortedMap[K <% Ordered[K], V]( btree: BPlusTree[K, V] ) extends SortedMap[K, V] {
	// Default constructor backs the map with an in-memory B+ tree.
	def this() = this( new MemoryBPlusTree[K, V](MutableSortedMap.DEFAULT_ORDER) )

	// NOTE(review): summons whatever Ordering[K] is in implicit scope; confirm it is
	// consistent with the K <% Ordered[K] view bound used by the tree itself.
	implicit def ordering = implicitly[Ordering[K]]

	// In-place insert (mutates the shared backing tree).
	def +=( kv: (K, V) ) = {
		kv match {case (k, v) => btree insert (k, v)}
		this
	}

	// In-place delete (mutates the shared backing tree).
	def -=( key: K ) = {
		btree delete key
		this
	}

	// Non-destructive add: rebuilds a fresh tree from everything below the key,
	// the new pair, then everything above the key.
	// NOTE(review): builds a MemoryBPlusTree[K, V] but inserts kv._2 of type V1 >: V —
	// verify this type-checks / is intended.
	override def +[V1 >: V]( kv: (K, V1) ) = {
		val newtree = new MemoryBPlusTree[K, V]( MutableSortedMap.DEFAULT_ORDER )

		newtree.load( btree.boundedIterator( ('<, kv._1) ).toList: _* )
		newtree.insert( kv._1, kv._2 )
		newtree.load( btree.boundedIterator( ('>, kv._1) ).toList: _* )
		new MutableSortedMap( newtree )
	}

	// Non-destructive removal: copies every entry except `key` into a fresh tree.
	override def -( key: K ) = {
		val newtree = new MemoryBPlusTree[K, V]( MutableSortedMap.DEFAULT_ORDER )

		newtree.load( btree.boundedIterator( ('<, key) ).toList: _* )
		newtree.load( btree.boundedIterator( ('>, key) ).toList: _* )
		new MutableSortedMap( newtree )
	}

	// Range view over the SAME backing tree: later mutations of this map are
	// visible through the returned view (it only overrides iteration bounds).
	def rangeImpl( from: Option[K], until: Option[K] ) = {
		val bounds =
			(from, until) match {
				case (None, None) => Nil
				case (None, Some( u )) => List( ('<, u) )
				case (Some( l ), None) => List( ('>=, l) )
				case (Some( l ), Some( u )) => List( ('>=, l), ('<, u) )
			}
		val view = new MutableSortedMap[K, V]( btree ) {
			override def iterator = if (bounds isEmpty) btree.iterator else btree.boundedIterator( bounds: _* )
		}

		view
	}

	override def empty = new MutableSortedMap[K, V]

	def get( key: K ) = btree search key

	def iterator = btree iterator

	// Iterators restricted to keys >= start, per the SortedMapLike contract.
	def iteratorFrom( start: K ) = btree.boundedIterator( ('>=, start) )

	def keysIteratorFrom( start: K ) = btree.boundedKeysIterator( ('>=, start) )

	def valuesIteratorFrom( start: K ) = btree.boundedValuesIterator( ('>=, start) )
} | edadma/b-tree | src/main/scala/MutableSortedMap.scala | Scala | isc | 1,963 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.UUID
import java.util.concurrent.TimeUnit._
import scala.collection.JavaConverters._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateUnsafeProjection, Predicate}
import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark
import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, Partitioning}
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.execution.streaming.state._
import org.apache.spark.sql.streaming.{OutputMode, StateOperatorProgress}
import org.apache.spark.sql.types._
import org.apache.spark.util.{CompletionIterator, NextIterator}
/** Used to identify the state store for a given operator. */
case class StatefulOperatorStateInfo(
    checkpointLocation: String,
    queryRunId: UUID,
    operatorId: Long,
    storeVersion: Long,
    numPartitions: Int) {

  /** Human-readable rendering of this state-store identity, used in logs/plans. */
  override def toString(): String = {
    val parts = Seq(
      s"checkpoint = $checkpointLocation",
      s"runId = $queryRunId",
      s"opId = $operatorId",
      s"ver = $storeVersion",
      s"numPartitions = $numPartitions")
    parts.mkString("state info [ ", ", ", "]")
  }
}
/**
* An operator that reads or writes state from the [[StateStore]].
* The [[StatefulOperatorStateInfo]] should be filled in by `prepareForExecution` in
* [[IncrementalExecution]].
*/
trait StatefulOperator extends SparkPlan {
  /** State-store identity; injected by `prepareForExecution` in `IncrementalExecution`. */
  def stateInfo: Option[StatefulOperatorStateInfo]

  /** Returns the attached state info, failing fast if planning never supplied one. */
  protected def getStateInfo: StatefulOperatorStateInfo = attachTree(this) {
    stateInfo match {
      case Some(info) => info
      case None =>
        throw new IllegalStateException("State location not present for execution")
    }
  }
}
/** An operator that reads from a StateStore. */
trait StateStoreReader extends StatefulOperator {
  // Readers only track emitted rows; state-update metrics live in StateStoreWriter.
  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
}
/** An operator that writes to a StateStore. */
trait StateStoreWriter extends StatefulOperator { self: SparkPlan =>

  // Metrics common to all state-writing operators, plus any custom metrics declared
  // by the configured StateStoreProvider.
  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
    "numTotalStateRows" -> SQLMetrics.createMetric(sparkContext, "number of total state rows"),
    "numUpdatedStateRows" -> SQLMetrics.createMetric(sparkContext, "number of updated state rows"),
    "allUpdatesTimeMs" -> SQLMetrics.createTimingMetric(sparkContext, "total time to update rows"),
    "allRemovalsTimeMs" -> SQLMetrics.createTimingMetric(sparkContext, "total time to remove rows"),
    "commitTimeMs" -> SQLMetrics.createTimingMetric(sparkContext, "time to commit changes"),
    "stateMemory" -> SQLMetrics.createSizeMetric(sparkContext, "memory used by state")
  ) ++ stateStoreCustomMetrics

  /**
   * Get the progress made by this stateful operator after execution. This should be called in
   * the driver after this SparkPlan has been executed and metrics have been updated.
   */
  def getProgress(): StateOperatorProgress = {
    new StateOperatorProgress(
      numRowsTotal = longMetric("numTotalStateRows").value,
      numRowsUpdated = longMetric("numUpdatedStateRows").value,
      memoryUsedBytes = longMetric("stateMemory").value)
  }

  /**
   * Records the duration of running `body` for the next query progress update.
   *
   * @param body side-effecting work to time; evaluated exactly once, result discarded
   *             (the previously captured-but-unused local was removed)
   * @return elapsed wall-clock milliseconds, clamped to be non-negative
   */
  protected def timeTakenMs(body: => Unit): Long = {
    val startTime = System.nanoTime()
    body
    val endTime = System.nanoTime()
    math.max(NANOSECONDS.toMillis(endTime - startTime), 0)
  }

  /**
   * Set the SQL metrics related to the state store.
   * This should be called in that task after the store has been updated.
   */
  protected def setStoreMetrics(store: StateStore): Unit = {
    val storeMetrics = store.metrics
    longMetric("numTotalStateRows") += storeMetrics.numKeys
    longMetric("stateMemory") += storeMetrics.memoryUsedBytes
    // Custom provider metrics were pre-registered by `stateStoreCustomMetrics`,
    // so the lookup by name is expected to succeed.
    storeMetrics.customMetrics.foreach { case (metric, value) =>
      longMetric(metric.name) += value
    }
  }

  private def stateStoreCustomMetrics: Map[String, SQLMetric] = {
    // A provider instance is created solely to discover its supported custom metrics.
    val provider = StateStoreProvider.create(sqlContext.conf.stateStoreProviderClass)
    provider.supportedCustomMetrics.map {
      case StateStoreCustomSizeMetric(name, desc) =>
        name -> SQLMetrics.createSizeMetric(sparkContext, desc)
      case StateStoreCustomTimingMetric(name, desc) =>
        name -> SQLMetrics.createTimingMetric(sparkContext, desc)
    }.toMap
  }

  /**
   * Should the MicroBatchExecution run another batch based on this stateful operator and the
   * current updated metadata.
   */
  def shouldRunAnotherBatch(newMetadata: OffsetSeqMetadata): Boolean = false
}
/** An operator that supports watermark. */
trait WatermarkSupport extends UnaryExecNode {

  /** The keys that may have a watermark attribute. */
  def keyExpressions: Seq[Attribute]

  /** The watermark value. */
  def eventTimeWatermark: Option[Long]

  /** Generate an expression that matches data older than the watermark */
  lazy val watermarkExpression: Option[Expression] = {
    // The watermarked column is the child-output attribute tagged with
    // EventTimeWatermark.delayKey metadata.
    WatermarkSupport.watermarkExpression(
      child.output.find(_.metadata.contains(EventTimeWatermark.delayKey)),
      eventTimeWatermark)
  }

  /** Predicate based on keys that matches data older than the watermark */
  lazy val watermarkPredicateForKeys: Option[Predicate] = watermarkExpression.flatMap { e =>
    // Only available when the watermark column is one of the grouping keys.
    if (keyExpressions.exists(_.metadata.contains(EventTimeWatermark.delayKey))) {
      Some(newPredicate(e, keyExpressions))
    } else {
      None
    }
  }

  /** Predicate based on the child output that matches data older than the watermark. */
  lazy val watermarkPredicateForData: Option[Predicate] =
    watermarkExpression.map(newPredicate(_, child.output))

  /** Removes, via a full store scan, every key the key-based predicate marks expired. */
  protected def removeKeysOlderThanWatermark(store: StateStore): Unit = {
    if (watermarkPredicateForKeys.nonEmpty) {
      store.getRange(None, None).foreach { rowPair =>
        if (watermarkPredicateForKeys.get.eval(rowPair.key)) {
          store.remove(rowPair.key)
        }
      }
    }
  }
}
object WatermarkSupport {

  /** Generate an expression on given attributes that matches data older than the watermark */
  def watermarkExpression(
      optionalWatermarkExpression: Option[Expression],
      optionalWatermarkMs: Option[Long]): Option[Expression] = {
    // Both the watermarked attribute and the watermark value must be present.
    for {
      watermarkAttribute <- optionalWatermarkExpression
      watermarkMs <- optionalWatermarkMs
    } yield {
      val watermarkMicros = Literal(watermarkMs * 1000)
      // If we are evicting based on a window, use the end of the window. Otherwise just
      // use the attribute itself.
      watermarkAttribute.dataType match {
        case _: StructType =>
          LessThanOrEqual(GetStructField(watermarkAttribute, 1), watermarkMicros)
        case _ =>
          LessThanOrEqual(watermarkAttribute, watermarkMicros)
      }
    }
  }
}
/**
* For each input tuple, the key is calculated and the value from the [[StateStore]] is added
* to the stream (in addition to the input tuple) if present.
*/
case class StateStoreRestoreExec(
    keyExpressions: Seq[Attribute],
    stateInfo: Option[StatefulOperatorStateInfo],
    child: SparkPlan)
  extends UnaryExecNode with StateStoreReader {

  override protected def doExecute(): RDD[InternalRow] = {
    val numOutputRows = longMetric("numOutputRows")

    child.execute().mapPartitionsWithStateStore(
      getStateInfo,
      keyExpressions.toStructType,
      child.output.toStructType,
      indexOrdinal = None,
      sqlContext.sessionState,
      Some(sqlContext.streams.stateStoreCoordinator)) { case (store, iter) =>
        val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
        val hasInput = iter.hasNext
        if (!hasInput && keyExpressions.isEmpty) {
          // If our `keyExpressions` are empty, we're getting a global aggregation. In that case
          // the `HashAggregateExec` will output a 0 value for the partial merge. We need to
          // restore the value, so that we don't overwrite our state with a 0 value, but rather
          // merge the 0 with existing state.
          store.iterator().map(_.value)
        } else {
          // Emit the saved state row for the key (if any) followed by the input row itself.
          // NOTE(review): numOutputRows counts one per input row even when two rows are
          // emitted — confirm this is the intended metric semantics.
          iter.flatMap { row =>
            val key = getKey(row)
            val savedState = store.get(key)
            numOutputRows += 1
            Option(savedState).toSeq :+ row
          }
        }
    }
  }

  override def output: Seq[Attribute] = child.output

  override def outputPartitioning: Partitioning = child.outputPartitioning

  override def requiredChildDistribution: Seq[Distribution] = {
    // Global aggregation must be single-partition; otherwise co-partition by key.
    if (keyExpressions.isEmpty) {
      AllTuples :: Nil
    } else {
      ClusteredDistribution(keyExpressions, stateInfo.map(_.numPartitions)) :: Nil
    }
  }
}
/**
* For each input tuple, the key is calculated and the tuple is `put` into the [[StateStore]].
*/
case class StateStoreSaveExec(
    keyExpressions: Seq[Attribute],
    stateInfo: Option[StatefulOperatorStateInfo] = None,
    outputMode: Option[OutputMode] = None,
    eventTimeWatermark: Option[Long] = None,
    child: SparkPlan)
  extends UnaryExecNode with StateStoreWriter with WatermarkSupport {

  override protected def doExecute(): RDD[InternalRow] = {
    metrics // force lazy init at driver
    assert(outputMode.nonEmpty,
      "Incorrect planning in IncrementalExecution, outputMode has not been set")

    child.execute().mapPartitionsWithStateStore(
      getStateInfo,
      keyExpressions.toStructType,
      child.output.toStructType,
      indexOrdinal = None,
      sqlContext.sessionState,
      Some(sqlContext.streams.stateStoreCoordinator)) { (store, iter) =>
        val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
        val numOutputRows = longMetric("numOutputRows")
        val numUpdatedStateRows = longMetric("numUpdatedStateRows")
        val allUpdatesTimeMs = longMetric("allUpdatesTimeMs")
        val allRemovalsTimeMs = longMetric("allRemovalsTimeMs")
        val commitTimeMs = longMetric("commitTimeMs")

        outputMode match {
          // Update and output all rows in the StateStore.
          // Complete mode: store every input row, commit, then emit the whole store.
          case Some(Complete) =>
            allUpdatesTimeMs += timeTakenMs {
              while (iter.hasNext) {
                val row = iter.next().asInstanceOf[UnsafeRow]
                val key = getKey(row)
                store.put(key, row)
                numUpdatedStateRows += 1
              }
            }
            allRemovalsTimeMs += 0
            commitTimeMs += timeTakenMs {
              store.commit()
            }
            setStoreMetrics(store)
            store.iterator().map { rowPair =>
              numOutputRows += 1
              rowPair.value
            }

          // Update and output only rows being evicted from the StateStore
          // Assumption: watermark predicates must be non-empty if append mode is allowed
          case Some(Append) =>
            allUpdatesTimeMs += timeTakenMs {
              // Late rows (older than the watermark) are dropped before storing.
              val filteredIter = iter.filter(row => !watermarkPredicateForData.get.eval(row))
              while (filteredIter.hasNext) {
                val row = filteredIter.next().asInstanceOf[UnsafeRow]
                val key = getKey(row)
                store.put(key, row)
                numUpdatedStateRows += 1
              }
            }

            val removalStartTimeNs = System.nanoTime
            val rangeIter = store.getRange(None, None)

            // Lazily scan the store, removing and emitting each expired entry;
            // commit happens only when the iterator is fully consumed (close()).
            new NextIterator[InternalRow] {
              override protected def getNext(): InternalRow = {
                var removedValueRow: InternalRow = null
                while(rangeIter.hasNext && removedValueRow == null) {
                  val rowPair = rangeIter.next()
                  if (watermarkPredicateForKeys.get.eval(rowPair.key)) {
                    store.remove(rowPair.key)
                    removedValueRow = rowPair.value
                  }
                }
                if (removedValueRow == null) {
                  finished = true
                  null
                } else {
                  removedValueRow
                }
              }

              override protected def close(): Unit = {
                allRemovalsTimeMs += NANOSECONDS.toMillis(System.nanoTime - removalStartTimeNs)
                commitTimeMs += timeTakenMs { store.commit() }
                setStoreMetrics(store)
              }
            }

          // Update and output modified rows from the StateStore.
          // Update mode: emit each stored row as it is written; evict expired keys on close.
          case Some(Update) =>

            new NextIterator[InternalRow] {
              // Filter late date using watermark if specified
              private[this] val baseIterator = watermarkPredicateForData match {
                case Some(predicate) => iter.filter((row: InternalRow) => !predicate.eval(row))
                case None => iter
              }
              private val updatesStartTimeNs = System.nanoTime

              override protected def getNext(): InternalRow = {
                if (baseIterator.hasNext) {
                  val row = baseIterator.next().asInstanceOf[UnsafeRow]
                  val key = getKey(row)
                  store.put(key, row)
                  numOutputRows += 1
                  numUpdatedStateRows += 1
                  row
                } else {
                  finished = true
                  null
                }
              }

              override protected def close(): Unit = {
                allUpdatesTimeMs += NANOSECONDS.toMillis(System.nanoTime - updatesStartTimeNs)

                // Remove old aggregates if watermark specified
                allRemovalsTimeMs += timeTakenMs { removeKeysOlderThanWatermark(store) }
                commitTimeMs += timeTakenMs { store.commit() }
                setStoreMetrics(store)
              }
            }

          case _ => throw new UnsupportedOperationException(s"Invalid output mode: $outputMode")
        }
    }
  }

  override def output: Seq[Attribute] = child.output

  override def outputPartitioning: Partitioning = child.outputPartitioning

  override def requiredChildDistribution: Seq[Distribution] = {
    // Global aggregation must be single-partition; otherwise co-partition by key.
    if (keyExpressions.isEmpty) {
      AllTuples :: Nil
    } else {
      ClusteredDistribution(keyExpressions, stateInfo.map(_.numPartitions)) :: Nil
    }
  }

  override def shouldRunAnotherBatch(newMetadata: OffsetSeqMetadata): Boolean = {
    // An advancing watermark can evict more state, so Append/Update may need a new batch.
    (outputMode.contains(Append) || outputMode.contains(Update)) &&
      eventTimeWatermark.isDefined &&
      newMetadata.batchWatermarkMs > eventTimeWatermark.get
  }
}
/** Physical operator for executing streaming Deduplicate. */
case class StreamingDeduplicateExec(
    keyExpressions: Seq[Attribute],
    child: SparkPlan,
    stateInfo: Option[StatefulOperatorStateInfo] = None,
    eventTimeWatermark: Option[Long] = None)
  extends UnaryExecNode with StateStoreWriter with WatermarkSupport {

  /** Distribute by grouping attributes */
  override def requiredChildDistribution: Seq[Distribution] =
    ClusteredDistribution(keyExpressions, stateInfo.map(_.numPartitions)) :: Nil

  override protected def doExecute(): RDD[InternalRow] = {
    metrics // force lazy init at driver

    child.execute().mapPartitionsWithStateStore(
      getStateInfo,
      keyExpressions.toStructType,
      child.output.toStructType,
      indexOrdinal = None,
      sqlContext.sessionState,
      Some(sqlContext.streams.stateStoreCoordinator)) { (store, iter) =>
        val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
        val numOutputRows = longMetric("numOutputRows")
        val numTotalStateRows = longMetric("numTotalStateRows")
        val numUpdatedStateRows = longMetric("numUpdatedStateRows")
        val allUpdatesTimeMs = longMetric("allUpdatesTimeMs")
        val allRemovalsTimeMs = longMetric("allRemovalsTimeMs")
        val commitTimeMs = longMetric("commitTimeMs")

        // Drop rows older than the watermark before deduplication, if one is set.
        val baseIterator = watermarkPredicateForData match {
          case Some(predicate) => iter.filter(row => !predicate.eval(row))
          case None => iter
        }

        val updatesStartTimeNs = System.nanoTime

        // A key passes through exactly once: its first occurrence stores a sentinel
        // (EMPTY_ROW); later occurrences find the sentinel and are filtered out.
        val result = baseIterator.filter { r =>
          val row = r.asInstanceOf[UnsafeRow]
          val key = getKey(row)
          val value = store.get(key)
          if (value == null) {
            store.put(key, StreamingDeduplicateExec.EMPTY_ROW)
            numUpdatedStateRows += 1
            numOutputRows += 1
            true
          } else {
            // Drop duplicated rows
            false
          }
        }

        // On exhaustion: evict expired keys, commit the store, and record metrics.
        CompletionIterator[InternalRow, Iterator[InternalRow]](result, {
          allUpdatesTimeMs += NANOSECONDS.toMillis(System.nanoTime - updatesStartTimeNs)
          allRemovalsTimeMs += timeTakenMs { removeKeysOlderThanWatermark(store) }
          commitTimeMs += timeTakenMs { store.commit() }
          setStoreMetrics(store)
        })
    }
  }

  override def output: Seq[Attribute] = child.output

  override def outputPartitioning: Partitioning = child.outputPartitioning

  override def shouldRunAnotherBatch(newMetadata: OffsetSeqMetadata): Boolean = {
    // An advancing watermark lets expired dedup keys be evicted in a new batch.
    eventTimeWatermark.isDefined && newMetadata.batchWatermarkMs > eventTimeWatermark.get
  }
}
object StreamingDeduplicateExec {
  // Sentinel row stored per seen key: only the key's presence matters for dedup,
  // so the stored value is a single null column.
  private val EMPTY_ROW =
    UnsafeProjection.create(Array[DataType](NullType)).apply(InternalRow.apply(null))
}
| szhem/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/statefulOperators.scala | Scala | apache-2.0 | 18,423 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.lang.reflect.ParameterizedType
import scala.reflect.runtime.universe.TypeTag
import scala.util.Try
import org.apache.spark.annotation.Stable
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.internal.Logging
import org.apache.spark.sql.api.java._
import org.apache.spark.sql.catalyst.{JavaTypeInference, ScalaReflection}
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.{Expression, ScalaUDF}
import org.apache.spark.sql.catalyst.util.CharVarcharUtils
import org.apache.spark.sql.execution.aggregate.ScalaUDAF
import org.apache.spark.sql.execution.python.UserDefinedPythonFunction
import org.apache.spark.sql.expressions.{SparkUserDefinedFunction, UserDefinedAggregateFunction, UserDefinedAggregator, UserDefinedFunction}
import org.apache.spark.sql.types.DataType
import org.apache.spark.util.Utils
/**
* Functions for registering user-defined functions. Use `SparkSession.udf` to access this:
*
* {{{
* spark.udf
* }}}
*
* @since 1.3.0
*/
@Stable
class UDFRegistration private[sql] (functionRegistry: FunctionRegistry) extends Logging {
import UDFRegistration._
// Registers a Python UDF as a temp function, logging its full definition at debug level.
protected[sql] def registerPython(name: String, udf: UserDefinedPythonFunction): Unit = {
  log.debug(
    s"""
      | Registering new PythonUDF:
      | name: $name
      | command: ${udf.func.command.toSeq}
      | envVars: ${udf.func.envVars}
      | pythonIncludes: ${udf.func.pythonIncludes}
      | pythonExec: ${udf.func.pythonExec}
      | dataType: ${udf.dataType}
      | pythonEvalType: ${PythonEvalType.toString(udf.pythonEvalType)}
    """.stripMargin)

  functionRegistry.createOrReplaceTempFunction(name, udf.builder)
}
/**
* Registers a user-defined aggregate function (UDAF).
*
* @param name the name of the UDAF.
* @param udaf the UDAF needs to be registered.
* @return the registered UDAF.
*
* @since 1.5.0
* @deprecated this method and the use of UserDefinedAggregateFunction are deprecated.
* Aggregator[IN, BUF, OUT] should now be registered as a UDF via the functions.udaf(agg) method.
*/
@deprecated("Aggregator[IN, BUF, OUT] should now be registered as a UDF" +
" via the functions.udaf(agg) method.", "3.0.0")
def register(name: String, udaf: UserDefinedAggregateFunction): UserDefinedAggregateFunction = {
  // Wrap the UDAF in a ScalaUDAF expression builder and register it as a temp function.
  val builder: Seq[Expression] => Expression = children => ScalaUDAF(children, udaf)
  functionRegistry.createOrReplaceTempFunction(name, builder)
  udaf
}
/**
* Registers a user-defined function (UDF), for a UDF that's already defined using the Dataset
* API (i.e. of type UserDefinedFunction). To change a UDF to nondeterministic, call the API
* `UserDefinedFunction.asNondeterministic()`. To change a UDF to nonNullable, call the API
* `UserDefinedFunction.asNonNullable()`.
*
* Example:
* {{{
* val foo = udf(() => Math.random())
* spark.udf.register("random", foo.asNondeterministic())
*
* val bar = udf(() => "bar")
* spark.udf.register("stringLit", bar.asNonNullable())
* }}}
*
* @param name the name of the UDF.
* @param udf the UDF needs to be registered.
* @return the registered UDF.
*
* @since 2.2.0
*/
def register(name: String, udf: UserDefinedFunction): UserDefinedFunction = {
  // Aggregators get a dedicated expression builder; plain UDFs are applied
  // through the Column API to obtain their expression.
  val builder: Seq[Expression] => Expression = udf match {
    case udaf: UserDefinedAggregator[_, _, _] =>
      children => udaf.scalaAggregator(children)
    case _ =>
      children => udf.apply(children.map(Column.apply) : _*).expr
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  udf
}
// scalastyle:off line.size.limit
/* register 0-22 were generated by this script
(0 to 22).foreach { x =>
val types = (1 to x).foldRight("RT")((i, s) => {s"A$i, $s"})
val typeTags = (1 to x).map(i => s"A$i: TypeTag").foldLeft("RT: TypeTag")(_ + ", " + _)
val inputEncoders = (1 to x).foldRight("Nil")((i, s) => {s"Try(ExpressionEncoder[A$i]()).toOption :: $s"})
println(s"""
|/**
| * Registers a deterministic Scala closure of $x arguments as user-defined function (UDF).
| * @tparam RT return type of UDF.
| * @since 1.3.0
| */
|def register[$typeTags](name: String, func: Function$x[$types]): UserDefinedFunction = {
| val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
| val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
| val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = $inputEncoders
| val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
| val finalUdf = if (nullable) udf else udf.asNonNullable()
| def builder(e: Seq[Expression]) = if (e.length == $x) {
| finalUdf.createScalaUDF(e)
| } else {
| throw new AnalysisException("Invalid number of arguments for function " + name +
| ". Expected: $x; Found: " + e.length)
| }
| functionRegistry.createOrReplaceTempFunction(name, builder)
| finalUdf
|}""".stripMargin)
}
(0 to 22).foreach { i =>
val extTypeArgs = (0 to i).map(_ => "_").mkString(", ")
val anyTypeArgs = (0 to i).map(_ => "Any").mkString(", ")
val anyCast = s".asInstanceOf[UDF$i[$anyTypeArgs]]"
val anyParams = (1 to i).map(_ => "_: Any").mkString(", ")
val version = if (i == 0) "2.3.0" else "1.3.0"
val funcCall = if (i == 0) s"() => f$anyCast.call($anyParams)" else s"f$anyCast.call($anyParams)"
println(s"""
|/**
| * Register a deterministic Java UDF$i instance as user-defined function (UDF).
| * @since $version
| */
|def register(name: String, f: UDF$i[$extTypeArgs], returnType: DataType): Unit = {
| val replaced = CharVarcharUtils.failIfHasCharVarchar(returnType)
| val func = $funcCall
| def builder(e: Seq[Expression]) = if (e.length == $i) {
| ScalaUDF(func, replaced, e, Nil, udfName = Some(name))
| } else {
| throw new AnalysisException("Invalid number of arguments for function " + name +
| ". Expected: $i; Found: " + e.length)
| }
| functionRegistry.createOrReplaceTempFunction(name, builder)
|}""".stripMargin)
}
*/
/**
* Registers a deterministic Scala closure of 0 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated method: produced by the `(0 to 22).foreach` script in the comment above;
// change the generator rather than editing by hand.
def register[RT: TypeTag](name: String, func: Function0[RT]): UserDefinedFunction = {
  // Prefer the encoder-derived schema; fall back to plain reflection when RT has no encoder.
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  // Non-nullable return types (e.g. primitives) keep their non-null guarantee.
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 0) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 0; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 1 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag](name: String, func: Function1[A1, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 1) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 1; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 2 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag](name: String, func: Function2[A1, A2, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 2) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 2; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 3 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag](name: String, func: Function3[A1, A2, A3, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 3) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 3; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 4 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag](name: String, func: Function4[A1, A2, A3, A4, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 4) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 4; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 5 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag](name: String, func: Function5[A1, A2, A3, A4, A5, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 5) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 5; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
* Registers a deterministic Scala closure of 6 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
* @since 1.3.0
*/
// Generated: keep in sync with the generator script in the comment above.
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag](name: String, func: Function6[A1, A2, A3, A4, A5, A6, RT]): UserDefinedFunction = {
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Nil
  val udf = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) udf else udf.asNonNullable()
  def builder(e: Seq[Expression]) = if (e.length == 6) {
    finalUdf.createScalaUDF(e)
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      ". Expected: 6; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 7 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag](name: String, func: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 7) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 7; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 8 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag](name: String, func: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 8) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 8; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 9 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag](name: String, func: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 9) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 9; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 10 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag](name: String, func: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 10) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 10; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 11 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag](name: String, func: Function11[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 11) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 11; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 12 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag](name: String, func: Function12[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 12) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 12; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 13 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag](name: String, func: Function13[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 13) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 13; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 14 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag](name: String, func: Function14[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 14) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 14; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 15 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag](name: String, func: Function15[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 15) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 15; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 16 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag](name: String, func: Function16[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 16) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 16; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 17 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag](name: String, func: Function17[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 17) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 17; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 18 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag](name: String, func: Function18[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17], encoderFor[A18])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 18) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 18; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 19 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag](name: String, func: Function19[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17], encoderFor[A18], encoderFor[A19])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 19) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 19; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 20 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag](name: String, func: Function20[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17], encoderFor[A18], encoderFor[A19], encoderFor[A20])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 20) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 20; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 21 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag, A21: TypeTag](name: String, func: Function21[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17], encoderFor[A18], encoderFor[A19], encoderFor[A20],
      encoderFor[A21])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 21) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 21; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
/**
 * Registers a deterministic Scala closure of 22 arguments as user-defined function (UDF).
 * @tparam RT the return type of the UDF.
 * @since 1.3.0
 */
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag, A21: TypeTag, A22: TypeTag](name: String, func: Function22[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22, RT]): UserDefinedFunction = {
  // Attempt to derive an ExpressionEncoder per input type; None if derivation fails.
  def encoderFor[T: TypeTag]: Option[ExpressionEncoder[_]] = Try(ExpressionEncoder[T]()).toOption
  val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
  // Schema comes from the output encoder when available, else from reflection on RT.
  val ScalaReflection.Schema(dataType, nullable) =
    outputEncoder.map(outputSchema).getOrElse(ScalaReflection.schemaFor[RT])
  val inputEncoders: Seq[Option[ExpressionEncoder[_]]] =
    Seq(encoderFor[A1], encoderFor[A2], encoderFor[A3], encoderFor[A4], encoderFor[A5],
      encoderFor[A6], encoderFor[A7], encoderFor[A8], encoderFor[A9], encoderFor[A10],
      encoderFor[A11], encoderFor[A12], encoderFor[A13], encoderFor[A14], encoderFor[A15],
      encoderFor[A16], encoderFor[A17], encoderFor[A18], encoderFor[A19], encoderFor[A20],
      encoderFor[A21], encoderFor[A22])
  val named = SparkUserDefinedFunction(func, dataType, inputEncoders, outputEncoder).withName(name)
  val finalUdf = if (nullable) named else named.asNonNullable()
  // Builder registered under `name`; rejects call sites with the wrong argument count.
  def builder(args: Seq[Expression]) =
    if (args.length == 22) {
      finalUdf.createScalaUDF(args)
    } else {
      throw new AnalysisException("Invalid number of arguments for function " + name +
        ". Expected: 22; Found: " + args.length)
    }
  functionRegistry.createOrReplaceTempFunction(name, builder)
  finalUdf
}
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Register a Java UDF class using reflection, for use from pyspark
 *
 * @param name udf name
 * @param className fully qualified class name of udf
 * @param returnDataType return type of udf. If it is null, spark would try to infer
 *                       via reflection.
 */
private[sql] def registerJava(name: String, className: String, returnDataType: DataType): Unit = {
  try {
    val clazz = Utils.classForName[AnyRef](className)
    // Keep only the org.apache.spark.sql.api.java.UDFxx interfaces implemented by the
    // class; their type arguments encode both the arity and the declared return type.
    val udfInterfaces = clazz.getGenericInterfaces
      .filter(_.isInstanceOf[ParameterizedType])
      .map(_.asInstanceOf[ParameterizedType])
      .filter(e => e.getRawType.isInstanceOf[Class[_]] && e.getRawType.asInstanceOf[Class[_]].getCanonicalName.startsWith("org.apache.spark.sql.api.java.UDF"))
    if (udfInterfaces.length == 0) {
      throw new AnalysisException(s"UDF class $className doesn't implement any UDF interface")
    } else if (udfInterfaces.length > 1) {
      // Ambiguous arity: refuse rather than guess which interface to use.
      throw new AnalysisException(s"It is invalid to implement multiple UDF interfaces, UDF class $className")
    } else {
      try {
        val udf = clazz.getConstructor().newInstance()
        // The last type argument of UDFn is the return type.
        val udfReturnType = udfInterfaces(0).getActualTypeArguments.last
        var returnType = returnDataType
        if (returnType == null) {
          // No explicit return type supplied (e.g. from pyspark): infer via reflection.
          returnType = JavaTypeInference.inferDataType(udfReturnType)._1
        }
        // UDFn has n + 1 type arguments (n inputs plus the return type), hence the
        // off-by-one between each case value and the UDFn interface it dispatches to.
        udfInterfaces(0).getActualTypeArguments.length match {
          case 1 => register(name, udf.asInstanceOf[UDF0[_]], returnType)
          case 2 => register(name, udf.asInstanceOf[UDF1[_, _]], returnType)
          case 3 => register(name, udf.asInstanceOf[UDF2[_, _, _]], returnType)
          case 4 => register(name, udf.asInstanceOf[UDF3[_, _, _, _]], returnType)
          case 5 => register(name, udf.asInstanceOf[UDF4[_, _, _, _, _]], returnType)
          case 6 => register(name, udf.asInstanceOf[UDF5[_, _, _, _, _, _]], returnType)
          case 7 => register(name, udf.asInstanceOf[UDF6[_, _, _, _, _, _, _]], returnType)
          case 8 => register(name, udf.asInstanceOf[UDF7[_, _, _, _, _, _, _, _]], returnType)
          case 9 => register(name, udf.asInstanceOf[UDF8[_, _, _, _, _, _, _, _, _]], returnType)
          case 10 => register(name, udf.asInstanceOf[UDF9[_, _, _, _, _, _, _, _, _, _]], returnType)
          case 11 => register(name, udf.asInstanceOf[UDF10[_, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 12 => register(name, udf.asInstanceOf[UDF11[_, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 13 => register(name, udf.asInstanceOf[UDF12[_, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 14 => register(name, udf.asInstanceOf[UDF13[_, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 15 => register(name, udf.asInstanceOf[UDF14[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 16 => register(name, udf.asInstanceOf[UDF15[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 17 => register(name, udf.asInstanceOf[UDF16[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 18 => register(name, udf.asInstanceOf[UDF17[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 19 => register(name, udf.asInstanceOf[UDF18[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 20 => register(name, udf.asInstanceOf[UDF19[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 21 => register(name, udf.asInstanceOf[UDF20[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 22 => register(name, udf.asInstanceOf[UDF21[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case 23 => register(name, udf.asInstanceOf[UDF22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], returnType)
          case n =>
            throw new AnalysisException(s"UDF class with $n type arguments is not supported.")
        }
      } catch {
        // Reflective instantiation failed: surface a friendlier error message.
        case e @ (_: InstantiationException | _: IllegalArgumentException) =>
          throw new AnalysisException(s"Can not instantiate class $className, please make sure it has public non argument constructor")
      }
    }
  } catch {
    case e: ClassNotFoundException => throw new AnalysisException(s"Can not load class $className, please make sure it is on the classpath")
  }
}
/**
 * Register a Java UDAF class using reflection, for use from pyspark
 *
 * @param name UDAF name
 * @param className fully qualified class name of UDAF; must implement
 *                  [[UserDefinedAggregateFunction]] and expose a public
 *                  zero-argument constructor
 */
private[sql] def registerJavaUDAF(name: String, className: String): Unit = {
  try {
    val udafClass = Utils.classForName[AnyRef](className)
    if (!classOf[UserDefinedAggregateFunction].isAssignableFrom(udafClass)) {
      throw new AnalysisException(s"class $className doesn't implement interface UserDefinedAggregateFunction")
    }
    // Instantiate reflectively, then register under the requested name.
    val instance = udafClass.getConstructor().newInstance()
    register(name, instance.asInstanceOf[UserDefinedAggregateFunction])
  } catch {
    case _: ClassNotFoundException =>
      throw new AnalysisException(s"Can not load class ${className}, please make sure it is on the classpath")
    case _: InstantiationException | _: IllegalArgumentException =>
      throw new AnalysisException(s"Can not instantiate class ${className}, please make sure it has public non argument constructor")
  }
}
/**
 * Shared implementation for the `register(name, UDFi, returnType)` overloads below.
 * Each of the 23 previous copies duplicated the same builder/registration logic;
 * only the eta-expanded function and the arity differed.
 *
 * @param name       name under which the function is registered
 * @param func       scala.FunctionN wrapper around the Java UDF's `call` method
 * @param returnType declared return type; CHAR/VARCHAR is rejected
 * @param arity      exact number of arguments the registered function accepts
 */
private def registerJavaUDF(name: String, func: AnyRef, returnType: DataType, arity: Int): Unit = {
  // Reject CHAR/VARCHAR in the declared return type, exactly as each overload did.
  val replaced = CharVarcharUtils.failIfHasCharVarchar(returnType)
  def builder(e: Seq[Expression]) = if (e.length == arity) {
    ScalaUDF(func, replaced, e, Nil, udfName = Some(name))
  } else {
    throw new AnalysisException("Invalid number of arguments for function " + name +
      s". Expected: $arity; Found: " + e.length)
  }
  functionRegistry.createOrReplaceTempFunction(name, builder)
}

/**
 * Register a deterministic Java UDF0 instance as user-defined function (UDF).
 * @since 2.3.0
 */
def register(name: String, f: UDF0[_], returnType: DataType): Unit =
  registerJavaUDF(name, () => f.asInstanceOf[UDF0[Any]].call(), returnType, 0)

/**
 * Register a deterministic Java UDF1 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF1[_, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF1[Any, Any]].call(_: Any), returnType, 1)

/**
 * Register a deterministic Java UDF2 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF2[_, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF2[Any, Any, Any]].call(_: Any, _: Any), returnType, 2)

/**
 * Register a deterministic Java UDF3 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF3[_, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF3[Any, Any, Any, Any]].call(_: Any, _: Any, _: Any), returnType, 3)

/**
 * Register a deterministic Java UDF4 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF4[_, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF4[Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any), returnType, 4)

/**
 * Register a deterministic Java UDF5 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF5[_, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF5[Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any), returnType, 5)

/**
 * Register a deterministic Java UDF6 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF6[_, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF6[Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 6)

/**
 * Register a deterministic Java UDF7 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF7[_, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF7[Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 7)

/**
 * Register a deterministic Java UDF8 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF8[_, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF8[Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 8)

/**
 * Register a deterministic Java UDF9 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF9[_, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF9[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 9)

/**
 * Register a deterministic Java UDF10 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 10)

/**
 * Register a deterministic Java UDF11 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF11[_, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF11[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 11)

/**
 * Register a deterministic Java UDF12 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF12[_, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF12[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 12)

/**
 * Register a deterministic Java UDF13 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF13[_, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF13[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 13)

/**
 * Register a deterministic Java UDF14 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF14[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF14[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 14)

/**
 * Register a deterministic Java UDF15 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF15[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF15[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 15)

/**
 * Register a deterministic Java UDF16 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF16[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF16[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 16)

/**
 * Register a deterministic Java UDF17 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF17[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF17[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 17)

/**
 * Register a deterministic Java UDF18 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF18[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF18[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 18)

/**
 * Register a deterministic Java UDF19 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF19[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF19[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 19)

/**
 * Register a deterministic Java UDF20 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF20[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF20[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 20)

/**
 * Register a deterministic Java UDF21 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF21[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF21[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 21)

/**
 * Register a deterministic Java UDF22 instance as user-defined function (UDF).
 * @since 1.3.0
 */
def register(name: String, f: UDF22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType): Unit =
  registerJavaUDF(name, f.asInstanceOf[UDF22[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, 22)

// scalastyle:on line.size.limit
}
private[sql] object UDFRegistration {
  /**
   * Obtains the schema of the output encoder for `ScalaUDF`.
   *
   * Serialization in `ScalaUDF` applies to an individual column rather than the
   * whole row, so the data type and nullability are read off the vanilla object
   * serializer instead of `serializer`, which is transformed for top-level rows.
   */
  def outputSchema(outputEncoder: ExpressionEncoder[_]): ScalaReflection.Schema = {
    val objSerializer = outputEncoder.objSerializer
    ScalaReflection.Schema(objSerializer.dataType, objSerializer.nullable)
  }
}
| witgo/spark | sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala | Scala | apache-2.0 | 70,319 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala
import org.apache.camel.test.junit4.CamelTestSupport
import org.apache.camel.scala.dsl.builder.{RouteBuilder,RouteBuilderSupport}
import org.junit.Test
/**
 * Verifies that a Scala-DSL `RouteBuilder` can back a `CamelTestSupport`-based
 * test via the `RouteBuilderSupport` adapter.
 */
class CamelTestSupportTest extends CamelTestSupport with RouteBuilderSupport {
  // Adapt the Scala DSL `builder` below to the Java RouteBuilder that
  // CamelTestSupport expects (conversion provided by RouteBuilderSupport).
  override protected def createRouteBuilder = builder

  @Test
  def testValidRequest() {
    val mock = getMockEndpoint("mock:output")
    val message = "HelloWorld"
    // The route forwards the body from direct:start to mock:output unchanged,
    // so the mock must receive exactly the sent message.
    mock.expectedBodiesReceived(message)
    template.sendBody("direct:start", message)
    assertMockEndpointsSatisfied()
  }

  // Route under test: direct:start -> mock:output
  val builder = new RouteBuilder {
    "direct:start" to "mock:output"
  }
} | sabre1041/camel | components/camel-scala/src/test/scala/org/apache/camel/scala/CamelTestSupportTest.scala | Scala | apache-2.0 | 1,449 |
package one.lockstep.lock.client
/** Marker trait for the configuration parameters of a lock. */
trait LockParams

/**
 * Lock parameters carrying only a lockout threshold.
 *
 * Marked `final`: case classes should not be extended, as that breaks the
 * generated equality/copy semantics.
 *
 * @param lockoutThreshold lockout threshold (presumably the number of failed
 *                         attempts tolerated before lockout — confirm with the
 *                         lock client callers)
 */
final case class BasicLockParams(lockoutThreshold: Int) extends LockParams
| lockstep-one/vault | vault-client/src/main/scala/one/lockstep/lock/client/LockParams.scala | Scala | agpl-3.0 | 121 |
package org.kokho.scheduling
/**
 * Created with IntelliJ IDEA on 5/28/15.
 * @author Mikhail Kokho
 *
 * A Task is an immutable object that produces an infinite sequence of jobs.
 *
 * It is specified by four parameters:
 *  - offset: release time of the first job
 *  - execution: amount of time required to process a job
 *  - deadline: relative deadline of a job
 *  - period: minimal period of time after which the next job can be released
 */
trait Task {

  /** The type of jobs released by this task. */
  type JobType <: Job

  def offset: Int

  def execution: Int

  def deadline: Int

  def period: Int

  /** Ratio of the execution requirement to the relative deadline. */
  final def utilization: Double = execution.toDouble / deadline

  def name: String = "T"

  /**
   * Generates the jobs of this task starting at time `from`.
   *
   * @param from a non-negative bound on the release time of the first job;
   *             all produced jobs have release time greater than `from`
   * @return an iterator of jobs
   * @throws IllegalArgumentException if `from` is smaller than 0
   */
  def jobs(from: Int): Iterator[JobType]

  /** Generates all jobs released by this task, starting at its offset. */
  def jobs(): Iterator[JobType] = jobs(this.offset)

  /**
   * Skips `n` jobs and returns the following one.
   *
   * @param n the number of jobs to skip; must be non-negative
   */
  def job(n: Int): JobType = {
    require(n >= 0)
    val remaining = jobs().drop(n)
    remaining.next()
  }

  override def toString: String = {
    // A task with zero offset and implicit deadline (= period) prints compactly.
    val implicitForm = offset == 0 && deadline == period
    if (implicitForm) s"$name($execution in $period)"
    else s"$name($offset, $execution, $deadline, $period)"
  }
}
| mkokho/dynoslack | src/main/scala/org/kokho/scheduling/Task.scala | Scala | apache-2.0 | 1,663 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js tools **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2014, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.core.tools.optimizer
import scala.language.implicitConversions
import scala.annotation.switch
import scala.collection.mutable
import org.scalajs.core.ir._
import Definitions._
import Trees._
import Types._
import org.scalajs.core.tools.logging._
/** Checker for the validity of the IR. */
class IRChecker(unit: LinkingUnit, logger: Logger) {
import IRChecker._
// Running count of reported errors; check() succeeds iff this stays at 0.
// Presumably incremented by reportError (defined outside this chunk) — confirm.
private var _errorCount: Int = 0
def errorCount: Int = _errorCount

// Lookup table from class name to its checked representation, built eagerly
// from every class def in the linking unit.
private val classes: mutable.Map[String, CheckedClass] = {
  val tups = for (classDef <- unit.classDefs) yield {
    implicit val ctx = ErrorContext(classDef)
    val c = new CheckedClass(classDef)
    c.name -> c
  }
  mutable.Map(tups: _*)
}
/**
 * Checks every class def of the linking unit.
 *
 * Raw JS types may not have any instance members; interfaces may not have
 * concrete instance members (abstract methods are allowed, unlike for raw JS
 * types); everything else is checked as a Scala class.
 *
 * @return true iff no error was reported
 */
def check(): Boolean = {
  for (classDef <- unit.classDefs) {
    implicit val ctx = ErrorContext(classDef)
    checkStaticMembers(classDef)
    classDef.kind match {
      case ClassKind.RawJSType =>
        if (classDef.fields.nonEmpty ||
            classDef.memberMethods.nonEmpty ||
            classDef.abstractMethods.nonEmpty ||
            classDef.exportedMembers.nonEmpty ||
            classDef.classExports.nonEmpty) {
          reportError(s"Raw JS type ${classDef.name} cannot "+
              "have instance members")
        }
      case ClassKind.Interface =>
        // Note: abstractMethods are permitted here, contrary to RawJSType.
        if (classDef.fields.nonEmpty ||
            classDef.memberMethods.nonEmpty ||
            classDef.exportedMembers.nonEmpty ||
            classDef.classExports.nonEmpty) {
          reportError(s"Interface ${classDef.name} cannot "+
              "have concrete instance members")
        }
      case _ =>
        checkScalaClassDef(classDef)
    }
  }
  errorCount == 0
}
/**
 * Checks the static methods of a class: they must be flagged static, have an
 * `Ident` name (i.e. not be exported), and their defs must typecheck.
 */
def checkStaticMembers(classDef: LinkedClass): Unit = {
  classDef.staticMethods.foreach { member =>
    val methodDef = member.tree
    implicit val ctx = ErrorContext(methodDef)

    assert(methodDef.static, "Found non-static member in static defs")

    methodDef.name match {
      case _: Ident =>
        checkMethodDef(methodDef, classDef)
      case _ =>
        reportError(s"Static method ${methodDef.name} cannot be exported")
    }
  }
}
/**
 * Checks a Scala class (normal, module or hijacked class): its fields,
 * exported members, class exports, and member/abstract methods.
 * Must not be called for raw JS types or interfaces.
 */
def checkScalaClassDef(classDef: LinkedClass): Unit = {
  assert(classDef.kind != ClassKind.RawJSType &&
      classDef.kind != ClassKind.Interface)

  // Is this a normal class?
  if (classDef.kind != ClassKind.HijackedClass) {
    // Check fields
    for (field <- classDef.fields) {
      implicit val ctx = ErrorContext(field)
      checkFieldDef(field, classDef)
    }

    // Check exported members: only methods and properties, both of which
    // must carry a StringLiteral (JS) name.
    for (member <- classDef.exportedMembers) {
      implicit val ctx = ErrorContext(member.tree)
      member.tree match {
        case m: MethodDef =>
          assert(m.name.isInstanceOf[StringLiteral],
              "Exported method must have StringLiteral as name")
          checkExportedMethodDef(m, classDef)
        case p: PropertyDef =>
          assert(p.name.isInstanceOf[StringLiteral],
              "Exported property must have StringLiteral as name")
          checkExportedPropertyDef(p, classDef)
        // Anything else is illegal
        case _ =>
          reportError("Illegal exported class member of type " +
              member.tree.getClass.getName)
      }
    }

    // Check classExports: only constructor and module exports are legal.
    for (tree <- classDef.classExports) {
      implicit val ctx = ErrorContext(tree)
      tree match {
        case member @ ConstructorExportDef(_, _, _) =>
          checkConstructorExportDef(member, classDef)
        case member @ ModuleExportDef(_) =>
          checkModuleExportDef(member, classDef)
        // Anything else is illegal
        case _ =>
          reportError("Illegal class export of type " +
              tree.getClass.getName)
      }
    }
  } else {
    // Hijacked classes may carry neither fields nor exports.
    implicit val ctx = ErrorContext(classDef)
    if (classDef.fields.nonEmpty)
      reportError("Hijacked classes may not have fields")
    if (classDef.exportedMembers.nonEmpty || classDef.classExports.nonEmpty)
      reportError("Hijacked classes may not have exports")
  }

  // Check methods (applies to hijacked classes as well).
  for (method <- classDef.memberMethods ++ classDef.abstractMethods) {
    val tree = method.tree
    implicit val ctx = ErrorContext(tree)

    assert(!tree.static, "Member or abstract method may not be static")
    assert(tree.name.isInstanceOf[Ident],
        "Normal method must have Ident as name")
    checkMethodDef(tree, classDef)
  }
}
/**
 * Checks a field definition: its declared type must not be `NoType`.
 * (Mutability of fields is enforced at assignment sites, not here.)
 */
def checkFieldDef(fieldDef: FieldDef, classDef: LinkedClass): Unit = {
  // Only the type is inspected; name and mutability are irrelevant here,
  // so bind them to `_` instead of unused names.
  val FieldDef(_, tpe, _) = fieldDef
  implicit val ctx = ErrorContext(fieldDef)

  if (tpe == NoType)
    reportError("FieldDef cannot have type NoType") // dropped no-op `s` interpolator
}
/**
 * Checks a (static, member or abstract) method definition:
 *  - no parameter may have type NoType;
 *  - the advertized signature must agree with the one encoded in the name
 *    (constructors are encoded with result type NoType);
 *  - an abstract method (EmptyTree body) may be neither static nor a
 *    constructor;
 *  - a concrete body is typechecked against the declared result type.
 */
def checkMethodDef(methodDef: MethodDef, classDef: LinkedClass): Unit = {
  val MethodDef(static, Ident(name, _), params, resultType, body) = methodDef
  implicit val ctx = ErrorContext(methodDef)

  for (ParamDef(name, tpe, _) <- params)
    if (tpe == NoType)
      reportError(s"Parameter $name has type NoType")

  val isConstructor = isConstructorName(name)

  // Constructors advertize NoType as their result in the encoded name.
  val resultTypeForSig =
    if (isConstructor) NoType
    else resultType

  val advertizedSig = (params.map(_.ptpe), resultTypeForSig)
  val sigFromName = inferMethodType(name, static)
  if (advertizedSig != sigFromName) {
    reportError(
        s"The signature of ${classDef.name.name}.$name, which is "+
        s"$advertizedSig, does not match its name (should be $sigFromName).")
  }

  if (body == EmptyTree) {
    // Abstract
    if (static)
      reportError(s"Static method ${classDef.name.name}.$name cannot be abstract")
    else if (isConstructor)
      reportError(s"Constructor ${classDef.name.name}.$name cannot be abstract")
  } else {
    // Concrete
    // Static methods have no `this`; the constructor flag lets the body env
    // permit assignments to immutable fields of `this` (see typecheckStat).
    val thisType =
      if (static) NoType
      else ClassType(classDef.name.name)
    val bodyEnv = Env.fromSignature(thisType, params, resultType, isConstructor)
    if (resultType == NoType)
      typecheckStat(body, bodyEnv)
    else
      typecheckExpect(body, bodyEnv, resultType)
  }
}
/**
 * Checks an exported method def: it may only appear in a class, cannot be
 * static, its JS name may not contain "__" (except the special exported
 * constructors name), and all parameter types and the result type must be Any.
 */
def checkExportedMethodDef(methodDef: MethodDef,
    classDef: LinkedClass): Unit = {
  val MethodDef(static, StringLiteral(name), params, resultType, body) = methodDef
  implicit val ctx = ErrorContext(methodDef)

  if (!classDef.kind.isClass) {
    reportError(s"Exported method def can only appear in a class")
    return
  }

  if (static)
    reportError("Exported method def cannot be static")

  if (name.contains("__") && name != Definitions.ExportedConstructorsName)
    reportError("Exported method def name cannot contain __")

  for (ParamDef(name, tpe, _) <- params) {
    if (tpe == NoType)
      reportError(s"Parameter $name has type NoType")
    else if (tpe != AnyType)
      reportError(s"Parameter $name of exported method def has type $tpe, "+
          "but must be Any")
  }

  if (resultType != AnyType) {
    reportError(s"Result type of exported method def is $resultType, "+
        "but must be Any")
  }

  // Exported methods may use the JS `arguments` pseudo-variable in their body.
  val thisType = ClassType(classDef.name.name)
  val bodyEnv = Env.fromSignature(thisType, params, resultType)
    .withArgumentsVar(methodDef.pos)
  typecheckExpect(body, bodyEnv, resultType)
}
/**
 * Checks an exported property def (getter and/or setter): it may only appear
 * in a class; the getter body must typecheck as Any; the setter argument must
 * be Any and its body typechecks as a statement. Either body may be absent
 * (EmptyTree).
 */
def checkExportedPropertyDef(propDef: PropertyDef,
    classDef: LinkedClass): Unit = {
  val PropertyDef(_, getterBody, setterArg, setterBody) = propDef
  implicit val ctx = ErrorContext(propDef)

  if (!classDef.kind.isClass) {
    reportError(s"Exported property def can only appear in a class")
    return
  }

  val thisType = ClassType(classDef.name.name)

  if (getterBody != EmptyTree) {
    val getterBodyEnv = Env.fromSignature(thisType, Nil, AnyType)
    typecheckExpect(getterBody, getterBodyEnv, AnyType)
  }

  if (setterBody != EmptyTree) {
    if (setterArg.ptpe != AnyType)
      reportError("Setter argument of exported property def has type "+
          s"${setterArg.ptpe}, but must be Any")

    // Setters return nothing: the body is typechecked as a statement (NoType).
    val setterBodyEnv = Env.fromSignature(thisType, List(setterArg), NoType)
    typecheckStat(setterBody, setterBodyEnv)
  }
}
/** Checks an exported (JavaScript-visible) constructor def.
 *
 *  Like exported methods, all parameters must be typed `any`; the body is
 *  checked in statement position (constructors produce no value).
 */
def checkConstructorExportDef(ctorDef: ConstructorExportDef,
    classDef: LinkedClass): Unit = {
  val ConstructorExportDef(_, params, body) = ctorDef
  implicit val ctx = ErrorContext(ctorDef)

  if (!classDef.kind.isClass) {
    reportError(s"Exported constructor def can only appear in a class")
    return
  }

  for (ParamDef(name, tpe, _) <- params) {
    if (tpe == NoType)
      reportError(s"Parameter $name has type NoType")
    else if (tpe != AnyType)
      reportError(s"Parameter $name of exported constructor def has type "+
          s"$tpe, but must be Any")
  }

  val thisType = ClassType(classDef.name.name)
  // Exported constructors may use the JavaScript `arguments` object.
  val bodyEnv = Env.fromSignature(thisType, params, NoType)
      .withArgumentsVar(ctorDef.pos)
  typecheckStat(body, bodyEnv)
}
/** Checks a module export def: it is only legal on a module class. */
def checkModuleExportDef(moduleDef: ModuleExportDef,
    classDef: LinkedClass): Unit = {
  implicit val ctx = ErrorContext(moduleDef)

  classDef.kind match {
    case ClassKind.ModuleClass =>
      // ok
    case _ =>
      reportError("Exported module def can only appear in a module class")
  }
}
/** Typechecks a tree in statement position.
 *
 *  Returns the environment in which the following statements must be
 *  checked: a `VarDef` adds its variable to the returned environment; all
 *  other statements return `env` unchanged.
 */
def typecheckStat(tree: Tree, env: Env): Env = {
  implicit val ctx = ErrorContext(tree)

  tree match {
    case VarDef(ident, vtpe, mutable, rhs) =>
      typecheckExpect(rhs, env, vtpe)
      env.withLocal(LocalDef(ident.name, vtpe, mutable)(tree.pos))

    case Skip() =>
      env

    case Assign(select, rhs) =>
      // Check that the lhs is actually assignable before typechecking it.
      select match {
        case Select(This(), Ident(_, _)) if env.inConstructor =>
          // ok: constructors may initialize immutable fields
        case Select(receiver, Ident(name, _)) =>
          receiver.tpe match {
            case ClassType(clazz) =>
              for {
                c <- tryLookupClass(clazz).right
                f <- c.lookupField(name)
                if !f.mutable
              } reportError(s"Assignment to immutable field $name.")
            case _ =>
          }
        /* Fixed: use a safe lookup instead of `env.locals(name)`, which
         * threw a NoSuchElementException for an unbound variable; an
         * unbound name is now reported by `typecheckExpr` below instead of
         * crashing the checker.
         */
        case VarRef(Ident(name, _)) if env.locals.get(name).exists(!_.mutable) =>
          reportError(s"Assignment to immutable variable $name.")
        case _ =>
      }
      val lhsTpe = typecheckExpr(select, env)
      // JS selections are dynamically typed, so any rhs is acceptable.
      val expectedRhsTpe = select match {
        case _:JSDotSelect | _:JSBracketSelect => AnyType
        case _ => lhsTpe
      }
      typecheckExpect(rhs, env, expectedRhsTpe)
      env

    case StoreModule(cls, value) =>
      // Module classes are identified by the trailing '$' of their name.
      if (!cls.className.endsWith("$"))
        reportError(s"StoreModule of non-module class $cls") // fixed: was missing the s interpolator
      typecheckExpect(value, env, ClassType(cls.className))
      env

    case Block(stats) =>
      // Thread the environment through the statements (VarDefs accumulate),
      // but the block itself does not leak its locals.
      (env /: stats) { (prevEnv, stat) =>
        typecheckStat(stat, prevEnv)
      }
      env

    case Labeled(label, NoType, body) =>
      typecheckStat(body, env.withLabeledReturnType(label.name, AnyType))
      env

    case If(cond, thenp, elsep) =>
      typecheckExpect(cond, env, BooleanType)
      typecheckStat(thenp, env)
      typecheckStat(elsep, env)
      env

    case While(cond, body, label) =>
      typecheckExpect(cond, env, BooleanType)
      typecheckStat(body, env)
      env

    case DoWhile(body, cond, label) =>
      typecheckStat(body, env)
      typecheckExpect(cond, env, BooleanType)
      env

    case Try(block, errVar, handler, finalizer) =>
      typecheckStat(block, env)
      if (handler != EmptyTree) {
        // The caught error is always typed as `any`.
        val handlerEnv =
          env.withLocal(LocalDef(errVar.name, AnyType, false)(errVar.pos))
        typecheckStat(handler, handlerEnv)
      }
      if (finalizer != EmptyTree) {
        typecheckStat(finalizer, env)
      }
      env

    case Match(selector, cases, default) =>
      typecheckExpr(selector, env)
      for ((alts, body) <- cases) {
        alts.foreach(typecheckExpr(_, env))
        typecheckStat(body, env)
      }
      typecheckStat(default, env)
      env

    case Debugger() =>
      env

    case JSDelete(JSDotSelect(obj, prop)) =>
      typecheckExpr(obj, env)
      env

    case JSDelete(JSBracketSelect(obj, prop)) =>
      typecheckExpr(obj, env)
      typecheckExpr(prop, env)
      env

    case _ =>
      // Any expression is also a valid statement; its value is discarded.
      typecheck(tree, env)
      env
  }
}
/** Typechecks `tree` as an expression and reports an error when its
 *  actual type is not a subtype of `expectedType`.
 */
def typecheckExpect(tree: Tree, env: Env, expectedType: Type)(
    implicit ctx: ErrorContext): Unit = {
  val actualType = typecheckExpr(tree, env)
  val conforms = isSubtype(actualType, expectedType)
  if (!conforms) {
    reportError(s"$expectedType expected but $actualType found "+
        s"for tree of type ${tree.getClass.getName}")
  }
}
/** Typechecks `tree` in expression position.
 *
 *  An expression must have a real type: `NoType` is only valid in
 *  statement position.
 */
def typecheckExpr(tree: Tree, env: Env): Type = {
  implicit val ctx = ErrorContext(tree)

  if (tree.tpe == NoType)
    reportError("Expression tree has type NoType")

  typecheck(tree, env)
}
/** Like `typecheckExpr`, but additionally accepts a `JSSpread` argument
 *  (as found in JS call argument lists), whose items are themselves
 *  checked as an expression.
 */
private def typecheckExprOrSpread(tree: Tree, env: Env): Type = {
  tree match {
    case JSSpread(items) =>
      typecheckExpr(items, env)
      AnyType
    case other =>
      typecheckExpr(other, env)
  }
}
/** Typechecks any tree and returns its declared type (`tree.tpe`).
 *
 *  Errors are accumulated through `reportError` rather than thrown, so
 *  checking always proceeds through the whole tree.
 */
def typecheck(tree: Tree, env: Env): Type = {
  implicit val ctx = ErrorContext(tree)

  // Checks the arguments and declared result type of a method call
  // against the signature inferred from the encoded method name.
  def checkApplyGeneric(methodName: String, methodFullName: String,
      args: List[Tree], isStatic: Boolean): Unit = {
    val (methodParams, resultType) = inferMethodType(methodName, isStatic)
    if (args.size != methodParams.size)
      reportError(s"Arity mismatch: ${methodParams.size} expected but "+
          s"${args.size} found")
    for ((actual, formal) <- args zip methodParams) {
      typecheckExpect(actual, env, formal)
    }
    // Constructors are special: their encoded result type is NoType.
    if (!isConstructorName(methodName) && tree.tpe != resultType)
      reportError(s"Call to $methodFullName of type $resultType "+
          s"typed as ${tree.tpe}")
  }

  tree match {
    // Control flow constructs

    case Block(statsAndExpr) =>
      // All but the last element are statements; the last is the value.
      val stats :+ expr = statsAndExpr
      val envAfterStats = (env /: stats) { (prevEnv, stat) =>
        typecheckStat(stat, prevEnv)
      }
      typecheckExpr(expr, envAfterStats)

    case Labeled(label, tpe, body) =>
      typecheckExpect(body, env.withLabeledReturnType(label.name, tpe), tpe)

    case Return(expr, label) =>
      env.returnTypes.get(label.map(_.name)).fold[Unit] {
        reportError(s"Cannot return to label $label.")
        typecheckExpr(expr, env)
      } { returnType =>
        typecheckExpect(expr, env, returnType)
      }

    case If(cond, thenp, elsep) =>
      val tpe = tree.tpe
      typecheckExpect(cond, env, BooleanType)
      typecheckExpect(thenp, env, tpe)
      typecheckExpect(elsep, env, tpe)

    case While(BooleanLiteral(true), body, label) if tree.tpe == NothingType =>
      // `while (true)` never completes, hence may be typed as Nothing.
      typecheckStat(body, env)

    case Try(block, errVar, handler, finalizer) =>
      val tpe = tree.tpe
      typecheckExpect(block, env, tpe)
      if (handler != EmptyTree) {
        val handlerEnv =
          env.withLocal(LocalDef(errVar.name, AnyType, false)(errVar.pos))
        typecheckExpect(handler, handlerEnv, tpe)
      }
      if (finalizer != EmptyTree) {
        typecheckStat(finalizer, env)
      }

    case Throw(expr) =>
      typecheckExpr(expr, env)

    case Continue(label) =>
      /* Here we could check that it is indeed legal to break to the
       * specified label. However, if we do anything illegal here, it will
       * result in a SyntaxError in JavaScript anyway, so we do not really
       * care.
       */

    case Match(selector, cases, default) =>
      val tpe = tree.tpe
      typecheckExpr(selector, env)
      for ((alts, body) <- cases) {
        alts.foreach(typecheckExpr(_, env))
        typecheckExpect(body, env, tpe)
      }
      typecheckExpect(default, env, tpe)

    // Scala expressions

    case New(cls, ctor, args) =>
      val clazz = lookupClass(cls)
      if (!clazz.kind.isClass)
        reportError(s"new $cls which is not a class")
      checkApplyGeneric(ctor.name, s"$cls.$ctor", args, isStatic = false)

    case LoadModule(cls) =>
      // Module classes are identified by the trailing '$' of their name.
      if (!cls.className.endsWith("$"))
        reportError(s"LoadModule of non-module class $cls") // fixed: was missing the s interpolator

    case Select(qualifier, Ident(item, _)) =>
      val qualType = typecheckExpr(qualifier, env)
      qualType match {
        case ClassType(cls) =>
          val maybeClass = tryLookupClass(cls)
          val kind = maybeClass.fold(_.kind, _.kind)
          if (!kind.isClass) {
            reportError(s"Cannot select $item of non-class $cls")
          } else {
            // Field existence/type can only be checked for classes that
            // are part of the linking unit.
            maybeClass.right foreach {
              _.lookupField(item).fold[Unit] {
                reportError(s"Class $cls does not have a field $item")
              } { fieldDef =>
                if (fieldDef.tpe != tree.tpe)
                  reportError(s"Select $cls.$item of type "+
                      s"${fieldDef.tpe} typed as ${tree.tpe}")
              }
            }
          }
        case NullType | NothingType =>
          // always ok
        case _ =>
          reportError(s"Cannot select $item of non-class type $qualType")
      }

    case Apply(receiver, Ident(method, _), args) =>
      val receiverType = typecheckExpr(receiver, env)
      checkApplyGeneric(method, s"$receiverType.$method", args,
          isStatic = false)

    case ApplyStatically(receiver, cls, Ident(method, _), args) =>
      typecheckExpect(receiver, env, cls)
      checkApplyGeneric(method, s"$cls.$method", args, isStatic = false)

    case ApplyStatic(cls, Ident(method, _), args) =>
      // The lookup is performed for its side effect of reporting an
      // unknown class.
      val clazz = lookupClass(cls)
      checkApplyGeneric(method, s"$cls.$method", args, isStatic = true)

    case UnaryOp(op, lhs) =>
      import UnaryOp._
      (op: @switch) match {
        case IntToLong =>
          typecheckExpect(lhs, env, IntType)
        case LongToInt | LongToDouble =>
          typecheckExpect(lhs, env, LongType)
        case DoubleToInt | DoubleToFloat | DoubleToLong =>
          typecheckExpect(lhs, env, DoubleType)
        case Boolean_! =>
          typecheckExpect(lhs, env, BooleanType)
      }

    case BinaryOp(op, lhs, rhs) =>
      import BinaryOp._
      (op: @switch) match {
        case === | !== | String_+ =>
          typecheckExpr(lhs, env)
          typecheckExpr(rhs, env)
        case Int_+ | Int_- | Int_* | Int_/ | Int_% |
            Int_| | Int_& | Int_^ | Int_<< | Int_>>> | Int_>> =>
          typecheckExpect(lhs, env, IntType)
          typecheckExpect(rhs, env, IntType)
        case Float_+ | Float_- | Float_* | Float_/ | Float_% =>
          typecheckExpect(lhs, env, FloatType)
          typecheckExpect(rhs, env, FloatType) // fixed: lhs was erroneously checked twice, rhs never
        case Long_+ | Long_- | Long_* | Long_/ | Long_% |
            Long_| | Long_& | Long_^ |
            Long_== | Long_!= | Long_< | Long_<= | Long_> | Long_>= =>
          typecheckExpect(lhs, env, LongType)
          typecheckExpect(rhs, env, LongType)
        case Long_<< | Long_>>> | Long_>> =>
          // Long shifts take an Int shift amount.
          typecheckExpect(lhs, env, LongType)
          typecheckExpect(rhs, env, IntType)
        case Double_+ | Double_- | Double_* | Double_/ | Double_% |
            Num_== | Num_!= | Num_< | Num_<= | Num_> | Num_>= =>
          typecheckExpect(lhs, env, DoubleType)
          typecheckExpect(rhs, env, DoubleType) // fixed: lhs was erroneously checked twice, rhs never
        case Boolean_== | Boolean_!= | Boolean_| | Boolean_& =>
          typecheckExpect(lhs, env, BooleanType)
          typecheckExpect(rhs, env, BooleanType)
      }

    case NewArray(tpe, lengths) =>
      for (length <- lengths)
        typecheckExpect(length, env, IntType)

    case ArrayValue(tpe, elems) =>
      val elemType = arrayElemType(tpe)
      for (elem <- elems)
        typecheckExpect(elem, env, elemType)

    case ArrayLength(array) =>
      val arrayType = typecheckExpr(array, env)
      if (!arrayType.isInstanceOf[ArrayType])
        reportError(s"Array type expected but $arrayType found")

    case ArraySelect(array, index) =>
      typecheckExpect(index, env, IntType)
      typecheckExpr(array, env) match {
        case arrayType: ArrayType =>
          if (tree.tpe != arrayElemType(arrayType))
            reportError(s"Array select of array type $arrayType typed as ${tree.tpe}")
        case arrayType =>
          reportError(s"Array type expected but $arrayType found")
      }

    case IsInstanceOf(expr, cls) =>
      typecheckExpr(expr, env)

    case AsInstanceOf(expr, cls) =>
      typecheckExpr(expr, env)

    case Unbox(expr, _) =>
      typecheckExpr(expr, env)

    case GetClass(expr) =>
      typecheckExpr(expr, env)

    // JavaScript expressions

    case JSNew(ctor, args) =>
      typecheckExpr(ctor, env)
      for (arg <- args)
        typecheckExprOrSpread(arg, env)

    case JSDotSelect(qualifier, item) =>
      typecheckExpr(qualifier, env)

    case JSBracketSelect(qualifier, item) =>
      typecheckExpr(qualifier, env)
      typecheckExpr(item, env)

    case JSFunctionApply(fun, args) =>
      typecheckExpr(fun, env)
      for (arg <- args)
        typecheckExprOrSpread(arg, env)

    case JSDotMethodApply(receiver, method, args) =>
      typecheckExpr(receiver, env)
      for (arg <- args)
        typecheckExprOrSpread(arg, env)

    case JSBracketMethodApply(receiver, method, args) =>
      typecheckExpr(receiver, env)
      typecheckExpr(method, env)
      for (arg <- args)
        typecheckExprOrSpread(arg, env)

    case JSUnaryOp(op, lhs) =>
      typecheckExpr(lhs, env)

    case JSBinaryOp(op, lhs, rhs) =>
      typecheckExpr(lhs, env)
      typecheckExpr(rhs, env)

    case JSArrayConstr(items) =>
      for (item <- items)
        typecheckExprOrSpread(item, env)

    case JSObjectConstr(fields) =>
      for ((_, value) <- fields)
        typecheckExpr(value, env)

    case JSEnvInfo() =>

    // Literals

    case _: Literal =>

    // Atomic expressions

    case VarRef(Ident(name, _)) =>
      env.locals.get(name).fold[Unit] {
        reportError(s"Cannot find variable $name in scope")
      } { localDef =>
        if (tree.tpe != localDef.tpe)
          reportError(s"Variable $name of type ${localDef.tpe} "+
              s"typed as ${tree.tpe}")
      }

    case This() =>
      if (!isSubtype(env.thisTpe, tree.tpe))
        reportError(s"this of type ${env.thisTpe} typed as ${tree.tpe}")

    case Closure(captureParams, params, body, captureValues) =>
      if (captureParams.size != captureValues.size)
        reportError("Mismatched size for captures: "+
            s"${captureParams.size} params vs ${captureValues.size} values")

      for ((ParamDef(name, ctpe, mutable), value) <- captureParams zip captureValues) {
        if (mutable)
          reportError(s"Capture parameter $name cannot be mutable")
        if (ctpe == NoType)
          reportError(s"Parameter $name has type NoType")
        else
          typecheckExpect(value, env, ctpe)
      }

      // Non-capture params cross the JS boundary, hence must be `any`.
      for (ParamDef(name, ptpe, mutable) <- params) {
        if (ptpe == NoType)
          reportError(s"Parameter $name has type NoType")
        else if (ptpe != AnyType)
          reportError(s"Closure parameter $name has type $ptpe instead of any")
      }

      // The body is checked in a fresh env: closures capture explicitly.
      val bodyEnv = Env.fromSignature(
          AnyType, captureParams ++ params, AnyType)
      typecheckExpect(body, bodyEnv, AnyType)

    case _ =>
      reportError(s"Invalid expression tree")
  }

  tree.tpe
}
/** Infers the parameter types and result type of a method from its
 *  encoded (name-mangled) name, whose components are separated by "__".
 */
def inferMethodType(encodedName: String, isStatic: Boolean)(
    implicit ctx: ErrorContext): (List[Type], Type) = {
  // Drops the extra leading component carried by some encoded names
  // (presumably the private-name marker, which starts with "p" -- its
  // exact encoding is defined elsewhere; confirm against the encoder).
  def dropPrivateMarker(params: List[String]): List[String] =
    if (params.nonEmpty && params.head.startsWith("p")) params.tail
    else params

  if (isConstructorName(encodedName)) {
    assert(!isStatic, "Constructor cannot be static")
    // Constructors encode only their parameter types and return NoType.
    val params = dropPrivateMarker(
        encodedName.stripPrefix("init___").split("__").toList)
    // A zero-argument constructor leaves a single empty component.
    if (params == List("")) (Nil, NoType)
    else (params.map(decodeType), NoType)
  } else if (isReflProxyName(encodedName)) {
    assert(!isStatic, "Refl proxy method cannot be static")
    // Reflective proxies have no encoded result type; they return any.
    val params = dropPrivateMarker(encodedName.split("__").toList.tail)
    (params.map(decodeType), AnyType)
  } else {
    // Regular method: name__param1__...__paramN__result
    val paramsAndResult0 =
      encodedName.split("__").toList.tail
    val paramsAndResult =
      dropPrivateMarker(paramsAndResult0)
    (paramsAndResult.init.map(decodeType), decodeType(paramsAndResult.last))
  }
}
/** Decodes an encoded type name (one component of a mangled method name)
 *  into an IR `Type`.
 */
def decodeType(encodedName: String)(implicit ctx: ErrorContext): Type = {
  if (encodedName.isEmpty) NoType
  else if (encodedName.charAt(0) == 'A') {
    // Array type: one leading 'A' per dimension, then the base type name.
    val dims = encodedName.indexWhere(_ != 'A')
    val base = encodedName.substring(dims)
    ArrayType(base, dims)
  } else if (encodedName.length == 1) {
    // Single-character codes for primitives (JVM-descriptor style).
    (encodedName.charAt(0): @switch) match {
      case 'V' => NoType
      case 'Z' => BooleanType
      case 'C' | 'B' | 'S' | 'I' => IntType
      case 'J' => LongType
      case 'F' => FloatType
      case 'D' => DoubleType
      case 'O' => AnyType
      case 'T' => ClassType(StringClass) // NOT StringType
    }
  } else if (encodedName == "sr_Nothing$") {
    NothingType
  } else if (encodedName == "sr_Null$") {
    NullType
  } else {
    // Raw JS types are typed as any; everything else is a class type.
    val kind = tryLookupClass(encodedName).fold(_.kind, _.kind)
    if (kind == ClassKind.RawJSType) AnyType
    else ClassType(encodedName)
  }
}
/** Computes the element type of an array type: strips one dimension,
 *  decoding the base class name once the last dimension is removed.
 */
def arrayElemType(arrayType: ArrayType)(implicit ctx: ErrorContext): Type = {
  val remainingDims = arrayType.dimensions - 1
  if (remainingDims == 0) decodeType(arrayType.baseClassName)
  else ArrayType(arrayType.baseClassName, remainingDims)
}
/** Logs a checking error in the given context and increments the error
 *  count; checking continues so that all errors are reported.
 */
def reportError(msg: String)(implicit ctx: ErrorContext): Unit = {
  logger.error(s"$ctx: $msg")
  _errorCount += 1
}
/** Looks up the `ClassInfo` of `className` in the linking unit,
 *  reporting an error and returning an empty placeholder info when the
 *  class is unknown.
 */
def lookupInfo(className: String)(implicit ctx: ErrorContext): Infos.ClassInfo = {
  unit.infos.getOrElse(className, {
    reportError(s"Cannot find info for class $className")
    Infos.ClassInfo(className)
  })
}
/** Looks up a class: `Right` with its `CheckedClass` when it is part of
 *  the linking unit, otherwise `Left` with its (possibly placeholder)
 *  `ClassInfo`.
 */
def tryLookupClass(className: String)(
    implicit ctx: ErrorContext): Either[Infos.ClassInfo, CheckedClass] = {
  classes.get(className) match {
    case Some(checked) => Right(checked)
    case None          => Left(lookupInfo(className))
  }
}
/** Looks up a `CheckedClass`, reporting an error for an unknown class.
 *  The synthesized placeholder (a plain class extending Object) is
 *  cached via `getOrElseUpdate` so the error is reported only once per
 *  missing class.
 */
def lookupClass(className: String)(implicit ctx: ErrorContext): CheckedClass = {
  classes.getOrElseUpdate(className, {
    reportError(s"Cannot find class $className")
    new CheckedClass(className, ClassKind.Class,
        Some(ObjectClass), Set(ObjectClass))
  })
}
/** Looks up the `CheckedClass` underlying a `ClassType`. */
def lookupClass(classType: ClassType)(implicit ctx: ErrorContext): CheckedClass =
  lookupClass(classType.className)
/** Tests whether class `lhs` is a subclass of `rhs`.
 *
 *  For classes outside the linking unit, recursively walks the parents
 *  recorded in the infos; otherwise uses the precomputed ancestors set.
 */
def isSubclass(lhs: String, rhs: String)(implicit ctx: ErrorContext): Boolean = {
  tryLookupClass(lhs).fold({ info =>
    val parents = info.superClass ++: info.interfaces
    parents.exists(_ == rhs) || parents.exists(isSubclass(_, rhs))
  }, { lhsClass =>
    lhsClass.ancestors.contains(rhs)
  })
}
/** Tests whether `lhs` is a subtype of `rhs`, delegating class subtyping
 *  to `isSubclass`.
 */
def isSubtype(lhs: Type, rhs: Type)(implicit ctx: ErrorContext): Boolean = {
  Types.isSubtype(lhs, rhs)(isSubclass)
}
/** Typing environment under which trees are checked.
 *
 *  Immutable: every `with*` method returns an updated copy.
 */
class Env(
    /** Type of `this`. Can be NoType. */
    val thisTpe: Type,
    /** Local variables in scope (including through closures). */
    val locals: Map[String, LocalDef],
    /** Return types by label. `None` is the enclosing method's return. */
    val returnTypes: Map[Option[String], Type],
    /** Whether we're in a constructor of the class */
    val inConstructor: Boolean
) {
  /** Returns a copy with a different `this` type. */
  def withThis(thisTpe: Type): Env =
    new Env(thisTpe, this.locals, this.returnTypes, this.inConstructor)

  /** Returns a copy with one additional local variable in scope. */
  def withLocal(localDef: LocalDef): Env =
    new Env(thisTpe, locals + (localDef.name -> localDef), returnTypes,
        this.inConstructor)

  /** Returns a copy with several additional local variables in scope. */
  def withLocals(localDefs: TraversableOnce[LocalDef]): Env =
    new Env(thisTpe, locals ++ localDefs.map(d => d.name -> d), returnTypes,
        this.inConstructor)

  /** Returns a copy with the unlabeled return type replaced. */
  def withReturnType(returnType: Type): Env =
    new Env(this.thisTpe, this.locals,
        returnTypes + (None -> returnType), this.inConstructor)

  /** Returns a copy with an additional labeled return type. */
  def withLabeledReturnType(label: String, returnType: Type): Env =
    new Env(this.thisTpe, this.locals,
        returnTypes + (Some(label) -> returnType), this.inConstructor)

  /** Returns a copy where the JS `arguments` object is in scope. */
  def withArgumentsVar(pos: Position): Env =
    withLocal(LocalDef("arguments", AnyType, mutable = false)(pos))

  /** Returns a copy with the in-constructor flag set as given. */
  def withInConstructor(inConstructor: Boolean): Env =
    new Env(this.thisTpe, this.locals, this.returnTypes, inConstructor)
}
object Env {
  /** Environment with no `this` type, no locals and no labels. */
  val empty: Env = new Env(NoType, Map.empty, Map.empty, false)

  /** Builds the environment for checking a method body from its
   *  signature: `this` type, parameters as locals, and the method's
   *  return type (a NoType result is recorded as AnyType so that bare
   *  `return` statements typecheck).
   */
  def fromSignature(thisType: Type, params: List[ParamDef],
      resultType: Type, isConstructor: Boolean = false): Env = {
    val paramLocalDefs =
      for (p @ ParamDef(name, tpe, mutable) <- params) yield
        name.name -> LocalDef(name.name, tpe, mutable)(p.pos)
    new Env(thisType, paramLocalDefs.toMap,
        Map(None -> (if (resultType == NoType) AnyType else resultType)),
        isConstructor)
  }
}
/** Checked representation of a class in the linking unit: its name,
 *  kind, hierarchy and fields.
 */
class CheckedClass(
    val name: String,
    val kind: ClassKind,
    val superClassName: Option[String],
    val ancestors: Set[String],
    _fields: TraversableOnce[CheckedField] = Nil)(
    implicit ctx: ErrorContext) {

  // Fields indexed by name for O(1) lookup.
  val fields = _fields.map(f => f.name -> f).toMap

  // Lazy because the super class may be registered after this one.
  lazy val superClass = superClassName.map(classes)

  /** Builds a `CheckedClass` directly from a `LinkedClass` of the IR. */
  def this(classDef: LinkedClass)(implicit ctx: ErrorContext) = {
    this(classDef.name.name, classDef.kind,
        classDef.superClass.map(_.name),
        classDef.ancestors.toSet,
        classDef.fields.map(CheckedClass.checkedField))
  }

  def isAncestorOfHijackedClass: Boolean =
    AncestorsOfHijackedClasses.contains(name)

  /** Looks up a field in this class or, transitively, its super classes. */
  def lookupField(name: String): Option[CheckedField] =
    fields.get(name).orElse(superClass.flatMap(_.lookupField(name)))
}
object CheckedClass {
  /** Converts an IR `FieldDef` into its checked counterpart. */
  private def checkedField(fieldDef: FieldDef) = {
    val FieldDef(Ident(name, _), tpe, mutable) = fieldDef
    new CheckedField(name, tpe, mutable)
  }
}
/** Checked representation of a single field declaration. */
class CheckedField(val name: String, val tpe: Type, val mutable: Boolean)
}
object IRChecker {
  /** Context (source position and node kind) attached to reported errors. */
  private final class ErrorContext private (pos: Position, name: String) {
    override def toString(): String =
      s"${pos.source}(${pos.line+1}:${pos.column+1}:$name)"
  }

  private object ErrorContext {
    /** Allows any `Tree` to be used where an `ErrorContext` is expected. */
    implicit def tree2errorContext(tree: Tree): ErrorContext =
      ErrorContext(tree)

    def apply(tree: Tree): ErrorContext =
      new ErrorContext(tree.pos, tree.getClass.getSimpleName)

    def apply(linkedClass: LinkedClass): ErrorContext =
      new ErrorContext(linkedClass.pos, "ClassDef")
  }

  // Encoded constructor names always start with "init___".
  private def isConstructorName(name: String): Boolean =
    name.startsWith("init___")

  // Reflective proxies have no encoded result type, so their name ends
  // with the trailing "__" separator (constructors excluded).
  private def isReflProxyName(name: String): Boolean =
    name.endsWith("__") && !isConstructorName(name)

  /** A local variable or parameter in scope, with its declaration position. */
  case class LocalDef(name: String, tpe: Type, mutable: Boolean)(val pos: Position)
}
| colinrgodsey/scala-js | tools/shared/src/main/scala/org/scalajs/core/tools/optimizer/IRChecker.scala | Scala | bsd-3-clause | 32,109 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import kafka.api.{ApiVersion, ApiVersionValidator, KAFKA_3_0_IV1}
import kafka.log.LogConfig.configDef
import kafka.message.BrokerCompressionCodec
import kafka.server.{KafkaConfig, ThrottledReplicaListValidator}
import kafka.utils.Implicits._
import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, TopicConfig}
import org.apache.kafka.common.errors.InvalidConfigurationException
import org.apache.kafka.common.record.{LegacyRecord, RecordVersion, TimestampType}
import org.apache.kafka.common.utils.{ConfigUtils, Utils}
import java.util.{Collections, Locale, Properties}
import scala.annotation.nowarn
import scala.collection.{Map, mutable}
import scala.jdk.CollectionConverters._
/** Default values for topic-level log configs, mostly derived from the
 *  corresponding broker-level defaults in `kafka.server.Defaults`.
 */
object Defaults {
  val SegmentSize = kafka.server.Defaults.LogSegmentBytes
  // Broker defaults are expressed in hours; topic configs use millis.
  val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L
  val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L
  val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages
  val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs
  val RetentionSize = kafka.server.Defaults.LogRetentionBytes
  val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L
  val RemoteLogStorageEnable = false
  val LocalRetentionBytes = -2 // It indicates the value to be derived from RetentionSize
  val LocalRetentionMs = -2 // It indicates the value to be derived from RetentionMs
  val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes
  val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes
  val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes
  val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs
  val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs
  val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs
  val MaxCompactionLagMs = kafka.server.Defaults.LogCleanerMaxCompactionLagMs
  val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio
  val CleanupPolicy = kafka.server.Defaults.LogCleanupPolicy
  val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable
  val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas
  val CompressionType = kafka.server.Defaults.CompressionType
  val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable

  /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
  @deprecated("3.0")
  val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion

  val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType
  val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs
  val LeaderReplicationThrottledReplicas = Collections.emptyList[String]()
  val FollowerReplicationThrottledReplicas = Collections.emptyList[String]()
  val MaxIdMapSnapshots = kafka.server.Defaults.MaxIdMapSnapshots
  val MessageDownConversionEnable = kafka.server.Defaults.MessageDownConversionEnable
}
/** Per-topic log configuration, backed by an `AbstractConfig` over
 *  [[LogConfig.configDef]].
 *
 *  @param props             the raw (already-validated) config values
 *  @param overriddenConfigs names of the configs explicitly overridden at
 *                           the topic level (as opposed to broker defaults)
 */
case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String] = Set.empty)
  extends AbstractConfig(LogConfig.configDef, props, false) {
  /**
   * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig
   * should also go in [[LogConfig.extractLogConfigMap()]].
   */
  val segmentSize = getInt(LogConfig.SegmentBytesProp)
  val segmentMs = getLong(LogConfig.SegmentMsProp)
  val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)
  val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp)
  val flushInterval = getLong(LogConfig.FlushMessagesProp)
  val flushMs = getLong(LogConfig.FlushMsProp)
  val retentionSize = getLong(LogConfig.RetentionBytesProp)
  val retentionMs = getLong(LogConfig.RetentionMsProp)
  val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp)
  val indexInterval = getInt(LogConfig.IndexIntervalBytesProp)
  val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp)
  val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp)
  val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp)
  val maxCompactionLagMs = getLong(LogConfig.MaxCompactionLagMsProp)
  val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp)
  // cleanup.policy is a list; a topic may be both compacted and deleted.
  val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact)
  val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete)
  val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp)
  val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp)
  val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT)
  val preallocate = getBoolean(LogConfig.PreAllocateEnableProp)

  /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
  @deprecated("3.0")
  val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp))

  val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp))
  val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue
  val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp)
  val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp)
  val messageDownConversionEnable = getBoolean(LogConfig.MessageDownConversionEnableProp)

  val remoteStorageEnable = getBoolean(LogConfig.RemoteLogStorageEnableProp)

  // The local (tiered-storage) retention limits are derived from the total
  // retention limits when set to the sentinel -2, and must never exceed them.
  val localRetentionMs: Long = deriveLocalRetention(
    LogConfig.LocalLogRetentionMsProp, getLong(LogConfig.LocalLogRetentionMsProp),
    LogConfig.RetentionMsProp, retentionMs)

  val localRetentionBytes: Long = deriveLocalRetention(
    LogConfig.LocalLogRetentionBytesProp, getLong(LogConfig.LocalLogRetentionBytesProp),
    LogConfig.RetentionBytesProp, retentionSize)

  /** Shared derivation/validation for the local retention configs
   *  (`local.retention.ms` / `local.retention.bytes`).
   *
   *  @param localProp  name of the local retention property (for error messages)
   *  @param localValue configured local retention value
   *  @param totalProp  name of the corresponding total retention property
   *  @param totalValue configured total retention value
   *  @throws ConfigException if the effective local value would exceed the total
   */
  private def deriveLocalRetention(localProp: String, localValue: Long,
                                   totalProp: String, totalValue: Long): Long = {
    // -2 indicates to derive the value from the corresponding total property.
    if (localValue == -2) totalValue
    else {
      // The effective local value must not be more than the total retention.
      if (localValue == -1 && totalValue != -1)
        throw new ConfigException(localProp, localValue,
          s"Value must not be -1 as $totalProp value is set as $totalValue.")
      if (localValue > totalValue)
        throw new ConfigException(localProp, localValue,
          s"Value must not be more than property: $totalProp value.")
      localValue
    }
  }

  /** Record format version implied by the (deprecated) message format version. */
  @nowarn("cat=deprecation")
  def recordVersion = messageFormatVersion.recordVersion

  /** Random jitter applied to segment rolling, bounded by the segment interval. */
  def randomSegmentJitter: Long =
    if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)

  /** Effective maximum segment age: compaction lag may force earlier rolls. */
  def maxSegmentMs: Long = {
    if (compact && maxCompactionLagMs > 0) math.min(maxCompactionLagMs, segmentMs)
    else segmentMs
  }

  /** Initial file size for new segment files (full size when preallocating). */
  def initFileSize: Int = {
    if (preallocate)
      segmentSize
    else
      0
  }

  /** Renders the overridden (topic-level) configs with secrets redacted. */
  def overriddenConfigsAsLoggableString: String = {
    val overriddenTopicProps = props.asScala.collect {
      case (k: String, v) if overriddenConfigs.contains(k) => (k, v.asInstanceOf[AnyRef])
    }
    ConfigUtils.configMapToRedactedString(overriddenTopicProps.asJava, configDef)
  }
}
object LogConfig {
/** Prints the topic-config table as HTML; used to generate the docs. */
def main(args: Array[String]): Unit = {
  println(configDef.toHtml(4, (config: String) => "topicconfigs_" + config))
}
// ---- Topic-level property names (aliases of the TopicConfig constants) ----
val SegmentBytesProp = TopicConfig.SEGMENT_BYTES_CONFIG
val SegmentMsProp = TopicConfig.SEGMENT_MS_CONFIG
val SegmentJitterMsProp = TopicConfig.SEGMENT_JITTER_MS_CONFIG
val SegmentIndexBytesProp = TopicConfig.SEGMENT_INDEX_BYTES_CONFIG
val FlushMessagesProp = TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG
val FlushMsProp = TopicConfig.FLUSH_MS_CONFIG
val RetentionBytesProp = TopicConfig.RETENTION_BYTES_CONFIG
val RetentionMsProp = TopicConfig.RETENTION_MS_CONFIG
val RemoteLogStorageEnableProp = TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG
val LocalLogRetentionMsProp = TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG
val LocalLogRetentionBytesProp = TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG
val MaxMessageBytesProp = TopicConfig.MAX_MESSAGE_BYTES_CONFIG
val IndexIntervalBytesProp = TopicConfig.INDEX_INTERVAL_BYTES_CONFIG
val DeleteRetentionMsProp = TopicConfig.DELETE_RETENTION_MS_CONFIG
val MinCompactionLagMsProp = TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG
val MaxCompactionLagMsProp = TopicConfig.MAX_COMPACTION_LAG_MS_CONFIG
val FileDeleteDelayMsProp = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG
val MinCleanableDirtyRatioProp = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG
val CleanupPolicyProp = TopicConfig.CLEANUP_POLICY_CONFIG
// Values (not property names) of the cleanup.policy list entries.
val Delete = TopicConfig.CLEANUP_POLICY_DELETE
val Compact = TopicConfig.CLEANUP_POLICY_COMPACT
val UncleanLeaderElectionEnableProp = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG
val MinInSyncReplicasProp = TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG
val CompressionTypeProp = TopicConfig.COMPRESSION_TYPE_CONFIG
val PreAllocateEnableProp = TopicConfig.PREALLOCATE_CONFIG

/* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
@deprecated("3.0") @nowarn("cat=deprecation")
val MessageFormatVersionProp = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG

val MessageTimestampTypeProp = TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG
val MessageTimestampDifferenceMaxMsProp = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG
val MessageDownConversionEnableProp = TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG

// Leave these out of TopicConfig for now as they are replication quota configs
val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas"
val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas"

// ---- Documentation strings shown in the generated config table ----
val SegmentSizeDoc = TopicConfig.SEGMENT_BYTES_DOC
val SegmentMsDoc = TopicConfig.SEGMENT_MS_DOC
val SegmentJitterMsDoc = TopicConfig.SEGMENT_JITTER_MS_DOC
val MaxIndexSizeDoc = TopicConfig.SEGMENT_INDEX_BYTES_DOC
val FlushIntervalDoc = TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC
val FlushMsDoc = TopicConfig.FLUSH_MS_DOC
val RetentionSizeDoc = TopicConfig.RETENTION_BYTES_DOC
val RetentionMsDoc = TopicConfig.RETENTION_MS_DOC
val RemoteLogStorageEnableDoc = TopicConfig.REMOTE_LOG_STORAGE_ENABLE_DOC
val LocalLogRetentionMsDoc = TopicConfig.LOCAL_LOG_RETENTION_MS_DOC
val LocalLogRetentionBytesDoc = TopicConfig.LOCAL_LOG_RETENTION_BYTES_DOC
val MaxMessageSizeDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC
val IndexIntervalDoc = TopicConfig.INDEX_INTERVAL_BYTES_DOCS
val FileDeleteDelayMsDoc = TopicConfig.FILE_DELETE_DELAY_MS_DOC
val DeleteRetentionMsDoc = TopicConfig.DELETE_RETENTION_MS_DOC
val MinCompactionLagMsDoc = TopicConfig.MIN_COMPACTION_LAG_MS_DOC
val MaxCompactionLagMsDoc = TopicConfig.MAX_COMPACTION_LAG_MS_DOC
val MinCleanableRatioDoc = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC
val CompactDoc = TopicConfig.CLEANUP_POLICY_DOC
val UncleanLeaderElectionEnableDoc = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC
val MinInSyncReplicasDoc = TopicConfig.MIN_IN_SYNC_REPLICAS_DOC
val CompressionTypeDoc = TopicConfig.COMPRESSION_TYPE_DOC
val PreAllocateEnableDoc = TopicConfig.PREALLOCATE_DOC

/* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */
@deprecated("3.0") @nowarn("cat=deprecation")
val MessageFormatVersionDoc = TopicConfig.MESSAGE_FORMAT_VERSION_DOC

val MessageTimestampTypeDoc = TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC
val MessageTimestampDifferenceMaxMsDoc = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC
val MessageDownConversionEnableDoc = TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_DOC

val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
  "the leader side. The list should describe a set of replicas in the form " +
  "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
  "all replicas for this topic."
val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
  "the follower side. The list should describe a set of " + "replicas in the form " +
  "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
  "all replicas for this topic."

// Header of the extra column added to the generated config table.
private[log] val ServerDefaultHeaderName = "Server Default Property"

// Configs that have no broker-level (server default) counterpart.
val configsWithNoServerDefaults: Set[String] = Set(RemoteLogStorageEnableProp, LocalLogRetentionMsProp, LocalLogRetentionBytesProp);
  // Package private for testing
  /**
   * A ConfigDef that additionally remembers, per topic config, the name of the
   * broker-level ("server default") config backing it, and renders that name as
   * an extra column in the generated documentation table.
   */
  private[log] class LogConfigDef(base: ConfigDef) extends ConfigDef(base) {
    def this() = this(new ConfigDef)

    // topic config name -> broker default config name
    private final val serverDefaultConfigNames = mutable.Map[String, String]()

    // When copying from another LogConfigDef, carry over its name mapping too.
    base match {
      case b: LogConfigDef => serverDefaultConfigNames ++= b.serverDefaultConfigNames
      case _ =>
    }

    // The three overloads below mirror ConfigDef.define but additionally record
    // the serverDefaultConfigName; each returns `this` for chaining.
    def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator,
               importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = {
      super.define(name, defType, defaultValue, validator, importance, doc)
      serverDefaultConfigNames.put(name, serverDefaultConfigName)
      this
    }

    def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance,
               documentation: String, serverDefaultConfigName: String): LogConfigDef = {
      super.define(name, defType, defaultValue, importance, documentation)
      serverDefaultConfigNames.put(name, serverDefaultConfigName)
      this
    }

    def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String,
               serverDefaultConfigName: String): LogConfigDef = {
      super.define(name, defType, importance, documentation)
      serverDefaultConfigNames.put(name, serverDefaultConfigName)
      this
    }

    // Insert the server-default column before "Importance" in generated docs.
    override def headers = List("Name", "Description", "Type", "Default", "Valid Values", ServerDefaultHeaderName,
      "Importance").asJava

    // Supply the server-default cell value; null when the config has no server
    // default (e.g. the remote-storage configs). Other columns defer to super.
    override def getConfigValue(key: ConfigKey, headerName: String): String = {
      headerName match {
        case ServerDefaultHeaderName => serverDefaultConfigNames.getOrElse(key.name, null)
        case _ => super.getConfigValue(key, headerName)
      }
    }

    /** Broker default config name for configName, if one was registered. */
    def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName)
  }
  // Package private for testing, return a copy since it's a mutable global variable
  private[kafka] def configDefCopy: LogConfigDef = new LogConfigDef(configDef)

  // The canonical definition of every topic-level config: type, default,
  // validator, importance, documentation and backing broker config name.
  private val configDef: LogConfigDef = {
    import org.apache.kafka.common.config.ConfigDef.Importance._
    import org.apache.kafka.common.config.ConfigDef.Range._
    import org.apache.kafka.common.config.ConfigDef.Type._
    import org.apache.kafka.common.config.ConfigDef.ValidString._

    @nowarn("cat=deprecation")
    val logConfigDef = new LogConfigDef()
      .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM,
        SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp)
      .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(1), MEDIUM, SegmentMsDoc,
        KafkaConfig.LogRollTimeMillisProp)
      .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc,
        KafkaConfig.LogRollTimeJitterMillisProp)
      .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc,
        KafkaConfig.LogIndexSizeMaxBytesProp)
      .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc,
        KafkaConfig.LogFlushIntervalMessagesProp)
      .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc,
        KafkaConfig.LogFlushIntervalMsProp)
      // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize
      .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc,
        KafkaConfig.LogRetentionBytesProp)
      // can be negative. See kafka.log.LogManager.cleanupExpiredSegments
      .define(RetentionMsProp, LONG, Defaults.RetentionMs, atLeast(-1), MEDIUM, RetentionMsDoc,
        KafkaConfig.LogRetentionTimeMillisProp)
      .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc,
        KafkaConfig.MessageMaxBytesProp)
      .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc,
        KafkaConfig.LogIndexIntervalBytesProp)
      .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM,
        DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp)
      .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc,
        KafkaConfig.LogCleanerMinCompactionLagMsProp)
      .define(MaxCompactionLagMsProp, LONG, Defaults.MaxCompactionLagMs, atLeast(1), MEDIUM, MaxCompactionLagMsDoc,
        KafkaConfig.LogCleanerMaxCompactionLagMsProp)
      .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc,
        KafkaConfig.LogDeleteDelayMsProp)
      .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM,
        MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp)
      .define(CleanupPolicyProp, LIST, Defaults.CleanupPolicy, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc,
        KafkaConfig.LogCleanupPolicyProp)
      .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable,
        MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp)
      .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc,
        KafkaConfig.MinInSyncReplicasProp)
      .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*),
        MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp)
      .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc,
        KafkaConfig.LogPreAllocateProp)
      .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, ApiVersionValidator, MEDIUM, MessageFormatVersionDoc,
        KafkaConfig.LogMessageFormatVersionProp)
      .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, MessageTimestampTypeDoc,
        KafkaConfig.LogMessageTimestampTypeProp)
      .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs,
        atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
      .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
        LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp)
      .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
        FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp)
      .define(MessageDownConversionEnableProp, BOOLEAN, Defaults.MessageDownConversionEnable, LOW,
        MessageDownConversionEnableDoc, KafkaConfig.LogMessageDownConversionEnableProp)

    // RemoteLogStorageEnableProp, LocalLogRetentionMsProp, LocalLogRetentionBytesProp do not have server default
    // config names.
    // NOTE: define mutates the receiver and returns it, so the value of this
    // chain can be (and is) discarded; the mutations still take effect.
    logConfigDef
      // This define method is not overridden in LogConfig as these configs do not have server defaults yet.
      .define(RemoteLogStorageEnableProp, BOOLEAN, Defaults.RemoteLogStorageEnable, MEDIUM, RemoteLogStorageEnableDoc)
      .define(LocalLogRetentionMsProp, LONG, Defaults.LocalRetentionMs, atLeast(-2), MEDIUM, LocalLogRetentionMsDoc)
      .define(LocalLogRetentionBytesProp, LONG, Defaults.LocalRetentionBytes, atLeast(-2), MEDIUM, LocalLogRetentionBytesDoc)

    logConfigDef
  }
  /** A LogConfig with every config at its default value. */
  def apply(): LogConfig = LogConfig(new Properties())

  /** All known topic config names, sorted alphabetically. */
  def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted

  /** Broker-level default config backing configName, if any. */
  def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName)

  /** Declared type of configName, or None for unknown names. */
  def configType(configName: String): Option[ConfigDef.Type] = {
    Option(configDef.configKeys.get(configName)).map(_.`type`)
  }
/**
* Create a log config instance using the given properties and defaults
*/
def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = {
val props = new Properties()
defaults.forEach { (k, v) => props.put(k, v) }
props ++= overrides
val overriddenKeys = overrides.keySet.asScala.map(_.asInstanceOf[String]).toSet
new LogConfig(props, overriddenKeys)
}
/**
* Check that property names are valid
*/
def validateNames(props: Properties): Unit = {
val names = configNames
for(name <- props.asScala.keys)
if (!names.contains(name))
throw new InvalidConfigurationException(s"Unknown topic config name: $name")
}
  /** All known topic config keys, keyed by name. */
  private[kafka] def configKeys: Map[String, ConfigKey] = configDef.configKeys.asScala

  /**
   * Cross-field validation of already-parsed values. Assumes both compaction
   * lag values are present (configDef.parse fills in defaults — see validate).
   */
  def validateValues(props: java.util.Map[_, _]): Unit = {
    val minCompactionLag = props.get(MinCompactionLagMsProp).asInstanceOf[Long]
    val maxCompactionLag = props.get(MaxCompactionLagMsProp).asInstanceOf[Long]
    if (minCompactionLag > maxCompactionLag) {
      throw new InvalidConfigurationException(s"conflict topic config setting $MinCompactionLagMsProp " +
        s"($minCompactionLag) > $MaxCompactionLagMsProp ($maxCompactionLag)")
    }
  }

  /**
   * Check that the given properties contain only valid log config names and that all values can be parsed and are valid
   */
  def validate(props: Properties): Unit = {
    validateNames(props)
    // parse applies per-config validators and fills in defaults.
    val valueMaps = configDef.parse(props)
    validateValues(valueMaps)
  }
  /**
   * Map topic config to the broker config with highest priority. Some of these have additional synonyms
   * that can be obtained using [[kafka.server.DynamicBrokerConfig#brokerConfigSynonyms]]
   *
   * NOTE(review): must be kept consistent with the serverDefaultConfigName
   * arguments passed to define in `configDef` above.
   */
  @nowarn("cat=deprecation")
  val TopicConfigSynonyms = Map(
    SegmentBytesProp -> KafkaConfig.LogSegmentBytesProp,
    SegmentMsProp -> KafkaConfig.LogRollTimeMillisProp,
    SegmentJitterMsProp -> KafkaConfig.LogRollTimeJitterMillisProp,
    SegmentIndexBytesProp -> KafkaConfig.LogIndexSizeMaxBytesProp,
    FlushMessagesProp -> KafkaConfig.LogFlushIntervalMessagesProp,
    FlushMsProp -> KafkaConfig.LogFlushIntervalMsProp,
    RetentionBytesProp -> KafkaConfig.LogRetentionBytesProp,
    RetentionMsProp -> KafkaConfig.LogRetentionTimeMillisProp,
    MaxMessageBytesProp -> KafkaConfig.MessageMaxBytesProp,
    IndexIntervalBytesProp -> KafkaConfig.LogIndexIntervalBytesProp,
    DeleteRetentionMsProp -> KafkaConfig.LogCleanerDeleteRetentionMsProp,
    MinCompactionLagMsProp -> KafkaConfig.LogCleanerMinCompactionLagMsProp,
    MaxCompactionLagMsProp -> KafkaConfig.LogCleanerMaxCompactionLagMsProp,
    FileDeleteDelayMsProp -> KafkaConfig.LogDeleteDelayMsProp,
    MinCleanableDirtyRatioProp -> KafkaConfig.LogCleanerMinCleanRatioProp,
    CleanupPolicyProp -> KafkaConfig.LogCleanupPolicyProp,
    UncleanLeaderElectionEnableProp -> KafkaConfig.UncleanLeaderElectionEnableProp,
    MinInSyncReplicasProp -> KafkaConfig.MinInSyncReplicasProp,
    CompressionTypeProp -> KafkaConfig.CompressionTypeProp,
    PreAllocateEnableProp -> KafkaConfig.LogPreAllocateProp,
    MessageFormatVersionProp -> KafkaConfig.LogMessageFormatVersionProp,
    MessageTimestampTypeProp -> KafkaConfig.LogMessageTimestampTypeProp,
    MessageTimestampDifferenceMaxMsProp -> KafkaConfig.LogMessageTimestampDifferenceMaxMsProp,
    MessageDownConversionEnableProp -> KafkaConfig.LogMessageDownConversionEnableProp
  )
  /**
   * Copy the subset of properties that are relevant to Logs. The individual properties
   * are listed here since the names are slightly different in each Config class...
   *
   * The explicit `: java.lang.Long` / `: java.lang.Boolean` ascriptions box
   * Scala primitive-typed getters so the value fits the map's Object values;
   * the unascribed getters presumably already return reference types — TODO
   * confirm against KafkaConfig.
   */
  @nowarn("cat=deprecation")
  def extractLogConfigMap(
    kafkaConfig: KafkaConfig
  ): java.util.Map[String, Object] = {
    val logProps = new java.util.HashMap[String, Object]()
    logProps.put(SegmentBytesProp, kafkaConfig.logSegmentBytes)
    logProps.put(SegmentMsProp, kafkaConfig.logRollTimeMillis)
    logProps.put(SegmentJitterMsProp, kafkaConfig.logRollTimeJitterMillis)
    logProps.put(SegmentIndexBytesProp, kafkaConfig.logIndexSizeMaxBytes)
    logProps.put(FlushMessagesProp, kafkaConfig.logFlushIntervalMessages)
    logProps.put(FlushMsProp, kafkaConfig.logFlushIntervalMs)
    logProps.put(RetentionBytesProp, kafkaConfig.logRetentionBytes)
    logProps.put(RetentionMsProp, kafkaConfig.logRetentionTimeMillis: java.lang.Long)
    logProps.put(MaxMessageBytesProp, kafkaConfig.messageMaxBytes)
    logProps.put(IndexIntervalBytesProp, kafkaConfig.logIndexIntervalBytes)
    logProps.put(DeleteRetentionMsProp, kafkaConfig.logCleanerDeleteRetentionMs)
    logProps.put(MinCompactionLagMsProp, kafkaConfig.logCleanerMinCompactionLagMs)
    logProps.put(MaxCompactionLagMsProp, kafkaConfig.logCleanerMaxCompactionLagMs)
    logProps.put(FileDeleteDelayMsProp, kafkaConfig.logDeleteDelayMs)
    logProps.put(MinCleanableDirtyRatioProp, kafkaConfig.logCleanerMinCleanRatio)
    logProps.put(CleanupPolicyProp, kafkaConfig.logCleanupPolicy)
    logProps.put(MinInSyncReplicasProp, kafkaConfig.minInSyncReplicas)
    logProps.put(CompressionTypeProp, kafkaConfig.compressionType)
    logProps.put(UncleanLeaderElectionEnableProp, kafkaConfig.uncleanLeaderElectionEnable)
    logProps.put(PreAllocateEnableProp, kafkaConfig.logPreAllocateEnable)
    logProps.put(MessageFormatVersionProp, kafkaConfig.logMessageFormatVersion.version)
    logProps.put(MessageTimestampTypeProp, kafkaConfig.logMessageTimestampType.name)
    logProps.put(MessageTimestampDifferenceMaxMsProp, kafkaConfig.logMessageTimestampDifferenceMaxMs: java.lang.Long)
    logProps.put(MessageDownConversionEnableProp, kafkaConfig.logMessageDownConversionEnable: java.lang.Boolean)
    logProps
  }
  /** message.format.version is ignored from IBP 3.0 (KAFKA_3_0_IV1) onwards. */
  def shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion: ApiVersion): Boolean =
    interBrokerProtocolVersion >= KAFKA_3_0_IV1

  /**
   * Pairs a configured message format version with the inter-broker protocol
   * version and decides whether the former is ignored and/or warrants a warning.
   */
  class MessageFormatVersion(messageFormatVersionString: String, interBrokerProtocolVersionString: String) {
    val messageFormatVersion = ApiVersion(messageFormatVersionString)
    private val interBrokerProtocolVersion = ApiVersion(interBrokerProtocolVersionString)

    def shouldIgnore: Boolean = shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion)

    // Warn only when the ignored format version is pre-V2 (i.e. the operator
    // configured something that differs from the enforced behavior).
    def shouldWarn: Boolean =
      interBrokerProtocolVersion >= KAFKA_3_0_IV1 && messageFormatVersion.recordVersion.precedes(RecordVersion.V2)

    @nowarn("cat=deprecation")
    def topicWarningMessage(topicName: String): String = {
      s"Topic configuration ${LogConfig.MessageFormatVersionProp} with value `$messageFormatVersionString` is ignored " +
        s"for `$topicName` because the inter-broker protocol version `$interBrokerProtocolVersionString` is " +
        "greater or equal than 3.0. This configuration is deprecated and it will be removed in Apache Kafka 4.0."
    }

    // NOTE(review): unlike topicWarningMessage, the value here is not wrapped
    // in backticks — confirm whether that inconsistency is intentional.
    @nowarn("cat=deprecation")
    def brokerWarningMessage: String = {
      s"Broker configuration ${KafkaConfig.LogMessageFormatVersionProp} with value $messageFormatVersionString is ignored " +
        s"because the inter-broker protocol version `$interBrokerProtocolVersionString` is greater or equal than 3.0. " +
        "This configuration is deprecated and it will be removed in Apache Kafka 4.0."
    }
  }
}
| lindong28/kafka | core/src/main/scala/kafka/log/LogConfig.scala | Scala | apache-2.0 | 29,484 |
package org.jetbrains.plugins.scala
package lang
package transformation
package calls
/**
* @author Pavel Fatin
*/
/**
 * Tests the ExpandApplyCall transformer, which rewrites sugared apply calls
 * (`O(A)`) into their explicit form (`O.apply(A)`). Each check compares the
 * code `before` the transformation with the expected code `after`.
 */
class ExpandApplyCallTest extends TransformerTest(new ExpandApplyCall()) {

  // Shared declarations available to every check in this suite.
  override protected val header: String =
    """
     object O {
       def apply(p: A) {}
       def apply(p1: A, p2: A) {}
       def f(p: A) {}
     }
  """

  def testSingleArgument(): Unit = check(
    before = "O(A)",
    after = "O.apply(A)"
  )()

  def testMultipleArguments(): Unit = check(
    before = "O(A, A)",
    after = "O.apply(A, A)"
  )()

  // The compiler-generated apply of a case class is expanded too.
  def testSynthetic(): Unit = check(
    before = "S(A)",
    after = "S.apply(A)"
  )(header = "case class S(a: A)")

  // Already-explicit calls must be left untouched.
  def testExplicit(): Unit = check(
    before = "O.apply(A)",
    after = "O.apply(A)"
  )()

  // Ordinary method calls are not rewritten.
  def testOtherMethod(): Unit = check(
    before = "O.f(A)",
    after = "O.f(A)"
  )()

  // Works when the apply target is reached through a val reference.
  def testIndirectResolution(): Unit = check(
    before = "v(A)",
    after = "v.apply(A)"
  )(header = "val v = O")

  def testCompoundQualifier(): Unit = check(
    before = "O1.O2(A)",
    after = "O1.O2.apply(A)"
  )(header =
    """
     object O1 {
       object O2 {
         def apply(p: A): Unit = {}
       }
     }
  """)

  // TODO test renamed "apply" method
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/transformation/calls/ExpandApplyCallTest.scala | Scala | apache-2.0 | 1,238 |
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}
/**
 * Exercises scalar SQL functions (literals, string ops, numeric ops) against a
 * relational test database, checking each expression evaluates to the expected
 * value. Capability-gated checks (ifCap) only run on backends that support them.
 */
class RelationalScalarFunctionTest extends AsyncTest[RelationalTestDB] {
  import tdb.profile.api._

  def test = {
    // Runs query `q` and asserts its single result equals `exp`.
    def check[T](q: Rep[T], exp: T) = q.result.map(_ shouldBe exp)
    // Round-trips a literal value through the database.
    def checkLit[T : ColumnType](v: T) = check(LiteralColumn(v), v)
    val s = "abcdefghijklmnopqrstuvwxyz"
    seq(
      // Literals
      checkLit(false),
      checkLit(true),
      checkLit(42: Byte),
      checkLit(-42: Byte),
      checkLit(42),
      checkLit(-42),
      checkLit(17.5),
      checkLit(-17.5),
      checkLit(17.5f),
      checkLit(-17.5f),
      checkLit(42l),
      checkLit(-42l),
      checkLit("foo"),
      check("42".asColumnOf[Int], 42),
      check(LiteralColumn("foo").length, 3),
      check(LiteralColumn("foo") ++ "bar", "foobar"),
      check(LiteralColumn(1) ifNull 42, 1),
      check(LiteralColumn[Option[Int]](None) ifNull 42, 42),
      check(LiteralColumn("Foo").toUpperCase, "FOO"),
      check(LiteralColumn("Foo").toLowerCase, "foo"),
      check(LiteralColumn("  foo  ").ltrim, "foo  "),
      check(LiteralColumn("  foo  ").rtrim, "  foo"),
      // FIXME: broken in DB2, which does not seem to support nested {fn ...} calls
      // check(LiteralColumn("  foo  ").trim, "foo")
      // database/user functions: only check they run; values are backend-specific.
      Functions.database.toLowerCase.result,
      Functions.user.toLowerCase.result,
      check(LiteralColumn(8) % 3, 2),
      check(LiteralColumn(-12.5).abs, 12.5),
      check(LiteralColumn(1.9).ceil, 2.0),
      check(LiteralColumn(1.5).ceil, 2.0),
      check(LiteralColumn(1.4).ceil, 2.0),
      check(LiteralColumn(-1.9).ceil, -1.0),
      check(LiteralColumn(-1.5).ceil, -1.0),
      check(LiteralColumn(-1.4).ceil, -1.0),
      check(LiteralColumn(1.5).floor, 1.0),
      check(LiteralColumn(1.4).floor, 1.0),
      check(LiteralColumn(-1.5).floor, -2.0),
      check(LiteralColumn(-10.0).sign, -1),
      // Floating-point comparisons use tolerance windows instead of equality.
      Functions.pi.toDegrees.result.map(_.should(r => r > 179.9999 && r < 180.0001)),
      (Functions.pi.toDegrees.toRadians - Functions.pi).abs.result.map(_.should(_ <= 0.00001)),
      check(LiteralColumn(s).substring(3, 5), s.substring(3, 5)),
      check(LiteralColumn(s).substring(3), s.substring(3)),
      check(LiteralColumn(s).take(3), s.take(3)),
      check(LiteralColumn(s).drop(3), s.drop(3)),
      ifCap(rcap.replace)(check(LiteralColumn(s).replace("cd", "XXX"), s.replace("cd", "XXX"))),
      ifCap(rcap.reverse)(check(LiteralColumn(s).reverseString, s.reverse)),
      ifCap(rcap.indexOf)(seq(
        check(LiteralColumn(s).indexOf("o"), s.indexOf("o")),
        check(LiteralColumn(s).indexOf("7"), s.indexOf("7"))
      ))
    )
  }
}
| nuodb/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/RelationalScalarFunctionTest.scala | Scala | bsd-2-clause | 2,695 |
package org.test
import org.nlogo.api
import api.Syntax._
import api.ScalaConversions._
import org.nlogo.api._
/** NetLogo reporter primitive that reports the arithmetic mean of a list of numbers. */
class ComputeMean extends api.DefaultReporter {
  // One list argument in, one number out.
  override def getSyntax = reporterSyntax(Array(Syntax.ListType), NumberType)
def average(numbers: Vector[Double]): Double = numbers.sum / numbers.length
def report(args: Array[api.Argument], context: api.Context):AnyRef = {
val logoListNumbers:LogoList = try args(0).getList
val logoListCasted:Vector[Double] = logoListNumbers.toVector.map {
case d: java.lang.Double => d: Double
case _ => throw new ExtensionException("The list can only contain numbers")
}
average(logoListCasted).toLogoObject
}
} | Spatial-ABM-with-Netlogo/Chapitre-A | scala-plugin-netlogo-sbt/src/ComputeMean.scala | Scala | agpl-3.0 | 701 |
package edison.model.domain
import edison.model._
object ParamDefs {
  /** Builds a ParamDefs preserving argument order and indexing each def by name. */
  def apply(params: ParamDef*): ParamDefs = {
    val ordered = params.toVector
    val byName = ordered.map(p => p.name -> p).toMap
    new ParamDefs(ordered, byName)
  }
}
/**
* An ordered set of ParamDefs that allows for accessing parameter definitions by name and by index.
*/
case class ParamDefs private (list: Vector[ParamDef], map: Map[ParamName, ParamDef]) {
  // Lookup by position; throws IndexOutOfBoundsException for an invalid index.
  def apply(idx: Int): ParamDef = list(idx)
  // Lookup by name; throws NoSuchElementException for an unknown name (Map.apply).
  def apply(paramName: ParamName): ParamDef = map(paramName)
  def size: Int = list.size
}
| pawel-wiejacha/edison | core/src/main/scala/edison/model/domain/ParamDefs.scala | Scala | mit | 554 |
/*
* Copyright (c) 2014 Robert Conrad - All Rights Reserved.
* Unauthorized copying of this file, via any medium is strictly prohibited.
* This file is proprietary and confidential.
* Last modified by rconrad, 12/24/14 4:37 PM
*/
package base.rest.route
import akka.actor.ActorRefFactory
/**
* Builds the main endpoint discoverability routes
* @author rconrad
*/
private[rest] object RestVersionsRouteFactory {
  /** Instantiates a RestVersionsRoute bound to the given actor factory and returns its routes. */
  def apply(actors: ActorRefFactory) = new RestVersionsRoute {
    def actorRefFactory = actors
  }.routes
}
/*
* Demo of using by name implicits to resolve (hidden) divergence issues when
* traversing recursive generic structures.
*
* See http://stackoverflow.com/questions/25923974
*/
// Minimal HList encoding used by the test below.
sealed trait HList
object HList {
  implicit class Syntax[L <: HList](l: L) {
    def ::[U](u: U): U :: L = new ::(u, l)
  }
}
sealed trait HNil extends HList
object HNil extends HNil
case class ::[+H, +T <: HList](head : H, tail : T) extends HList
// Shapeless-style Generic: converts between a type and its HList representation.
trait Generic[T] {
  type Repr
  def to(t: T): Repr
  def from(r: Repr): T
}
object Generic {
  type Aux[T, Repr0] = Generic[T] { type Repr = Repr0 }
}
object Test extends App {
  // Non-regular recursive structure: the element type grows at each level,
  // which is what makes implicit resolution diverge without by-name implicits.
  case class Bootstrap[+A](head: A, tail: Option[Bootstrap[(A, A)]])
  object Bootstrap {
    type BootstrapRepr[+A] = A :: Option[Bootstrap[(A, A)]] :: HNil
    implicit def bootstrapGen[A]: Generic.Aux[Bootstrap[A], BootstrapRepr[A]] =
      new Generic[Bootstrap[A]] {
        type Repr = BootstrapRepr[A]
        def to(t: Bootstrap[A]): Repr = t.head :: t.tail :: HNil
        def from(r: Repr): Bootstrap[A] = Bootstrap(r.head, r.tail.head)
      }
  }
  class Tc[A]
  object Tc {
    implicit val tcInt: Tc[Int] = new Tc
    implicit def tcOpt[A: Tc]: Tc[Option[A]] = new Tc
    implicit def tcTuple[A: Tc, B: Tc]: Tc[(A, B)] = new Tc
    implicit val tcHNil: Tc[HNil] = new Tc
    implicit def tcHCons[H: Tc, T <: HList: Tc]: Tc[H :: T] = new Tc
    implicit def tcGen[A, R <: HList](
      implicit gen: Generic.Aux[A, R], tcR: => Tc[R]
    ): Tc[A] = new Tc
  }
  // Negative-test anchor: the compiler is expected to report an error on this
  // line (divergence); the `// error` marker must stay on this expression.
  implicitly[Tc[Bootstrap[Int]]] // error
}
| som-snytt/dotty | tests/neg/byname-implicits-18.scala | Scala | apache-2.0 | 1,532 |
package org.ensime.fixture
import java.io.File
import java.nio.charset.Charset
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import org.ensime.api._
import org.ensime.core.javac.JavaCompiler
import org.ensime.indexer._
import org.ensime.util._
import org.ensime.util.file._
import org.slf4j.LoggerFactory
import scala.collection.immutable.Queue
import scala.concurrent.Await
import scala.concurrent.duration.Duration
trait JavaCompilerFixture {
  // Implemented by concrete fixture mixins; supplies a fully-wired JavaCompiler.
  def withJavaCompiler(
    testCode: (TestKitFix, EnsimeConfig, JavaCompiler, JavaStoreReporter, SearchService) => Any
  ): Any

  /**
   * Writes the given source lines (with embedded `@label@` position markers)
   * into the test project, typechecks the file, and invokes testCode once per
   * marker with (file, offset, label, compiler). Markers are stripped from the
   * source; `offset` accumulates the removed marker lengths so each position
   * points into the stripped text.
   */
  def runForPositionInCompiledSource(config: EnsimeConfig, cc: JavaCompiler, lines: String*)(testCode: (SourceFileInfo, Int, String, JavaCompiler) => Any): Any = {
    val contents = lines.mkString("\\n")
    var offset = 0
    var points = Queue.empty[(Int, String)]
    val re = """@([a-z0-9\\.]*)@"""
    re.r.findAllMatchIn(contents).foreach { m =>
      points :+= ((m.start - offset, m.group(1)))
      offset += ((m.end - m.start))
    }
    val f = new File(config.rootDir, "testing/simple/src/main/java/org/example/Test1.java")
    val file = SourceFileInfo(f, Some(contents.replaceAll(re, "")), None)
    cc.askTypecheckFiles(List(file))
    // A test with no @...@ markers is a usage error.
    assert(points.nonEmpty)
    for (pt <- points) {
      testCode(file, pt._1, pt._2, cc)
    }
  }
}
object JavaCompilerFixture {
  /** Constructs a JavaCompiler wired to the given config, reporter and search service. */
  private[fixture] def create(
    config: EnsimeConfig,
    reportHandler: ReportHandler,
    search: SearchService
  )(
    implicit
    system: ActorSystem,
    vfs: EnsimeVFS
  ): JavaCompiler = {
    // NOTE(review): these two probes appear unused by the constructor call
    // below — confirm whether they can be removed.
    val indexer = TestProbe()
    val parent = TestProbe()
    new JavaCompiler(config, reportHandler, search, vfs)
  }
}
/** ReportHandler that accumulates Java compiler notes in memory for assertions. */
class JavaStoreReporter extends ReportHandler {
  // NOTE(review): never reassigned (only mutated via clear/++=) — could be a val,
  // but changing the public var would alter the class's interface.
  var notes = scala.collection.mutable.HashSet[Note]()
  // User-facing messages are intentionally dropped in tests.
  override def messageUser(str: String): Unit = {}
  override def clearAllJavaNotes(): Unit = { this.notes.clear() }
  override def reportJavaNotes(notes: List[Note]): Unit = { this.notes ++= notes }
}
/**
 * Fixture that builds a fresh VFS, test kit, search service and config for
 * every test, then hands a newly created JavaCompiler to the test body.
 */
trait IsolatedJavaCompilerFixture
    extends JavaCompilerFixture
    with IsolatedEnsimeVFSFixture
    with IsolatedTestKitFixture
    with IsolatedSearchServiceFixture {

  override def withJavaCompiler(
    testCode: (TestKitFix, EnsimeConfig, JavaCompiler, JavaStoreReporter, SearchService) => Any
  ): Any = {
    withVFS { implicit vfs =>
      withTestKit { testkit =>
        import testkit._
        // NOTE(review): the `config` bound here is immediately shadowed by the
        // withEnsimeConfig param below — confirm the double acquisition is intended.
        withSearchService { (config, search) =>
          withEnsimeConfig { config =>
            val reportHandler = new JavaStoreReporter
            val cc = JavaCompilerFixture.create(config, reportHandler, search)
            testCode(testkit, config, cc, reportHandler, search)
          }
        }
      }
    }
  }
}
| jacobono/ensime-server | core/src/it/scala/org/ensime/fixture/JavaCompilerFixture.scala | Scala | gpl-3.0 | 2,731 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.hibench.sparkbench.micro
import com.intel.hibench.sparkbench.common.IOCommon
import org.apache.spark._
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
object ScalaSort {
  // Enriches pair RDDs with sortByKeyWithPartitioner (see ConfigurableOrderedRDDFunctions).
  implicit def rddToHashedRDDFunctions[K : Ordering : ClassTag, V: ClassTag]
    (rdd: RDD[(K, V)]) = new ConfigurableOrderedRDDFunctions[K, V, (K, V)](rdd)

  /**
   * HiBench micro benchmark: sorts the text records read from args(0) and
   * writes the sorted keys to args(1), using a hash partitioner for the shuffle.
   */
  def main(args: Array[String]) {
    if (args.length != 2) {
      // Fix: the original interpolated the object itself (s"Usage: $ScalaSort ..."),
      // which printed the object's toString instead of the program name.
      System.err.println(
        "Usage: ScalaSort <INPUT_HDFS> <OUTPUT_HDFS>"
      )
      System.exit(1)
    }
    val sparkConf = new SparkConf().setAppName("ScalaSort")
      .set("spark.shuffle.compress", "false")
      .set("spark.io.compression.codec", "org.apache.spark.io.LZFCompressionCodec")
      .set("spark.smartCompress", "false")
    val sc = new SparkContext(sparkConf)
    val parallel = sc.getConf.getInt("spark.default.parallelism", sc.defaultParallelism)
    // Reducer count defaults to half the configured parallelism.
    val reducer = IOCommon.getProperty("hibench.default.shuffle.parallelism")
      .getOrElse((parallel / 2).toString).toInt
    val io = new IOCommon(sc)
    val data = io.load[String](args(0)).map((_, 1))
    val partitioner = new HashPartitioner(partitions = reducer)
    val sorted = data.sortByKeyWithPartitioner(partitioner = partitioner).map(_._1)
    io.save(args(1), sorted)
    sc.stop()
  }
}
| kimihe/Swallow | swallow-benchmark/HiBench-master/sparkbench/micro/src/main/scala/com/intel/sparkbench/micro/ScalaSort.scala | Scala | apache-2.0 | 2,233 |
package chat.tox.antox.utils
import java.io.{BufferedReader, File, InputStreamReader, Reader}
import java.net.URL
import java.nio.charset.Charset
import org.json.JSONObject
import scala.io.Source
/** Helpers for reading JSON text from URLs and files. */
object JsonReader {

  // Drains the reader one character at a time into a String.
  private def readAll(rd: Reader): String = {
    val sb = new StringBuilder()
    var cp: Int = rd.read()
    while (cp != -1) {
      sb.append(cp.toChar)
      cp = rd.read()
    }
    sb.toString()
  }

  /**
   * Fetches the body at url as UTF-8 text. Returns "" (and logs) on any failure;
   * the stream is always closed.
   * NOTE(review): the log tag says "readJsonFromUrl" but the method is named
   * readFromUrl — the tag is a runtime string, so it is only flagged here.
   */
  def readFromUrl(url: String): String = {
    val is = new URL(url).openStream()
    try {
      val rd = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8")))
      val jsonText = readAll(rd)
      jsonText
    } catch {
      case e: Exception => {
        AntoxLog.errorException("JsonReader readJsonFromUrl error", e)
        ""
      }
    } finally {
      is.close()
    }
  }

  /**
   * Parses file as JSON. Returns an empty JSONObject (and logs) if the file
   * cannot be read or parsed. The source is closed via try/finally.
   */
  def readJsonFromFile(file: File): JSONObject = {
    try {
      val source = Source.fromFile(file)
      val jsonText = try source.mkString finally source.close()
      new JSONObject(jsonText)
    } catch {
      case e: Exception => {
        AntoxLog.errorException("JsonReader readJsonFromFile error", e)
        new JSONObject()
      }
    }
  }
}
| wiiam/Antox | app/src/main/scala/chat/tox/antox/utils/JsonReader.scala | Scala | gpl-3.0 | 1,190 |
package trivial.rest.caching
import scala.collection.mutable
/**
* Usage:
*
* class MemoDemo extends Memo {
* // Functions:
* private val func = (input: String) => { /* Do something expensive here */ }
* def memoisedFunction(input: String) = memo { func } (input)
*
* // Methods:
* private def method(input: String) = { /* Do something expensive here */ }
* val f = method _ // Assign this explicitly, not when calling memo (below)
* def memoisedMethod(input: String) = memo { f } (input)
* }
*/
trait Memo {
  // One Cache per memoised function, keyed either by an explicit key or by the
  // function instance itself (see the two memo overloads below).
  val cacheOfCaches = mutable.Map.empty[Any, Cache[_, _]]

  /**
   * Memoise under an explicit key. Use this for parameterised methods, or
   * anywhere else the function instance itself cannot serve as a stable key.
   * In all other cases, the simpler memo() overload below is probably better.
   *
   * val f = method _
   * def memoised(input: String) = memo ("allTheThings") { f } (input)
   */
  def memo[I, O](key: Any)(functionToMemorise: I => O): Cache[I, O] = {
    val cache = cacheOfCaches.getOrElseUpdate(key, Cache[I, O](functionToMemorise))
    cache.asInstanceOf[Cache[I, O]]
  }

  /** Drop all memoised results stored under key; a no-op for unknown keys. */
  def unMemo(key: Any): Unit =
    cacheOfCaches.get(key) match {
      case Some(cache) => cache.invalidate()
      case None        => ()
    }

  /**
   * Memoise using the function itself as the key — for simple functions and
   * un-parameterised methods.
   *
   * BEWARE: this only works when the *same* function instance is passed on
   * every call. It does NOT work for functions created on the fly (each is a
   * different instance); use the keyed memo overload for those.
   *
   * @param functionToMemorise the function to memoise
   * @tparam I the input parameter type of the function
   * @tparam O the output type of the function
   * @return a Cache[I, O]
   */
  def memo[I, O](functionToMemorise: I => O): Cache[I, O] = {
    val cache = cacheOfCaches.getOrElseUpdate(functionToMemorise, Cache[I, O](functionToMemorise))
    cache.asInstanceOf[Cache[I, O]]
  }
}
/** Memoises functionToMemorise: each distinct input is computed at most once. */
case class Cache[I, O](functionToMemorise: I => O) extends (I => O) {
  // Memoised results, one entry per distinct input seen so far.
  val cache = mutable.Map.empty[I, O]

  /** Return the result for input, computing and storing it on first use. */
  def apply(input: I): O = cache.getOrElseUpdate(input, functionToMemorise(input))

  /** Forget every memoised result. */
  def invalidate(): Unit = cache.clear()
}
/*
* Copyright 2014 Dominic Scheurer
*
* This file is part of FSAUtilsGUI.
*
* FSAUtilsGUI is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* FSAUtilsGUI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FSAUtilsGUI. If not, see <http://www.gnu.org/licenses/>.
*/
package de.dominicscheurer.fsautils.gui
import scala.io.Source._
import scala.swing._
import java.io.File
import java.io.PrintWriter
/** Minimal Swing text editor for a single XML file, with Save and Close menu items. */
class XMLEditor(file: File)
    extends SimpleSwingApplication {

  def top = new MainFrame {
    preferredSize = new Dimension(600, 400)
    title = file.getName

    // Disable the default close behavior so closeOperation() below controls it.
    import javax.swing.WindowConstants.DO_NOTHING_ON_CLOSE
    peer.setDefaultCloseOperation(DO_NOTHING_ON_CLOSE)
    override def closeOperation() { close }

    // Load the whole file into the editor; close the source after reading.
    val source = fromFile(file)
    val content = source.mkString
    source.close()

    val editorPane = new EditorPane("text/plain", content)
    val scrollPane = new ScrollPane()
    scrollPane.contents = editorPane
    contents = scrollPane

    menuBar = new MenuBar {
      contents += new Menu("File") {
        // Save overwrites the file with the current editor contents.
        contents += new MenuItem(Action("Save XML File") {
          Some(new PrintWriter(file)).foreach{p => p.write(editorPane.text); p.close}
        })
        contents += new MenuItem(Action("Close") {
          close
        })
      }
    }
  }
} | rindPHI/FSAUtilsGUI | src/de/dominicscheurer/fsautils/gui/XMLEditor.scala | Scala | gpl-3.0 | 1,922 |
package com.bwsw.tstreamstransactionserver.netty.server.transactionMetadataService.stateHandler
/** Composite key identifying one partition of one stream. */
case class KeyStreamPartition(stream: Int, partition: Int) {
  /** Serializes this key as 8 big-endian bytes: stream id first, then partition id. */
  def toByteArray: Array[Byte] = {
    val buffer = java.nio.ByteBuffer.allocate(2 * java.lang.Integer.BYTES)
    buffer.putInt(stream)
    buffer.putInt(partition)
    buffer.array()
  }
}

object KeyStreamPartition {
  /** Inverse of [[KeyStreamPartition#toByteArray]]. */
  def fromByteArray(bytes: Array[Byte]): KeyStreamPartition = {
    val buffer = java.nio.ByteBuffer.wrap(bytes)
    // Scala evaluates arguments left to right: stream is read before partition.
    KeyStreamPartition(buffer.getInt, buffer.getInt)
  }
}
| bwsw/tstreams-transaction-server | src/main/scala/com/bwsw/tstreamstransactionserver/netty/server/transactionMetadataService/stateHandler/KeyStreamPartition.scala | Scala | apache-2.0 | 638 |
package ru.biocad.ig.common.structures.tree
import scala.collection.mutable
/**
* Created with IntelliJ IDEA.
* User: pavel
* Date: 03.06.14
* Time: 10:08
*/
/** A tree node that may carry any number of child nodes.
  *
  * NOTE(review): the child set is private and no mutator is defined in this
  * class, so as written `children` can only ever be empty — confirm how
  * children are intended to be added.
  */
class MultiTree[T](v : Option[T], w : Double) extends AbstractTree[T](v, w) {

  // Backing store for this node's children.
  private val childNodes = mutable.HashSet.empty[MultiTree[T]]

  /** Builds a node holding value `v` with weight `w` (defaults to 0.0). */
  def this(v : T, w : Double = 0.0) = this(Some(v), w)

  /** Builds an empty node with zero weight. */
  def this() = this(None, 0.0)

  override def children: Iterable[AbstractTree[T]] = childNodes
}
| zmactep/igcat | lib/ig-common/src/main/scala/ru/biocad/ig/common/structures/tree/MultiTree.scala | Scala | bsd-2-clause | 471 |
package de.leanovate.swaggercheck.shrinkable
import com.fasterxml.jackson.core.JsonGenerator
/** The JSON `null` literal as a check value.
  *
  * A singleton: every JSON null is identical, and null is already minimal so
  * there are no shrink candidates.
  */
case object CheckJsNull extends CheckJsValue {
  // This node is, by definition, the null value.
  override def isNull: Boolean = true
  // Emit a literal `null` token into the output stream.
  override def generate(json: JsonGenerator): Unit = json.writeNull()
  // Nothing smaller than null: shrinking yields no candidates.
  override def shrink: Stream[CheckJsValue] = Stream.empty
} | leanovate/swagger-check | json-schema-gen/src/main/scala/de/leanovate/swaggercheck/shrinkable/CheckJsNull.scala | Scala | mit | 312 |
package com.criteo.vizatra.vizsql
/** A letter-casing policy applied when rendering SQL keywords and identifiers. */
sealed trait Case { def format(str: String): String }

/** Renders the whole string in upper case. */
case object UpperCase extends Case {
  def format(str: String) = str.toUpperCase
}

/** Renders the whole string in lower case. */
case object LowerCase extends Case {
  def format(str: String) = str.toLowerCase
}

/** Upper-cases the first character and lower-cases the rest ("seLECT" -> "Select"). */
case object CamelCase extends Case {
  def format(str: String) =
    if (str.isEmpty) str
    else str.head.toString.toUpperCase + str.tail.toLowerCase
}
/** Rendering options for SQL output.
  *
  * @param pretty      when true, output is indented over multiple lines
  * @param keywords    casing applied to SQL keywords
  * @param identifiers casing applied to identifiers
  */
case class Style(pretty: Boolean, keywords: Case, identifiers: Case)

object Style {
  /** Pretty-printed output, UPPERCASE keywords, lowercase identifiers. */
  implicit val default: Style = Style(pretty = true, keywords = UpperCase, identifiers = LowerCase)
  /** Single-line output with the same casing as [[default]]. */
  val compact: Style = Style(pretty = false, keywords = UpperCase, identifiers = LowerCase)
}
/** An abstract, renderable fragment of SQL text.
  *
  * Fragments compose with the combinators below and are rendered by
  * [[Show.toSQL]]:
  *  - `a ~ b`  : plain concatenation
  *  - `a ~- b` : concatenation with a single space in between
  *  - `a ~/ b` : concatenation with a line break in between
  *  - `a ~| b` : `b` rendered as an indented sub-block of `a`
  * Each combinator also accepts an `Option`; `None` leaves the receiver
  * unchanged.
  */
sealed trait Show {
  def ~(o: Show) = Show.Group(List(this, o))
  def ~(o: Option[Show]) = o.fold[Show](this)(s => Show.Group(List(this, s)))
  def ~-(o: Show) = Show.Group(List(this, Show.Whitespace, o))
  def ~-(o: Option[Show]) = o.fold[Show](this)(s => Show.Group(List(this, Show.Whitespace, s)))
  def ~/(o: Show) = Show.Group(List(this, Show.NewLine, o))
  def ~/(o: Option[Show]) = o.fold[Show](this)(s => Show.Group(List(this, Show.NewLine, s)))
  def ~|(o: Show*) = Show.Group(List(this, Show.Indented(Show.Group(o.toList))))
  def ~|(o: Option[Show]) = o.fold[Show](this)(s => Show.Group(List(this, Show.Indented(Show.Group(List(s))))))

  /** Renders without parameter substitution; an error here is a programming bug. */
  def toSQL(style: Style): String = Show.toSQL(this, style, None).right.getOrElse(sys.error("WAT?"))

  /** Renders with placeholder substitution from named and positional values. */
  def toSQL(style: Style, placeholders: Placeholders, namedParameters: Map[String,Any], anonymousParameters: List[Any]) =
    Show.toSQL(this, style, Some((placeholders, namedParameters, anonymousParameters)))
}
object Show {

  /** Renders `show` to SQL text.
    *
    * Rendering accumulates string fragments into a list, then concatenates.
    * When `placeholders` is `Some(...)`, `Parameter` nodes are substituted
    * with SQL literals built from the supplied named/anonymous values;
    * a missing or unresolved value aborts rendering with a `Left(err)`.
    * When `placeholders` is `None`, parameters render as `?name` markers.
    */
  def toSQL(show: Show, style: Style, placeholders: Option[(Placeholders,Map[String,Any],List[Any])]): Either[Err,String] = {
    // One indentation step (emitted once per nesting level after a newline).
    val INDENT = " "
    // Internal control-flow exception: thrown when a placeholder cannot be
    // resolved, caught at the bottom and converted into a Left.
    case class MissingParameter(err: Err) extends Throwable
    // Drops a trailing newline (plus the indent fragments after it) so that
    // consecutive line breaks do not accumulate blank lines.
    def trimRight(parts: List[String]) = {
      val maybeT = parts.reverse.dropWhile(_ == INDENT)
      if(maybeT.headOption.exists(_ == "\\n")) {
        maybeT.tail.reverse
      } else parts
    }
    // Index of the next anonymous (unnamed) parameter value to consume.
    var pIndex = 0
    // Recursively renders `x` at the given indent level, appending fragments.
    def print(x: Show, indent: Int, parts: List[String]): List[String] = x match {
      case Keyword(k) => parts ++ (style.keywords.format(k) :: Nil)
      case Identifier(i) => parts ++ (style.identifiers.format(i) :: Nil)
      case Text(x) => parts ++ (x :: Nil)
      case Whitespace => parts ++ (" " :: Nil)
      case NewLine =>
        // Emit a line break followed by the current indentation.
        trimRight(parts) ++ ("\\n" :: (0 until indent).map(_ => INDENT).toList)
      case Indented(group) =>
        // Render the group one level deeper, surrounded by line breaks.
        print(NewLine, indent, trimRight(
          print(group.copy(items = NewLine :: group.items), indent + 1, parts)
        ))
      case Group(items) =>
        items.foldLeft(parts) {
          case (parts, i) => print(i, indent, parts)
        }
      case Parameter(placeholder) =>
        placeholders.map {
          case (placeholders, namedParameters, anonymousParameters) =>
            // Converts `value` to a SQL literal according to the placeholder's
            // resolved type; fails if the type could not be resolved.
            def param(paramType: Option[Type], value: Any): String = paramType.map { p =>
              def rec(value: FilledParameter) : String = value match {
                case StringParameter(s) => s"'${s.replace("'", "''")}'"
                case IntegerParameter(x) => x.toString
                case DateTimeParameter(t) => s"'${t.replace("'", "''")}'"
                case SetParameter(set) => "(" + set.map(rec).mkString(", ") + ")"
                case RangeParameter(low, high) => rec(low) + " AND " + rec(high)
                case x => throw new IllegalArgumentException(x.getClass.toString)
              }
              rec(Type.convertParam(p, value))
            }
            .getOrElse {
              throw new MissingParameter(ParameterError(
                "unresolved parameter", placeholder.pos
              ))
            }
            placeholder.name match {
              // Named placeholder with a supplied value.
              case Some(key) if namedParameters.contains(key) =>
                parts ++ (param(placeholders.find(_._1.name.exists(_ == key)).map(_._2), namedParameters(key)) :: Nil)
              // Anonymous placeholder: consume the next positional value.
              case None if pIndex < anonymousParameters.size =>
                val s = param(placeholders.filterNot(_._1.name.isDefined).drop(pIndex).headOption.map(_._2), anonymousParameters(pIndex))
                pIndex = pIndex + 1
                parts ++ (s :: Nil)
              // No value available for this placeholder: abort rendering.
              case x =>
                throw new MissingParameter(ParameterError(
                  s"""missing value for parameter ${placeholder.name.getOrElse("")}""", placeholder.pos
                ))
            }
        }.getOrElse {
          // No parameter values supplied: render the placeholder marker itself.
          parts ++ (s"""?${placeholder.name.getOrElse("")}""" :: Nil)
        }
    }
    try {
      Right(print(show, 0, Nil).mkString.trim)
    } catch {
      case MissingParameter(err) => Left(err)
    }
  }

  // --- AST node types for SQL fragments ---
  case class Keyword(keyword: String) extends Show
  case class Identifier(identifier: String) extends Show
  case class Text(chars: String) extends Show
  case class Indented(group: Group) extends Show
  case class Parameter(placeholder: Placeholder) extends Show
  case class Group(items: List[Show]) extends Show
  case object Whitespace extends Show
  case object NewLine extends Show

  // --- construction helpers ---
  def line = NewLine
  def nest(show: Show*) = Indented(Group(show.toList))
  def keyword(str: String) = Keyword(str)
  def ident(str: String) = Identifier(str)
  /** Interleaves `separator` between `items` (empty list renders nothing). */
  def join(items: List[Show], separator: Show) = {
    Group(items.dropRight(1).flatMap(_ :: separator :: Nil) ++ items.lastOption.map(_ :: Nil).getOrElse(Nil))
  }
  def ~?(placeholder: Placeholder) = Parameter(placeholder)
  // Lets plain strings be used where a Show fragment is expected.
  implicit def toText(str: String) = Text(str)
}
| criteo/vizsql | shared/src/main/scala/com/criteo/vizatra/vizsql/Show.scala | Scala | apache-2.0 | 5,520 |
package sgl.util
import org.scalatest.funsuite.AnyFunSuite
/** Contract tests that any RandomProvider implementation must satisfy:
  * the seed fully determines the generated stream of values.
  */
trait RandomProviderAbstractSuite extends AnyFunSuite with RandomProvider {

  test("Two instances from same seed produces same stream of random data") {
    // Two generators seeded identically must stay in lockstep.
    val first = Random.fromSeed(77)
    val second = Random.fromSeed(77)
    assert(first.nextInt() === second.nextInt())
    assert(first.nextInt() === second.nextInt())
    assert(first.nextLong() === second.nextLong())
  }

  test("A Random instance that reset the seed reproduces the same stream of random data") {
    // Record a few draws, reset the seed, and expect identical draws again.
    val rng = Random.fromSeed(12)
    val firstInt = rng.nextInt()
    val secondInt = rng.nextInt()
    val firstLong = rng.nextLong()
    rng.setSeed(12)
    assert(firstInt === rng.nextInt())
    assert(secondInt === rng.nextInt())
    assert(firstLong === rng.nextLong())
  }
}
// Runs the shared random-provider contract tests against DefaultRandomProvider.
class DefaultRandomProviderSuite extends RandomProviderAbstractSuite with DefaultRandomProvider
| regb/scala-game-library | core/src/test/scala/sgl/util/RandomProviderSuite.scala | Scala | mit | 830 |
class C1 { // resolve-test fixture: the /* line: N */ markers in this file reference absolute line numbers — do not add or remove lines
  def a {}
  case class A
}
class C2 extends C1 { // second level of the inheritance chain exercised by the resolve test below
  def b {}
  case class B
}
object O extends C2 { // each /* line: N */ or /* resolved: false */ comment asserts where the following reference must resolve
  def c {}
  case class C
  println(/* line: 2 */a)
  println(/* line: 7 */b)
  println(/* line: 12 */c)
  println(super./* line: 2 */a)
  println(super./* line: 7 */b)
  println(super./* resolved: false */c)
  println(/* */A.getClass)
  println(classOf[/* line: 3 */A])
  println(/* */B.getClass)
  println(classOf[/* line: 8 */B])
  println(/* */C.getClass)
  println(classOf[/* line: 13 */C])
  println(super./* */A.getClass)
  println(classOf[super./* line: 3 */A])
  println(super./* */B.getClass)
  println(classOf[super./* line: 8 */B])
  println(super./* resolved: false */C.getClass)
  println(classOf[super./* resolved: false */C])
} | ilinum/intellij-scala | testdata/resolve2/inheritance/super/multiple/Object.scala | Scala | apache-2.0 | 764 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import play.api.data.Form
import play.api.data.Forms.single
/** Play form binding a single required yes/no answer for the
  * "dispatch from warehouse" question.
  */
object DispatchFromWarehouseForm extends RequiredBooleanForm {
  // Message key reported when no answer is supplied.
  override val errorMsg = "validation.dispatchFromWarehouse.missing"
  // Name of the single boolean field bound by this form.
  val value = "value"
  val form: Form[Boolean] = Form(
    single(value -> requiredBoolean)
  )
}
| hmrc/vat-registration-frontend | app/forms/DispatchFromWarehouseForm.scala | Scala | apache-2.0 | 915 |
import java.util.concurrent.TimeUnit
import actors.Messages._
import actors._
import akka.actor.{ActorSystem, ActorRef, Inbox, Props}
import scala.concurrent.duration.{FiniteDuration, Duration}
/** Entry point for a small actor-based injector simulation: starts one
  * ExampleInjector per id, drives them with a scripted message sequence,
  * then shuts everything down.
  */
object Main {
  // Actor system hosting all simulator actors (created lazily on first use).
  lazy val system = ActorSystem("Test-Simulator")
  // Ids of the injectors this simulation drives.
  val injectorIds = Array(10, 494, 222)
  // Watches injector actors so their termination can be tracked.
  lazy val reaper = system.actorOf(Props[Reaper], name = "Reaper")
  // One ExampleInjector actor per id, each registered with the reaper.
  lazy val injectors: Map[Int, ActorRef] = injectorIds.map { id =>
    val injector = system.actorOf(Props(new ExampleInjector(id)), name = s"injector-for-$id")
    reaper ! WatchMe(injector)
    id -> injector
  }.toMap
  // Broadcasts `message` to every injector.
  def allInjectors(message: AnyRef) = injectors.values.foreach(_ ! message)
  def main(args: Array[String]) {
    println("hi there!")
    // NOTE(review): the Thread.sleep(50) calls below are ad-hoc pacing between
    // fire-and-forget messages, not real synchronization.
    Thread.sleep(50)
    val in = Inbox.create(system)
    allInjectors(Startup)
    // do work
    Thread.sleep(50)
    injectors(10) ! Inject("a message")
    injectors(10) ! LoginSession(SessionDetails("dave", "secret"))
    injectors(10) ! Inject("another message")
    Thread.sleep(50)
    allInjectors(PrintStatus)
    Thread.sleep(50)
    // Synchronous request/response through the inbox, with a 10 s timeout.
    in send (injectors(10), PollStatus)
    val any = in.receive(FiniteDuration(10, TimeUnit.SECONDS))
    println(s"polling returned $any")
    // do work
    Thread.sleep(50)
    allInjectors(Shutdown)
    Thread.sleep(50)
    //    try {
    //      val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds, Manager.Shutdown)
    //      Await.result(stopped, 6 seconds)
    //    } catch {
    //      case e: akka.pattern.AskTimeoutException =>
    //    }
    system.awaitTermination(Duration(10, TimeUnit.SECONDS))
    System.exit(0)
  }
}
| barkhorn/actortester | src/main/scala/Main.scala | Scala | apache-2.0 | 1,613 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.