repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
kornelrabczak/scalac-simple-profiling
|
build.sbt
|
<gh_stars>0
// Build definition for the scalac simple-profiling compiler plugin.
scalaVersion := "2.13.2"
version := "0.1.0-SNAPSHOT"
organization := "com.thecookiezen"
organizationName := "thecookiezen"
// Fail the build on deprecation/feature warnings so compiler-API drift is caught early.
scalacOptions ++= Seq("-deprecation", "-feature", "-Xfatal-warnings")
lazy val root = (project in file("."))
  .settings(
    name := "scalac-simple-profiler",
    // scala-compiler is a plain (non-cross-versioned) dependency; its version must
    // match scalaVersion above exactly.
    libraryDependencies += "org.scala-lang" % "scala-compiler" % "2.13.2",
    libraryDependencies += "com.lihaoyi" %% "pprint" % "0.5.9",
    libraryDependencies += "org.scalatest" %% "scalatest" % "3.1.1" % Test
  )
|
kornelrabczak/scalac-simple-profiling
|
src/main/scala/com/thecookiezen/tools/Logger.scala
|
package com.thecookiezen.tools
import scala.reflect.internal.util.NoPosition
/** Thin logging facade over the compiler's reporter, bound to a concrete Global. */
final class Logger[G <: scala.tools.nsc.Global](val global: G) {

  /** Pretty-prints `value` and reports it beneath `header` (unbounded render height). */
  def info[T: pprint.TPrint](header: String, value: T): Unit =
    info(header + ":\n" + pprint.tokenize(value, height = 100000000).mkString)

  /** Emits a plain informational message with no associated source position. */
  def info(msg: String): Unit = global.reporter.echo(NoPosition, msg)
}
|
kornelrabczak/scalac-simple-profiling
|
src/main/scala/com/thecookiezen/metrics/Timer.scala
|
<reponame>kornelrabczak/scalac-simple-profiling
package com.thecookiezen.metrics
import java.util.concurrent.atomic.AtomicLong
import com.thecookiezen.metrics.Timer.TimerSnapshot
import scala.runtime.LongRef
/** Accumulates elapsed nanoseconds per thread and in a global total. */
class Timer(val prefix: String) {

  // Per-thread running total; LongRef gives us a cheap mutable cell.
  private[this] val threadNanos = new ThreadLocal[LongRef] {
    override def initialValue(): LongRef = new LongRef(0)
  }

  // Grand total across all threads.
  private val totalNanos = new AtomicLong

  /** Total nanoseconds recorded so far, across every thread. */
  def nanos: Long = totalNanos.get

  /** Snapshot of (this thread's total so far, current wall clock in nanos). */
  def start: Timer.TimerSnapshot = (threadNanos.get.elem, System.nanoTime())

  /** Folds the time elapsed since `prev` into the per-thread and global totals. */
  def stop(prev: Timer.TimerSnapshot): Unit = {
    val (accumulated, startedAt) = prev
    val updatedThreadTotal = accumulated + (System.nanoTime() - startedAt)
    val local = threadNanos.get
    // The global counter advances by exactly what this thread gained.
    totalNanos.addAndGet(updatedThreadTotal - local.elem)
    local.elem = updatedThreadTotal
  }
}

object Timer {
  /** (thread-local nanos at start, System.nanoTime at start). */
  type TimerSnapshot = (Long, Long)

  def apply(prefix: String): Timer = new Timer(prefix)
}
|
kornelrabczak/scalac-simple-profiling
|
project/plugins.sbt
|
<filename>project/plugins.sbt
// Bloop server integration for fast compile/test outside sbt.
addSbtPlugin("ch.epfl.scala" % "sbt-bloop" % "1.4.1")
// sbt-assembly: packages the project and its dependencies into a single fat jar.
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.10")
|
nesstest/onslocal_bo
|
build.sbt
|
<reponame>nesstest/onslocal_bo<gh_stars>0
// Play Framework (Java flavour) application build.
name := """onslocal-bo"""
version := "1.0-SNAPSHOT"
lazy val root = (project in file(".")).enablePlugins(PlayJava)
scalaVersion := "2.11.7"
libraryDependencies ++= Seq(
  javaJdbc,
  cache,
  javaWs,
  javaJpa,
  // JPA provider and matching PostgreSQL JDBC driver (jre7 build).
  "org.eclipse.persistence" % "eclipselink" % "2.6.2",
  "org.postgresql" % "postgresql" % "9.4.1208.jre7"
)
EclipseKeys.projectFlavor := EclipseProjectFlavor.Java // Java project. Don't expect Scala IDE
EclipseKeys.createSrc := EclipseCreateSrc.ValueSet(EclipseCreateSrc.ManagedClasses, EclipseCreateSrc.ManagedResources) // Use .class files instead of generated .scala files for views and routes
// Compile first so the managed classes/resources referenced above exist when Eclipse metadata is generated.
EclipseKeys.preTasks := Seq(compile in Compile)
PlayKeys.externalizeResources := false
// Run the app in a forked JVM so sbt's own classpath/JVM settings don't leak in.
fork in run := true
|
jfalkner/file_backed_logs
|
src/test/scala/jfalkner/logs/LogsSpec.scala
|
package jfalkner.logs
import java.nio.file.Files
import java.time.Instant
import org.apache.commons.io.FileUtils
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
/**
 * Behavioural specification for the file-backed Logs trait: append-only logging
 * of case-class entries with load / squash / clear lifecycle operations.
 * Each example runs against a fresh temp directory via withCleanup.
 */
class LogsSpec extends Specification {

  // Three distinct fixture entries shared across examples.
  val (a, b, c) = (Foo("A"), Foo("B"), Foo("C"))
  val suffix = ".example.csv"

  "Logs" should {
    "log and read single value" in {
      withCleanup{ logs =>
        def l = logs.make[Foo](suffix)
        l.log(a)
        Set(a) mustEqual l.load
      }
    }
    "log and read multiple values" in {
      withCleanup{ logs =>
        def l = logs.make[Foo](suffix)
        l.log(a)
        l.log(b)
        l.log(c)
        Set(a, b, c) mustEqual l.load
      }
    }
    // Uses one fixed timestamp for all three writes: resolve() must bump the
    // millis on collision rather than overwrite an existing file.
    "avoid overwriting existing log files" in {
      withCleanup{ logs =>
        val ts = Instant.now()
        logs.log(suffix, ts)(a)
        logs.log(suffix, ts)(b)
        logs.log(suffix, ts)(c)
        Set(a, b, c) mustEqual logs.load[Foo](suffix)
      }
    }
    // squash() must merge the individual files into a single "all<suffix>" file
    // and delete the originals, keeping the data readable afterwards.
    "squash multiple files down to one" in {
      withCleanup{ logs =>
        def l = logs.make[Foo](suffix)
        val (pa, pb, pc) = (l.log(a), l.log(b), l.log(c))
        val all = l.squash()
        all.getFileName.toString mustEqual s"all$suffix"
        Set(a, b, c) mustEqual l.load()
        Seq(pa, pb, pc, all).map(_.toFile.exists) mustEqual(Seq(false, false, false, true))
      }
    }
    "logAll(Set)" in {
      withCleanup{ logs =>
        def l = logs.make[Foo]("set")
        l.logAll(Set(a, b, c))
        Set(a, b, c) mustEqual logs.load[Foo]("set")
      }
    }
    "logAll(Seq)" in {
      withCleanup{ logs =>
        def l = logs.make[Foo]("seq")
        l.logAll(Seq(a, b, c))
        Seq(a, b, c) mustEqual logs.load[Foo]("seq")
      }
    }
    "clear() removes old entries" in {
      withCleanup{ logs =>
        def l = logs.make[Foo]("clear")
        l.logAll(Seq(a, b))
        Seq(a, b) mustEqual l.load()
        l.clear()
        l.logAll(Seq(b, c))
        Seq(b, c) mustEqual l.load()
      }
    }
  }

  // Runs `f` against a Logs rooted in a fresh temp directory, always deleting
  // the directory afterwards so examples cannot leak state into each other.
  def withCleanup(f: (Logs) => MatchResult[Any]) : MatchResult[Any] = {
    val dir = Files.createTempDirectory("logs")
    try {
      f(new Logs {
        override val logsPath = dir
      })
    }
    finally {
      Seq(dir).foreach(p => FileUtils.deleteDirectory(p.toFile))
    }
  }
}
// Minimal single-field Product used as the log-entry fixture in this spec file.
case class Foo(a: String)
|
jfalkner/file_backed_logs
|
src/main/scala/jfalkner/logs/Logs.scala
|
package jfalkner.logs
import java.io.File
import java.nio.file.{Files, Path, Paths}
import java.time.Instant
import jfalkner.cc2csv.Csv.{marshall, unmarshall}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
// Logic related to marshall/unmarshall-ing log entries
// Logic related to marshall/unmarshall-ing log entries
trait Logs {

  // Root directory that holds every log file.
  val logsPath: Path

  // Directory names skipped by ls/lsdir; empty by default.
  lazy val ignoredDirs: Set[String] = Set()

  /** Builds a postfix-bound logger for entries of type P. */
  def make[P <: Product: ClassTag](postfix: String): Logger[P] = new Logger[P](postfix)

  // Keeps entries of the requested kind (file vs directory) that are not ignored.
  def filter(f: File, isdir: Boolean): Boolean = f.isDirectory == isdir && !ignoredDirs.contains(f.getName)

  // Lists the direct children of `path` that pass `filter`.
  def ls(path: Path, isdir: Boolean = false): Set[Path] =
    Files.newDirectoryStream(path).asScala.toList.filter(x => filter(x.toFile, isdir)).toSet

  def lsdir(path: Path): Set[Path] = ls(path, true)

  // Picks a unique "<timestamp><postfix>" path under logsPath, bumping the
  // timestamp by 1ms until there is no collision with an existing file.
  def resolve(postfix: String, ts: Instant = Instant.now()) : Path = {
    val path = logsPath.resolve(s"$ts$postfix")
    if (Files.exists(path)) resolve(postfix, ts.plusMillis(1)) else path
  }

  // save multiple
  // Writes all values newline-separated into one new file; None when `values` is empty.
  def logAll(postfix: String, ts: Instant = Instant.now())(values: Traversable[Product]) : Option[Path] =
    if (!values.isEmpty) Some(Files.write(resolve(postfix, ts), values.map(marshall).mkString("\n").getBytes)) else None

  // save single
  def log(postfix: String, ts: Instant = Instant.now())(value: Product) : Path =
    Files.write(resolve(postfix, ts), marshall(value).getBytes)

  // loads existing
  // Reads every file whose name ends in `postfix` (including a squashed "all" file)
  // and unmarshalls each line into a T.
  def load[T <: Product: ClassTag](postfix: String): Set[T] =
    ls(logsPath).filter(_.toString.endsWith(postfix)).flatMap(f => Files.readAllLines(f).asScala.map(unmarshall[T]))

  // condense a bunch down to done log -- write all then delete
  // NOTE(review): writes "all<postfix>" first and only then deletes the individual
  // files, so a crash in between may leave duplicates but never loses data.
  def squash[T <: Product: ClassTag](postfix: String): Path = {
    val all = Files.write(logsPath.resolve(s"all$postfix"), load[T](postfix).map(marshall).mkString("\n").getBytes)
    ls(logsPath).filter(_.toString.endsWith(postfix)).filter(!_.toString.endsWith(s"all$postfix")).foreach(Files.deleteIfExists)
    all
  }

  // postfix-specific logger
  /** Convenience wrapper binding a postfix and entry type to the enclosing Logs. */
  class Logger[P <: Product: ClassTag](postfix: String) {
    def log(value: P): Path = Logs.this.log(postfix)(value)
    def logAll(values: Traversable[P]): Option[Path] = Logs.this.logAll(postfix)(values)
    def load(): Set[P] = Logs.this.load[P](postfix)
    def squash(): Path = Logs.this.squash[P](postfix)
    // Squashes everything into the single "all" file, then deletes that file —
    // i.e. removes every logged entry.
    def clear(): Unit = Files.delete(squash)
  }
}
|
mosypov/future
|
future-scala/src/main/scala/io/trane/future/scala/Promise.scala
|
package io.trane.future.scala
import io.trane.future.{ Promise => JPromise, Future => JFuture }
import scala.util.{ Try, Success, Failure }
import scala.annotation.unchecked.uncheckedVariance
/** Scala-friendly facade over io.trane's Java Promise; a value class, so wrapping is allocation-free. */
class Promise[T](private[trane] val underlying: JPromise[T @uncheckedVariance]) extends AnyVal {

  /** Widening view onto the wrapped Java promise. */
  def toJava[B >: T]: JPromise[B] = underlying.asInstanceOf[JPromise[B]]

  /** The future satisfied by this promise. */
  def future: Future[T] = new Future(underlying)

  def isCompleted: Boolean = underlying.isDefined

  /** Completes the promise, failing loudly when it was already satisfied. */
  def complete(result: Try[T]) = {
    if (!tryComplete(result)) throw new IllegalStateException("Promise already completed.")
    this
  }

  /** Attempts completion; false when the promise was already satisfied. */
  def tryComplete(result: Try[T]): Boolean = {
    val outcome: JFuture[T] = result match {
      case Success(v)  => JFuture.value(v)
      case Failure(ex) => JFuture.exception(ex)
    }
    underlying.becomeIfEmpty(outcome)
  }

  final def completeWith(other: Future[T]) = tryCompleteWith(other)

  /** Links this promise to `other`, completing with its eventual result. */
  def tryCompleteWith(other: Future[T]) = {
    underlying.becomeIfEmpty(other.toJava)
    this
  }

  /** Completes successfully with `value`; delegates already-completed handling to the Java promise. */
  def success(value: T) = {
    underlying.setValue(value)
    this
  }

  def trySuccess(value: T): Boolean =
    underlying.becomeIfEmpty(JFuture.value(value))

  /** Completes with failure `cause`; delegates already-completed handling to the Java promise. */
  def failure(cause: Throwable) = {
    underlying.setException(cause)
    this
  }

  def tryFailure(cause: Throwable): Boolean =
    underlying.becomeIfEmpty(JFuture.exception(cause))
}
object Promise {

  /** An empty, not-yet-completed promise. */
  def apply[T](): Promise[T] = new Promise(JPromise.apply())

  /** A promise already completed with `exception`. */
  def failed[T](exception: Throwable): Promise[T] = {
    val jp = JPromise.apply[T]()
    jp.setException(exception)
    new Promise(jp)
  }

  /** A promise already completed with `result`. */
  def successful[T](result: T): Promise[T] = {
    val jp = JPromise.apply[T]()
    jp.setValue(result)
    new Promise(jp)
  }

  /** A promise pre-completed from a Try, success or failure accordingly. */
  def fromTry[T](result: Try[T]): Promise[T] =
    result match {
      case Failure(ex) => failed(ex)
      case Success(v)  => successful(v)
    }
}
|
mosypov/future
|
future-scala/src/main/scala/io/trane/future/scala/Await.scala
|
package io.trane.future.scala
import scala.concurrent.duration.Duration
/** Blocking entry points mirroring scala.concurrent.Await for trane futures. */
object Await {

  /** Blocks until `awaitable` completes or `atMost` elapses; returns this object for chaining. */
  def ready[T](awaitable: Future[T], atMost: Duration) = {
    awaitable.underlying.join(toJavaDuration(atMost))
    this
  }

  /** Blocks for the value of `awaitable`, rethrowing its failure. */
  def result[T](awaitable: Future[T], atMost: Duration): T =
    awaitable.underlying.get(toJavaDuration(atMost))

  // Infinite durations are approximated by Long.MaxValue nanoseconds (~292 years).
  private final def toJavaDuration(d: Duration) =
    if (!d.isFinite()) java.time.Duration.ofNanos(Long.MaxValue)
    else java.time.Duration.ofMillis(d.toMillis)
}
|
mosypov/future
|
future-scala/src/main/scala/io/trane/future/scala/package.scala
|
<filename>future-scala/src/main/scala/io/trane/future/scala/package.scala<gh_stars>100-1000
package io.trane.future
import io.trane.future.{ Future => JFuture, Promise => JPromise }
/** Implicit bridges between the Java (io.trane.future) and Scala facade types. */
package object scala {

  /** Adds `toScala` to a Java promise. */
  implicit class toScalaPromise[T](val p: JPromise[T]) extends AnyVal {
    def toScala: Promise[T] = new Promise(p)
  }

  /** Adds `toScala` to a Java future. */
  implicit class toScalaFuture[T](val fut: JFuture[T]) extends AnyVal {
    def toScala: Future[T] = new Future(fut)
  }

  // NOTE(review): the two wrappers below presumably do not extend AnyVal because
  // Promise/Future are themselves value classes, and a value class may not wrap
  // another user-defined value class — confirm before "fixing" the inconsistency.
  implicit class toJavaPromise[T](val p: Promise[T]) {
    def toJava: JPromise[T] = p.underlying
  }

  implicit class toJavaFuture[T](val fut: Future[T]) {
    def toJava: JFuture[T] = fut.underlying
  }
}
|
mosypov/future
|
future-scala/src/main/scala/io/trane/future/scala/Future.scala
|
<gh_stars>100-1000
package io.trane.future.scala
import io.trane.future.{ Future => JFuture, Promise => JPromise }
import scala.util.Try
import scala.util.Failure
import scala.util.Success
import java.util.concurrent.TimeUnit
import java.util.function.Consumer
import java.util.function.{ Function => JavaFunction }
import java.util.function.Predicate
import scala.reflect.ClassTag
import scala.collection.generic.CanBuildFrom
import java.util.Collection
import scala.concurrent.Awaitable
import scala.concurrent.duration.Duration
import scala.concurrent.CanAwait
import scala.annotation.unchecked.uncheckedVariance
import io.trane.future.Transformer
import io.trane.future.Responder
/** Companion with constructors and collection-level combinators for the trane-backed Future. */
object Future {

  /** Maps primitive classes to their boxed counterparts; used by `Future.mapTo` casts. */
  private[Future] val toBoxed = Map[Class[_], Class[_]](
    classOf[Boolean] -> classOf[java.lang.Boolean],
    classOf[Byte] -> classOf[java.lang.Byte],
    classOf[Char] -> classOf[java.lang.Character],
    classOf[Short] -> classOf[java.lang.Short],
    classOf[Int] -> classOf[java.lang.Integer],
    classOf[Long] -> classOf[java.lang.Long],
    classOf[Float] -> classOf[java.lang.Float],
    classOf[Double] -> classOf[java.lang.Double],
    classOf[Unit] -> classOf[scala.runtime.BoxedUnit])

  /** A future that never completes. */
  val never: Future[Nothing] = new Future(JFuture.never())

  /** A future already completed with Unit. */
  val unit: Future[Unit] = new Future(JFuture.value({}))

  /** A future already failed with `exception`. */
  def failed[T](exception: Throwable): Future[T] = new Future(JFuture.exception(exception))

  /** A future already completed with `result`. */
  def successful[T](result: T): Future[T] = new Future(JFuture.value(result))

  /** Lifts a Try into an already-completed future. */
  def fromTry[T](result: Try[T]): Future[T] =
    result match {
      case Success(value) => successful(value)
      case Failure(exception) => failed(exception)
    }

  /** Evaluates `body` via the underlying Java Future.apply. */
  def apply[T](body: => T): Future[T] = new Future(JFuture.apply(() => body))

  // Converts a collection of Scala futures into the java.util.List the Java API expects.
  private[this] def toJList[A, M[X] <: TraversableOnce[X]](in: M[Future[A]]) = {
    import scala.collection.JavaConverters._
    in.toSeq.map(_.toJava).asJava.asInstanceOf[java.util.List[JFuture[A]]]
  }

  /** Turns M[Future[A]] into Future[M[A]], preserving element order. */
  def sequence[A, M[X] <: TraversableOnce[X]](in: M[Future[A]])(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] = {
    new Future(JFuture.collect(toJList(in))).map { jList =>
      val builder = cbf()
      val size = jList.size
      var i = 0
      while (i < size) {
        builder += jList.get(i)
        i += 1
      }
      builder.result()
    }
  }

  /** Completes with the result of whichever future finishes first. */
  def firstCompletedOf[T](futures: TraversableOnce[Future[T]]): Future[T] =
    // Fix: dropped an unused `import scala.collection.JavaConverters._` that
    // previously shadowed nothing and only produced an unused-import warning.
    new Future(JFuture.firstCompletedOf(toJList(futures)))

  /** Scans `futures` sequentially and yields the first value satisfying `p`, if any. */
  def find[T](futures: scala.collection.immutable.Iterable[Future[T]])(p: T => Boolean): Future[Option[T]] = {
    // Failures and non-matching successes both fall through to the next future.
    def searchNext(i: Iterator[Future[T]]): Future[Option[T]] =
      if (!i.hasNext) successful[Option[T]](None)
      else {
        i.next().transformWith {
          case Success(r) if p(r) => successful(Some(r))
          case _ => searchNext(i) // was `case other` — binding was never used
        }
      }
    searchNext(futures.iterator)
  }

  /** Sequentially folds the results of `futures` from the left, starting at `zero`. */
  def foldLeft[T, R](futures: scala.collection.immutable.Iterable[Future[T]])(zero: R)(op: (R, T) => R): Future[R] =
    foldNext(futures.iterator, zero, op)

  // Recursive worker for foldLeft/reduceLeft; awaits each future before folding the next.
  private[this] def foldNext[T, R](i: Iterator[Future[T]], prevValue: R, op: (R, T) => R): Future[R] =
    if (!i.hasNext) successful(prevValue)
    else i.next().flatMap { value => foldNext(i, op(prevValue, value), op) }

  /** Like foldLeft but seeds with the first future's value; fails on empty input. */
  def reduceLeft[T, R >: T](futures: scala.collection.immutable.Iterable[Future[T]])(op: (R, T) => R): Future[R] = {
    val i = futures.iterator
    if (!i.hasNext) failed(new NoSuchElementException("reduceLeft attempted on empty collection"))
    else i.next() flatMap { v => foldNext(i, v, op) }
  }

  /** Applies `fn` to every element and sequences the resulting futures, preserving order. */
  def traverse[A, B, M[X] <: TraversableOnce[X]](in: M[A])(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] =
    new Future(JFuture.collect(toJList(in.map(fn)))).map { jList =>
      val builder = cbf()
      jList.forEach(builder += _)
      builder.result()
    }
}
/** Scala-friendly facade over io.trane's Java Future; a value class, so wrapping is allocation-free. */
class Future[+T](private[trane] val underlying: JFuture[T @uncheckedVariance]) extends AnyVal {

  /** Runs `f` with the completed Try once this future finishes. */
  def onComplete[U](f: Try[T] => U): Unit =
    underlying.respond(new Responder[T] {
      def onException(ex: Throwable) = f(Failure(ex))
      def onValue(v: T) = f(Success(v))
    })

  def isCompleted: Boolean = underlying.isDefined()

  /** Some(result) when already completed, None otherwise. */
  def value: Option[Try[T]] =
    if (underlying.isDefined())
      Some(Try(underlying.get(java.time.Duration.ofMillis(0))))
    else
      None

  /** Failure projection: succeeds with the Throwable, fails if this future succeeds. */
  def failed: Future[Throwable] =
    new Future(
      underlying.transformWith(new Transformer[T, JFuture[Throwable]] {
        override def onValue(value: T) = JFuture.exception(new NoSuchElementException("Future.failed not completed with a throwable."))
        override def onException(ex: Throwable) = JFuture.value(ex)
      }))

  /** Runs `f` on a successful value; a failure is silently ignored. */
  def foreach[U](f: T => U): Unit = underlying.onSuccess(v => f(v))

  /** Maps success through `s` and failure through `f`. */
  def transform[S](s: T => S, f: Throwable => Throwable): Future[S] =
    new Future(
      underlying.transformWith(new Transformer[T, JFuture[S]] {
        override def onValue(value: T) = JFuture.value(s(value))
        override def onException(ex: Throwable) = JFuture.exception(f(ex))
      }))

  /** Transforms the completed Try into a new Try. */
  def transform[S](f: Try[T] => Try[S]): Future[S] = {
    // Fix: inner type parameter renamed from T to A — it shadowed the class's T.
    def toJFuture[A](t: Try[A]): JFuture[A] =
      t match {
        case Success(r) => JFuture.value(r)
        case Failure(ex) => JFuture.exception(ex)
      }
    new Future[S](
      underlying.transformWith(new Transformer[T, JFuture[S]] {
        override def onValue(value: T) = toJFuture(f(Success(value)))
        override def onException(ex: Throwable) = toJFuture(f(Failure(ex)))
      }))
  }

  /** Chains a new future computed from the completed Try. */
  def transformWith[S](f: Try[T] => Future[S]): Future[S] =
    new Future(
      underlying.transformWith(new Transformer[T, JFuture[S]] {
        override def onValue(value: T) = f(Success(value)).underlying
        override def onException(ex: Throwable) = f(Failure(ex)).underlying
      }))

  def map[S](f: T => S): Future[S] = new Future[S](underlying.map(v => f(v)))

  def flatMap[S](f: T => Future[S]): Future[S] =
    new Future[S](underlying.flatMap[S](v => f(v).underlying))

  /** Flattens a Future[Future[S]] into a Future[S]. */
  def flatten[S](implicit ev: T <:< Future[S]): Future[S] =
    new Future(JFuture.flatten(underlying.asInstanceOf[JFuture[JFuture[S]]]))

  /** Fails with NoSuchElementException when the value does not satisfy `p`. */
  def filter(p: T => Boolean): Future[T] =
    map { r => if (p(r)) r else throw new NoSuchElementException("Future.filter predicate is not satisfied") }

  final def withFilter(p: T => Boolean): Future[T] = filter(p)

  /** Maps through `pf`; fails when `pf` is not defined at the value. */
  def collect[S](pf: PartialFunction[T, S]): Future[S] =
    new Future[S](underlying.map {
      r => pf.applyOrElse(r, (t: T) => throw new NoSuchElementException("Future.collect partial function is not defined at: " + t))
    })

  /** Recovers a failure into a value via `pf`; successes pass through. */
  def recover[U >: T](pf: PartialFunction[Throwable, U]): Future[U] =
    new Future(
      underlying.transformWith(new Transformer[T, JFuture[U]] {
        override def onValue(value: T) = JFuture.value(value)
        override def onException(ex: Throwable) = JFuture.value(pf(ex))
      }))

  /** Recovers a failure into a new future via `pf`; successes pass through. */
  def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] =
    new Future(
      underlying.transformWith(new Transformer[T, JFuture[U]] {
        override def onValue(value: T) = JFuture.value(value)
        override def onException(ex: Throwable) = pf(ex).underlying
      }))

  /** Pairs this future's result with `that`'s. */
  def zip[U](that: Future[U]): Future[(T, U)] =
    new Future(underlying.biMap[U, (T, U)](that.underlying, (a, b) => (a, b)))

  /** Combines this future's result with `that`'s through `f`. */
  def zipWith[U, R](that: Future[U])(f: (T, U) => R): Future[R] =
    new Future(underlying.biMap[U, R](that.underlying, (a, b) => f(a, b)))

  /** Falls back to `that` when this future fails. */
  def fallbackTo[U >: T](that: Future[U]): Future[U] =
    // Fix: partial-function literal instead of the deprecated PartialFunction.apply.
    recoverWith { case _ => that }

  /** Casts the result to S, routing primitives through their boxed classes. */
  def mapTo[S](implicit tag: ClassTag[S]): Future[S] = {
    val boxedClass = {
      val c = tag.runtimeClass
      if (c.isPrimitive) Future.toBoxed(c) else c
    }
    require(boxedClass ne null)
    map(s => boxedClass.cast(s).asInstanceOf[S])
  }

  /** Runs `pf` as a side effect on completion; the original result flows through unchanged. */
  def andThen[U](pf: PartialFunction[Try[T], U]): Future[T] =
    new Future(underlying.respond(new Responder[T] {
      def onException(ex: Throwable) = pf.applyOrElse[Try[T], Any](Failure(ex), Predef.identity[Try[T]])
      def onValue(v: T) = pf.applyOrElse[Try[T], Any](Success(v), Predef.identity[Try[T]])
    }))
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/TransactionalWrite.scala
|
<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import com.engineplus.star.meta.CommitType
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.datasources.{BasicWriteJobStatsTracker, FileFormatWriter, WriteJobStatsTracker}
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.schema.{InvariantCheckerExec, Invariants, SchemaUtils}
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.utils.{DataFileInfo, MaterialViewInfo}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
 * Mixin for Transaction that performs the physical write of a Dataset into a
 * StarLake table: schema normalization, partition-column resolution, invariant
 * checking and the FileFormatWriter job itself.
 */
trait TransactionalWrite {
  self: Transaction =>

  protected def snapshot: Snapshot

  protected var commitType: Option[CommitType]

  protected var shortTableName: Option[String]

  protected var materialInfo: Option[MaterialViewInfo]

  // Flipped to true by writeFiles once any data has been written in this transaction.
  protected var hasWritten = false

  // Commit protocol that defers file visibility until the transaction commits.
  protected def getCommitter(outputPath: Path): DelayedCommitProtocol =
    new DelayedCommitProtocol("star", outputPath.toString, None)

  /**
   * Normalize the schema of the query, and return the QueryExecution to execute. The output
   * attributes of the QueryExecution may not match the attributes we return as the output schema.
   * This is because streaming queries create `IncrementalExecution`, which cannot be further
   * modified. We can however have the Parquet writer use the physical plan from
   * `IncrementalExecution` and the output schema provided through the attributes.
   */
  protected def normalizeData(data: Dataset[_]): (QueryExecution, Seq[Attribute]) = {
    val normalizedData = SchemaUtils.normalizeColumnNames(tableInfo.schema, data)
    val cleanedData = SchemaUtils.dropNullTypeColumns(normalizedData)
    val queryExecution = if (cleanedData.schema != normalizedData.schema) {
      // For batch executions, we need to use the latest DataFrame query execution
      cleanedData.queryExecution
    } else {
      // For streaming workloads, we need to use the QueryExecution created from StreamExecution
      data.queryExecution
    }
    queryExecution -> cleanedData.queryExecution.analyzed.output
  }

  // Resolves the range and hash partition columns against the normalized output
  // attributes; returns only the RANGE partition columns (used as directory partitions).
  protected def getPartitioningColumns(rangePartitionSchema: StructType,
                                       hashPartitionSchema: StructType,
                                       output: Seq[Attribute],
                                       colsDropped: Boolean): Seq[Attribute] = {
    val rangePartitionColumns: Seq[Attribute] = rangePartitionSchema.map { col =>
      // schema is already normalized, therefore we can do an equality check
      output.find(f => f.name == col.name)
        .getOrElse {
          throw StarLakeErrors.partitionColumnNotFoundException(col.name, output)
        }
    }
    val hashPartitionColumns: Seq[Attribute] = hashPartitionSchema.map { col =>
      // schema is already normalized, therefore we can do an equality check
      output.find(f => f.name == col.name)
        .getOrElse {
          throw StarLakeErrors.partitionColumnNotFoundException(col.name, output)
        }
    }
    val partitionColumns = rangePartitionColumns ++ hashPartitionColumns
    // Partitioning on every column would leave no data columns — reject it.
    if (partitionColumns.nonEmpty && partitionColumns.length == output.length) {
      throw StarLakeErrors.nonPartitionColumnAbsentException(colsDropped)
    }
    rangePartitionColumns
  }

  def writeFiles(data: Dataset[_]): Seq[DataFileInfo] = writeFiles(data, None, isCompaction = false)

  def writeFiles(data: Dataset[_], writeOptions: Option[StarLakeOptions]): Seq[DataFileInfo] =
    writeFiles(data, writeOptions, isCompaction = false)

  def writeFiles(data: Dataset[_], isCompaction: Boolean): Seq[DataFileInfo] =
    writeFiles(data, None, isCompaction = isCompaction)

  /**
   * Writes out the dataframe after performing schema validation. Returns a list of
   * actions to append these files to the reservoir.
   */
  def writeFiles(oriData: Dataset[_],
                 writeOptions: Option[StarLakeOptions],
                 isCompaction: Boolean): Seq[DataFileInfo] = {
    var updateMaterialView = false
    if (writeOptions.isDefined) {
      updateMaterialView = writeOptions.get.updateMaterialView
    }
    //can't update material view
    if (snapshot.getTableInfo.is_material_view && !updateMaterialView) {
      throw StarLakeErrors.updateMaterialViewWithCommonOperatorException()
    }
    // Hash-partitioned tables are pre-shuffled into bucket_num partitions on the hash columns.
    val data = if (tableInfo.hash_partition_columns.nonEmpty) {
      oriData.repartition(tableInfo.bucket_num, tableInfo.hash_partition_columns.map(col): _*)
    } else {
      oriData
    }
    hasWritten = true
    val spark = data.sparkSession
    spark.sessionState.conf.setConfString(SQLConf.UNSUPPORTED_OPERATION_CHECK_ENABLED.key, "false")
    //If this is the first time to commit, you need to check if there is data in the path where the table is located.
    //If there has data, you cannot create a new table
    if (isFirstCommit) {
      val table_path = new Path(table_name)
      val fs = table_path.getFileSystem(spark.sessionState.newHadoopConf())
      if (fs.exists(table_path) && fs.listStatus(table_path).nonEmpty) {
        throw StarLakeErrors.failedCreateTableException(table_name)
      }
    }
    val rangePartitionSchema = tableInfo.range_partition_schema
    val hashPartitionSchema = tableInfo.hash_partition_schema
    val outputPath = tableInfo.table_path
    val (queryExecution, output) = normalizeData(data)
    val partitioningColumns =
      getPartitioningColumns(
        rangePartitionSchema,
        hashPartitionSchema,
        output,
        output.length < data.schema.size)
    val committer = getCommitter(outputPath)
    //add not null check to primary key
    val invariants = Invariants.getFromSchema(tableInfo.schema, spark)
    SQLExecution.withNewExecutionId(queryExecution) {
      val outputSpec = FileFormatWriter.OutputSpec(
        outputPath.toString,
        Map.empty,
        output)
      // Compaction rewrites already-written data, so the invariant checker is skipped.
      val physicalPlan = if (isCompaction) {
        queryExecution.executedPlan
      } else {
        InvariantCheckerExec(queryExecution.executedPlan, invariants)
      }
      val statsTrackers: ListBuffer[WriteJobStatsTracker] = ListBuffer()
      val basicWriteJobStatsTracker = new BasicWriteJobStatsTracker(
        new SerializableConfiguration(spark.sessionState.newHadoopConf()),
        BasicWriteJobStatsTracker.metrics)
      statsTrackers.append(basicWriteJobStatsTracker)
      // Bucket (and sort) by the hash columns when a hash column is configured.
      val hashBucketSpec = tableInfo.hash_column match {
        case "" => None
        case _ => Option(BucketSpec(tableInfo.bucket_num,
          tableInfo.hash_partition_columns,
          tableInfo.hash_partition_columns))
      }
      val sqlConf = spark.sessionState.conf
      val writeOptions = new mutable.HashMap[String, String]()
      if (sqlConf.getConf(StarLakeSQLConf.PARQUET_COMPRESSION_ENABLE)) {
        writeOptions.put("compression", sqlConf.getConf(StarLakeSQLConf.PARQUET_COMPRESSION))
      } else {
        writeOptions.put("compression", "uncompressed")
      }
      // Map("parquet.block.size" -> spark.sessionState.conf.getConf(StarLakeSQLConf.PARQUET_BLOCK_SIZE).toString)
      FileFormatWriter.write(
        sparkSession = spark,
        plan = physicalPlan,
        fileFormat = snapshot.fileFormat, // TODO doesn't support changing formats.
        committer = committer,
        outputSpec = outputSpec,
        hadoopConf = spark.sessionState.newHadoopConfWithOptions(snapshot.getConfiguration),
        partitionColumns = partitioningColumns,
        bucketSpec = hashBucketSpec,
        statsTrackers = statsTrackers,
        options = writeOptions.toMap)
    }
    // Files produced by a CompactionCommit are flagged as base files.
    val is_base_file = if (commitType.nonEmpty && commitType.get.name.equals("CompactionCommit")) {
      true
    } else {
      false
    }
    val partitionCols = tableInfo.range_partition_columns
    //Returns the absolute path to the file
    val real_write_cols = data.schema.fieldNames.filter(!partitionCols.contains(_)).mkString(",")
    committer.addedStatuses.map(file => file.copy(
      file_exist_cols = real_write_cols,
      is_base_file = is_base_file))
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/commands/UpdateSuiteBase.scala
|
<reponame>engine-plus/StarLake<filename>src/test/scala/org/apache/spark/sql/star/commands/UpdateSuiteBase.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import java.io.File
import java.util.Locale
import com.engineplus.star.tables.StarTable
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.test.StarLakeTestUtils
import org.apache.spark.sql.test.{SQLTestUtils, SharedSparkSession}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.util.Utils
import org.scalatest.BeforeAndAfterEach
import scala.language.implicitConversions
abstract class UpdateSuiteBase
extends QueryTest
with SharedSparkSession
with BeforeAndAfterEach
with SQLTestUtils
with StarLakeTestUtils {
import testImplicits._
var tempDir: File = _
var snapshotManagement: SnapshotManagement = _
protected def tempPath = tempDir.getCanonicalPath
protected def readStarLakeTable(path: String): DataFrame = {
spark.read.format("star").load(path)
}
override def beforeEach() {
super.beforeEach()
tempDir = Utils.createTempDir()
snapshotManagement = SnapshotManagement(new Path(tempPath))
}
override def afterEach() {
try {
Utils.deleteRecursively(tempDir)
try {
StarTable.forPath(snapshotManagement.table_name).dropTable()
} catch {
case e: Exception =>
}
} finally {
super.afterEach()
}
}
protected def executeUpdate(tarTable: String, set: Seq[String], where: String): Unit = {
executeUpdate(tarTable, set.mkString(", "), where)
}
protected def executeUpdate(tarTable: String, set: String, where: String = null): Unit
protected def append(df: DataFrame, partitionBy: Seq[String] = Nil): Unit = {
val writer = df.write.format("star").mode("append")
if (partitionBy.nonEmpty) {
writer.partitionBy(partitionBy: _*)
}
writer.save(snapshotManagement.table_name)
}
protected def appendHashPartition(df: DataFrame, partitionBy: Seq[String] = Nil): Unit = {
val writer = df.write.format("star").mode("append")
if (partitionBy.nonEmpty) {
writer.partitionBy(partitionBy: _*)
}
writer
.option("hashPartitions", "hash")
.option("hashBucketNum", "2")
.save(snapshotManagement.table_name)
}
protected def executeUpsert(df: DataFrame): Unit = {
StarTable.forPath(snapshotManagement.table_name)
.upsert(df)
}
implicit def jsonStringToSeq(json: String): Seq[String] = json.split("\n")
protected def checkUpdate(condition: Option[String],
setClauses: String,
expectedResults: Seq[Row],
colNames: Seq[String],
tableName: Option[String] = None): Unit = {
executeUpdate(tableName.getOrElse(s"star.`$tempPath`"), setClauses, where = condition.orNull)
checkAnswer(readStarLakeTable(tempPath).select(colNames.map(col): _*), expectedResults)
}
test("basic case") {
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"))
checkUpdate(condition = None, setClauses = "key = 1, value = 2",
expectedResults = Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Nil,
Seq("key", "value"))
}
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - Star table by path - Partition=$isPartitioned") {
withTable("starTable") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(
condition = Some("key >= 1"),
setClauses = "value = key + value, key = key + 1",
expectedResults = Row(0, 3) :: Row(2, 5) :: Row(2, 2) :: Row(3, 4) :: Nil,
Seq("key", "value"),
tableName = Some(s"star.`$tempPath`"))
}
}
}
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - Star table by path with hash partition - Partition=$isPartitioned") {
withTable("starTable") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
appendHashPartition(Seq((2, 2, 2), (1, 4, 4), (1, 1, 1), (0, 3, 3))
.toDF("key", "hash", "value"), partitions)
checkUpdate(
condition = Some("key >= 1"),
setClauses = "value = key + value, key = key + 1",
expectedResults = Row(0, 3, 3) :: Row(2, 4, 5) :: Row(2, 1, 2) :: Row(3, 2, 4) :: Nil,
Seq("key", "hash", "value"),
tableName = Some(s"star.`$tempPath`"))
}
}
}
Seq(true).foreach { isPartitioned =>
test(s"basic update - with hash partition - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
appendHashPartition(Seq((2, 2, 2), (1, 1, 4), (1, 2, 1), (0, 0, 3))
.toDF("key", "hash", "value"), partitions)
checkUpdate(condition = Some("value > 2"), setClauses = "value = 1",
expectedResults = Row(2, 2, 2) :: Row(1, 1, 1) :: Row(1, 2, 1) :: Row(0, 0, 1) :: Nil,
Seq("key", "hash", "value"))
}
}
Seq(true).foreach { isPartitioned =>
test(s"upsert before update - with hash partition - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
appendHashPartition(Seq((2, 2, 1), (1, 2, 1), (0, 0, 1))
.toDF("key", "hash", "value"), partitions)
executeUpsert(Seq((2, 2, 2), (1, 3, 2), (0, 0, 2))
.toDF("key", "hash", "value"))
checkUpdate(condition = Some("hash = 2"), setClauses = "value = 3",
expectedResults = Row(2, 2, 3) :: Row(1, 2, 3) :: Row(1, 3, 2) :: Row(0, 0, 2) :: Nil,
Seq("key", "hash", "value"))
}
}
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - Star table by name - Partition=$isPartitioned") {
withTable("star_table") {
val partitionByClause = if (isPartitioned) "PARTITIONED BY (key)" else ""
sql(
s"""
|CREATE TABLE star_table(key INT, value INT)
|USING star
|OPTIONS('path'='$tempPath')
|$partitionByClause
""".stripMargin)
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"))
checkUpdate(
condition = Some("key >= 1"),
setClauses = "value = key + value, key = key + 1",
expectedResults = Row(0, 3) :: Row(2, 5) :: Row(2, 2) :: Row(3, 4) :: Nil,
Seq("key", "value"),
tableName = Some("star_table"))
}
}
}
// NULL semantics in the UPDATE condition: `= null` evaluates to null (no-op),
// while IS NULL / IS NOT NULL / <=> match as SQL three-valued logic dictates.
// Note each checkUpdate runs against the state left by the previous one.
Seq(true, false).foreach { isPartitioned =>
test(s"table has null values - partitioned=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq(("a", 1), (null, 2), (null, 3), ("d", 4)).toDF("value", "key"), partitions)
// predicate evaluates to null; no-op
checkUpdate(condition = Some("value = null"),
setClauses = "value = -1",
expectedResults =
Row("a", 1) :: Row(null, 2) :: Row(null, 3) :: Row("d", 4) :: Nil,
Seq("value", "key")
// Seq(("a", 1), (null, 2), (null, 3), ("d", 4)).toDF("key", "value")
)
// ordinary equality still matches the non-null row
checkUpdate(condition = Some("value = 'a'"),
setClauses = "key = -1",
expectedResults = Row("a", -1) :: Row(null, 2) :: Row(null, 3) :: Row("d", 4) :: Nil,
Seq("value", "key"))
checkUpdate(condition = Some("value is null"),
setClauses = "key = -2",
expectedResults = Row("a", -1) :: Row(null, -2) :: Row(null, -2) :: Row("d", 4) :: Nil,
Seq("value", "key"))
checkUpdate(condition = Some("value is not null"),
setClauses = "key = -3",
expectedResults = Row("a", -3) :: Row(null, -2) :: Row(null, -2) :: Row("d", -3) :: Nil,
Seq("value", "key"))
// null-safe equality (<=>) matches the null rows
checkUpdate(condition = Some("value <=> null"),
setClauses = "key = -4",
expectedResults = Row("a", -3) :: Row(null, -4) :: Row(null, -4) :: Row("d", -3) :: Nil,
Seq("value", "key"))
}
}
// A constantly-false condition must leave the table byte-for-byte unchanged.
test("basic case - condition is false") {
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"))
checkUpdate(condition = Some("1 != 1"), setClauses = "key = 1, value = 2",
expectedResults = Row(2, 2) :: Row(1, 4) :: Row(1, 1) :: Row(0, 3) :: Nil,
Seq("key", "value"))
}
// A constantly-true condition rewrites every row.
test("basic case - condition is true") {
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"))
checkUpdate(condition = Some("1 = 1"), setClauses = "key = 1, value = 2",
expectedResults = Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Nil,
Seq("key", "value"))
}
// With a hash-partitioned table, updating every row collapses duplicates on the
// hash key, so 4 input rows become 2 result rows (one per distinct hash value).
test("basic case with hash partition - condition is true") {
appendHashPartition(Seq((2, 2, 2), (1, 2, 4), (1, 3, 1), (0, 3, 3))
.toDF("key", "hash", "value"))
checkUpdate(condition = Some("1 = 1"), setClauses = "key = 1, value = 2",
expectedResults = Row(1, 2, 2) :: Row(1, 3, 2) :: Nil,
Seq("key", "hash", "value"))
}
// UPDATE without a WHERE clause rewrites the whole table.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - without where - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = None, setClauses = "key = 1, value = 2",
expectedResults = Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Row(1, 2) :: Nil,
Seq("key", "value"))
}
}
// SET clause naming only a subset of columns must leave the others untouched.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - without where and partial columns - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = None, setClauses = "key = 1",
expectedResults = Row(1, 1) :: Row(1, 2) :: Row(1, 3) :: Row(1, 4) :: Nil,
Seq("key", "value"))
}
}
// SET clauses need not follow the table's column order.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - without where and out-of-order columns - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = None, setClauses = "value = 3, key = 1",
expectedResults = Row(1, 3) :: Row(1, 3) :: Row(1, 3) :: Row(1, 3) :: Nil,
Seq("key", "value"))
}
}
// SET expressions referencing other columns must read the PRE-update values
// (e.g. `value = key + 3` uses the old key, even though key is also updated).
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - without where and complex input - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = None, setClauses = "value = key + 3, key = key + 1",
expectedResults = Row(1, 3) :: Row(2, 4) :: Row(2, 4) :: Row(3, 5) :: Nil,
Seq("key", "value"))
}
}
// WHERE clause limits the rewrite to matching rows.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - with where - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = Some("key = 1"), setClauses = "value = 3, key = 1",
expectedResults = Row(1, 3) :: Row(2, 2) :: Row(0, 3) :: Row(1, 3) :: Nil,
Seq("key", "value"))
}
}
// WHERE on the hash column of a hash-partitioned table touches exactly one row.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update with hash partition - with where - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
appendHashPartition(Seq((2, 2, 2), (1, 4, 4), (1, 1, 1), (0, 3, 3))
.toDF("key", "hash", "value"), partitions)
checkUpdate(condition = Some("hash = 1"), setClauses = "value = 3, key = 1",
expectedResults = Row(2, 2, 2) :: Row(1, 4, 4) :: Row(1, 1, 3) :: Row(0, 3, 3) :: Nil,
Seq("key", "hash", "value"))
}
}
// WHERE plus cross-column SET expressions combined.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - with where and complex input - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = Some("key >= 1"), setClauses = "value = key + value, key = key + 1",
expectedResults = Row(0, 3) :: Row(2, 5) :: Row(2, 2) :: Row(3, 4) :: Nil,
Seq("key", "value"))
}
}
// A WHERE clause matching nothing must be a no-op.
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - with where and no row matched - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = Some("key >= 10"), setClauses = "value = key + value, key = key + 1",
expectedResults = Row(0, 3) :: Row(1, 1) :: Row(1, 4) :: Row(2, 2) :: Nil,
Seq("key", "value"))
}
}
// SET expressions mixing int and string operands must be coerced back to the
// column's declared type (int + cast(... as String), int + '1').
Seq(true, false).foreach { isPartitioned =>
test(s"type mismatch - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"), partitions)
checkUpdate(condition = Some("key >= 1"),
setClauses = "value = key + cast(value as String), key = key + '1'",
expectedResults = Row(0, 3) :: Row(2, 5) :: Row(3, 4) :: Row(2, 2) :: Nil,
Seq("key", "value"))
}
}
// Regression: re-analyzing the plan must not apply TypeCoercion a second time
// (decimal multiplication in the condition would otherwise change type/result).
Seq(true, false).foreach { isPartitioned =>
test(s"basic update - TypeCoercion twice - Partition=$isPartitioned") {
val partitions = if (isPartitioned) "key" :: Nil else Nil
append(Seq((99, 2), (100, 4), (101, 3)).toDF("key", "value"), partitions)
checkUpdate(
condition = Some("cast(key as long) * cast('1.0' as decimal(38, 18)) > 100"),
setClauses = "value = -3",
expectedResults = Row(100, 4) :: Row(101, -3) :: Row(99, 2) :: Nil,
Seq("key", "value"))
}
}
// test("update cached table") {
// Seq((2, 2), (1, 4)).toDF("key", "value")
// .write.mode("overwrite").format("star").save(tempPath)
//
// spark.read.format("star").load(tempPath).cache()
// spark.read.format("star").load(tempPath).collect()
//
// executeUpdate(s"star.`$tempPath`", set = "key = 3")
// checkAnswer(spark.read.format("star").load(tempPath), Row(3, 2) :: Row(3, 4) :: Nil)
// }
// Column references in condition and SET may be bare, backquoted, table-qualified,
// or fully backquote-qualified; all four forms must resolve to the same columns.
// Each checkUpdate builds on the state left by the previous one.
test("different variations of column references") {
append(Seq((99, 2), (100, 4), (101, 3), (102, 5)).toDF("key", "value"))
spark.read.format("star").load(tempPath).createOrReplaceTempView("tblName")
checkUpdate(condition = Some("key = 99"), setClauses = "value = -1",
Row(99, -1) :: Row(100, 4) :: Row(101, 3) :: Row(102, 5) :: Nil,
Seq("key", "value"))
checkUpdate(condition = Some("`key` = 100"), setClauses = "`value` = -1",
Row(99, -1) :: Row(100, -1) :: Row(101, 3) :: Row(102, 5) :: Nil,
Seq("key", "value"))
checkUpdate(condition = Some("tblName.key = 101"), setClauses = "tblName.value = -1",
Row(99, -1) :: Row(100, -1) :: Row(101, -1) :: Row(102, 5) :: Nil,
Seq("key", "value"), Some("tblName"))
checkUpdate(condition = Some("`tblName`.`key` = 102"), setClauses = "`tblName`.`value` = -1",
Row(99, -1) :: Row(100, -1) :: Row(101, -1) :: Row(102, -1) :: Nil,
Seq("key", "value"), Some("tblName"))
}
// SET targets may carry db/table qualifiers AND address nested fields whose names
// themselves contain dots (escaped with backquotes, e.g. a.`b.1`).
test("tarTable columns can have db and table qualifiers") {
withTable("tarTable") {
spark.read.json(
"""
{"a": {"b.1": 1, "c.e": 'random'}, "d": 1}
{"a": {"b.1": 3, "c.e": 'string'}, "d": 2}"""
.split("\n").toSeq.toDS()).write.format("star").saveAsTable("`tarTable`")
executeUpdate(
tarTable = "tarTable",
set = "`default`.`tarTable`.a.`b.1` = -1, tarTable.a.`c.e` = 'RANDOM'",
where = "d = 1")
checkAnswer(spark.table("tarTable"),
spark.read.json(
"""
{"a": {"b.1": -1, "c.e": 'RANDOM'}, "d": 1}
{"a": {"b.1": 3, "c.e": 'string'}, "d": 2}"""
.split("\n").toSeq.toDS()))
}
}
// Updating a path that holds plain parquet (not a star table) must fail analysis.
test("Negative case - non-star tarTable") {
Seq((1, 1), (0, 3), (1, 5)).toDF("key1", "value")
.write.mode("overwrite").format("parquet").save(tempPath)
val e = intercept[AnalysisException] {
executeUpdate(tarTable = s"star.`$tempPath`", set = "key1 = 3")
}.getMessage
assert(e.contains("doesn't exist"))
}
// SET/WHERE column resolution must honor spark.sql.caseSensitive:
// case-insensitive mode resolves S/T to s/t but rejects duplicate SET targets;
// case-sensitive mode rejects any reference that does not match exactly.
test("Negative case - check tarTable columns during analysis") {
withTable("table") {
sql(s"CREATE TABLE table (s int, t string) USING star PARTITIONED BY (s) LOCATION '${tempDir.getCanonicalPath}'")
var ae = intercept[AnalysisException] {
executeUpdate("table", set = "column_doesnt_exist = 'San Francisco'", where = "t = 'a'")
}
assert(ae.message.contains("cannot resolve"))
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
executeUpdate(tarTable = "table", set = "S = 1, T = 'b'", where = "T = 'a'")
ae = intercept[AnalysisException] {
executeUpdate(tarTable = "table", set = "S = 1, s = 'b'", where = "s = 1")
}
assert(ae.message.contains("There is a conflict from these SET columns"))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
// unresolved column in SET target
ae = intercept[AnalysisException] {
executeUpdate(tarTable = "table", set = "S = 1", where = "t = 'a'")
}
assert(ae.message.contains("cannot resolve"))
ae = intercept[AnalysisException] {
executeUpdate(tarTable = "table", set = "S = 1, s = 'b'", where = "s = 1")
}
assert(ae.message.contains("cannot resolve"))
// unresolved column in condition
ae = intercept[AnalysisException] {
executeUpdate(tarTable = "table", set = "s = 1", where = "T = 'a'")
}
assert(ae.message.contains("cannot resolve"))
}
}
}
// UPDATE must reject subqueries in the WHERE clause in every syntactic position:
// plain comparison, EXISTS, NOT EXISTS, IN and NOT IN. The expected failure is
// identical in all five cases, so one data-driven loop replaces five copies of
// the intercept boilerplate; the assert names the predicate so a regression
// pinpoints which flavor slipped through.
test("Negative case - do not support subquery test") {
  append(Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("key", "value"))
  Seq((2, 2), (1, 4), (1, 1), (0, 3)).toDF("c", "d").createOrReplaceTempView("source")
  val subqueryPredicates = Seq(
    "key < (SELECT max(c) FROM source)",      // basic subquery
    "EXISTS (SELECT max(c) FROM source)",     // subquery with EXISTS
    "NOT EXISTS (SELECT max(c) FROM source)", // subquery with NOT EXISTS
    "key IN (SELECT max(c) FROM source)",     // subquery with IN
    "key NOT IN (SELECT max(c) FROM source)") // subquery with NOT IN
  subqueryPredicates.foreach { predicate =>
    val e = intercept[AnalysisException] {
      executeUpdate(tarTable = s"star.`$tempPath`",
        set = "key = 1",
        where = predicate)
    }.getMessage
    assert(e.contains("Subqueries are not supported"), s"for predicate: $predicate")
  }
}
// End-to-end coverage of SET on nested struct fields: leaf updates, multi-level
// updates, setting structs/fields to null, named_struct literals, string-to-int
// coercion inside structs, and array-valued fields (whole-array replacement only).
// Each checkUpdateJson is independent: it builds its own temp table from the
// `tarTable` JSON and compares against `expected`.
test("nested data support") {
// set a nested field
checkUpdateJson(tarTable =
"""
{"a": {"c": {"d": 'random', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "z = 10",
set = "a.c.d = 'RANDOM'" :: Nil,
expected =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""")
// do nothing as condition has no match
val unchanged =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}"""
checkUpdateJson(tarTable = unchanged,
updateWhere = "z = 30",
set = "a.c.d = 'RANDOMMMMM'" :: Nil,
expected = unchanged)
// set multiple nested fields at different levels
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "z = 20",
set = "a.c.d = 'RANDOM2'" :: "a.c.e = 'STR2'" :: "a.g = -2" :: "z = -20" :: Nil,
expected =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'RANDOM2', "e": 'STR2'}, "g": -2}, "z": -20}""")
// set nested fields to null
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "a.c.d = 'random2'",
set = "a.c = null" :: "a.g = null" :: Nil,
expected =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": null, "g": null}, "z": 20}""")
// set a top struct type column to null
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "a.c.d = 'random2'",
set = "a = null" :: Nil,
expected =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": null, "z": 20}""")
// set a nested field using named_struct
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "a.g = 2",
set = "a.c = named_struct('d', 'RANDOM2', 'e', 'STR2')" :: Nil,
expected =
"""
{"a": {"c": {"d": 'RANDOM', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'RANDOM2', "e": 'STR2'}, "g": 2}, "z": 20}""")
// set an integer nested field with a string that can be casted into an integer
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'random', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""",
updateWhere = "z = 10",
set = "a.g = '-1'" :: "z = '30'" :: Nil,
expected =
"""
{"a": {"c": {"d": 'random', "e": 'str'}, "g": -1}, "z": 30}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}""")
// set the nested data that has an Array field
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'random', "e": [1, 11]}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'RANDOM2', "e": [2, 22]}, "g": 2}, "z": 20}""",
updateWhere = "z = 20",
set = "a.c.d = 'RANDOM22'" :: "a.g = -2" :: Nil,
expected =
"""
{"a": {"c": {"d": 'random', "e": [1, 11]}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'RANDOM22', "e": [2, 22]}, "g": -2}, "z": 20}""")
// set an array field
checkUpdateJson(
tarTable =
"""
{"a": {"c": {"d": 'random', "e": [1, 11]}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'RANDOM22', "e": [2, 22]}, "g": -2}, "z": 20}""",
updateWhere = "z = 10",
set = "a.c.e = array(-1, -11)" :: "a.g = -1" :: Nil,
expected =
"""
{"a": {"c": {"d": 'random', "e": [-1, -11]}, "g": -1}, "z": 10}
{"a": {"c": {"d": 'RANDOM22', "e": [2, 22]}, "g": -2}, "z": 20}""")
// set an array field as a top-level attribute
checkUpdateJson(
tarTable =
"""
{"a": [1, 11], "b": 'Z'}
{"a": [2, 22], "b": 'Y'}""",
updateWhere = "b = 'Z'",
set = "a = array(-1, -11, -111)" :: Nil,
expected =
"""
{"a": [-1, -11, -111], "b": 'Z'}
{"a": [2, 22], "b": 'Y'}""")
}
// Analysis-time rejections for nested SET targets: type mismatch on a struct,
// unknown struct field, conflicting SET targets (a field and its ancestor),
// and nested updates through MapType / array-of-struct (only StructType works).
test("nested data - negative case") {
val targetDF = spark.read.json(
"""
{"a": {"c": {"d": 'random', "e": 'str'}, "g": 1}, "z": 10}
{"a": {"c": {"d": 'random2', "e": 'str2'}, "g": 2}, "z": 20}"""
.split("\n").toSeq.toDS())
// assigning a string to the struct column a.c
testAnalysisException(
targetDF,
set = "a.c = 'RANDOM2'" :: Nil,
where = "z = 10",
errMsgs = "data type mismatch" :: Nil)
// a.c has no field z
testAnalysisException(
targetDF,
set = "a.c.z = 'RANDOM2'" :: Nil,
errMsgs = "No such struct field" :: Nil)
// setting a.c and a.c.d in the same statement conflicts
testAnalysisException(
targetDF,
set = "a.c = named_struct('d', 'rand', 'e', 'str')" :: "a.c.d = 'RANDOM2'" :: Nil,
errMsgs = "There is a conflict from these SET columns" :: Nil)
// setting a and a.c.d in the same statement conflicts
testAnalysisException(
targetDF,
set =
Seq("a = named_struct('c', named_struct('d', 'rand', 'e', 'str'))", "a.c.d = 'RANDOM2'"),
errMsgs = "There is a conflict from these SET columns" :: Nil)
val schema = new StructType().add("a", MapType(StringType, IntegerType))
val mapData = spark.read.schema(schema).json(Seq("""{"a": {"b": 1}}""").toDS())
testAnalysisException(
mapData,
set = "a.b = -1" :: Nil,
errMsgs = "Updating nested fields is only supported for StructType" :: Nil)
// Updating an ArrayStruct is not supported
val arrayStructData = spark.read.json(Seq("""{"a": [{"b": 1}, {"b": 2}]}""").toDS())
testAnalysisException(
arrayStructData,
set = "a.b = -1" :: Nil,
errMsgs = "Updating nested fields is only supported for StructType" :: Nil)
}
/**
 * Materializes `tarTable` (JSON lines) as a star table in a temp directory, runs an
 * UPDATE with the given SET/WHERE clauses, and checks the result against `expected`.
 *
 * @param tarTable    JSON lines forming the target table
 * @param source      optional JSON lines registered as temp view "source"
 * @param updateWhere WHERE clause of the UPDATE
 * @param set         SET clauses of the UPDATE
 * @param expected    JSON lines describing the expected table contents afterwards
 */
protected def checkUpdateJson(
    tarTable: Seq[String],
    source: Seq[String] = Nil,
    updateWhere: String,
    set: Seq[String],
    expected: Seq[String]): Unit = {
  withTempDir { dir =>
    withTempView("source") {
      val jsonToDF = (rows: Seq[String]) => spark.read.json(rows.toDS)
      // Write the target table into a fresh temp location.
      jsonToDF(tarTable).write.format("star").mode("overwrite").save(dir.toString)
      if (source.nonEmpty) {
        jsonToDF(source).createOrReplaceTempView("source")
      }
      executeUpdate(s"star.`$dir`", set, updateWhere)
      checkAnswer(readStarLakeTable(dir.toString), jsonToDF(expected))
    }
  }
}
/**
 * Writes `targetDF` to a fresh star table, runs an UPDATE that is expected to fail
 * during analysis, and asserts (case-insensitively) that the exception message
 * contains every fragment in `errMsgs`.
 */
protected def testAnalysisException(
    targetDF: DataFrame,
    set: Seq[String],
    where: String = null,
    errMsgs: Seq[String] = Nil) = {
  withTempDir { dir =>
    targetDF.write.format("star").save(dir.toString)
    val caught = intercept[AnalysisException] {
      executeUpdate(tarTable = s"star.`$dir`", set, where)
    }
    // Lowercase once, outside the loop, rather than per expected fragment.
    val actualMessage = caught.getMessage.toLowerCase(Locale.ROOT)
    errMsgs.foreach { fragment =>
      assert(actualMessage.contains(fragment.toLowerCase(Locale.ROOT)))
    }
  }
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/Command.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.expressions.{Expression, SubqueryExpression}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.star.TransactionCommit
import org.apache.spark.sql.star.sources.StarLakeBaseRelation
import org.apache.spark.sql.star.utils.DataFileInfo
import org.apache.spark.sql.{AnalysisException, SparkSession}
/**
* Helper trait for all commands.
*/
/**
 * Helper trait shared by the destructive commands (delete / update / merge /
 * compaction): predicate parsing and validation, plus bookkeeping over the
 * files that must be rewritten.
 */
trait Command {

  /**
   * Converts a string predicate into [[Expression]]s relative to a transaction.
   *
   * @throws AnalysisException if the predicate cannot be parsed.
   */
  protected def parsePartitionPredicates(spark: SparkSession,
                                         predicate: String): Seq[Expression] = {
    try {
      spark.sessionState.sqlParser.parseExpression(predicate) :: Nil
    } catch {
      case e: ParseException =>
        throw new AnalysisException(s"Cannot recognize the predicate '$predicate'", cause = Some(e))
    }
  }

  /**
   * Verifies that `predicates` contain no subqueries and reference only range
   * partition columns.
   *
   * @param rangePartitionColumns comma-separated range partition column names;
   *                              "" encodes "no range partition columns"
   * @throws AnalysisException on a subquery or a non-range-partition reference.
   */
  protected def verifyPartitionPredicates(spark: SparkSession,
                                          rangePartitionColumns: String,
                                          predicates: Seq[Expression]): Unit = {
    // Hoisted out of the per-predicate / per-reference loops: the column list and
    // resolver do not depend on the predicate being checked. Note "".split(",")
    // would yield Seq(""), hence the explicit empty-string check.
    val partitionColumns = if (rangePartitionColumns.isEmpty) {
      Seq.empty[String]
    } else {
      rangePartitionColumns.split(",").toSeq
    }
    val nameEquality = spark.sessionState.conf.resolver
    predicates.foreach { pred =>
      if (SubqueryExpression.hasSubquery(pred)) {
        throw new AnalysisException("Subquery is not supported in partition predicates.")
      }
      pred.references.foreach { col =>
        // `exists` + explicit throw: the matched name itself was never used, so
        // find(...).getOrElse(throw ...) only obscured the intent.
        if (!partitionColumns.exists(f => nameEquality(f, col.name))) {
          throw new AnalysisException(
            s"Predicate references non-range-partition column '${col.name}'. " +
              "Only the range partition columns may be referenced: " +
              s"[${partitionColumns.mkString(", ")}]")
        }
      }
    }
  }

  /**
   * Generates a map of file names to add file entries for operations where we will need to
   * rewrite files such as delete, merge, update. We expect file names to be unique, because
   * each file contains a UUID.
   */
  protected def generateCandidateFileMap(candidateFiles: Seq[DataFileInfo]): Map[String, DataFileInfo] = {
    val nameToFileMap = candidateFiles.map(file =>
      new Path(file.file_path).toString -> file).toMap
    // If two candidates normalize to the same path the map silently drops one;
    // fail loudly instead.
    assert(nameToFileMap.size == candidateFiles.length,
      s"File name collisions found among:\n${candidateFiles.map(_.file_path).mkString("\n")}")
    nameToFileMap
  }

  /**
   * Build a base relation of files that need to be rewritten as part of an update/delete/merge
   * operation.
   */
  protected def buildBaseRelation(spark: SparkSession,
                                  tc: TransactionCommit,
                                  inputLeafFiles: Seq[String],
                                  nameToFileMap: Map[String, DataFileInfo]): StarLakeBaseRelation = {
    val scannedFiles = inputLeafFiles.map(f => getTouchedFile(f, nameToFileMap))
    StarLakeBaseRelation(scannedFiles, tc.snapshotManagement)(spark)
  }

  /**
   * Find the [[DataFileInfo]] record corresponding to the file that was read as part of a
   * delete/update operation.
   *
   * @param filePath      The path to a file. Can be either absolute or relative
   * @param nameToFileMap Map generated through `generateCandidateFileMap()`
   * @throws IllegalStateException if the path is not among the candidates — that
   *                               would mean we are about to rewrite a file the
   *                               scan never selected.
   */
  protected def getTouchedFile(filePath: String,
                               nameToFileMap: Map[String, DataFileInfo]): DataFileInfo = {
    val absolutePath = new Path(filePath).toString
    nameToFileMap.getOrElse(absolutePath, {
      throw new IllegalStateException(s"File ($absolutePath) to be rewritten not found " +
        s"among candidate files:\n${nameToFileMap.keys.mkString("\n")}")
    })
  }

  /**
   * This method provides the RemoveFile actions that are necessary for files that are touched and
   * need to be rewritten in methods like Delete, Update.
   *
   * @param nameToFileMap      A map generated using `generateCandidateFileMap`.
   * @param filesToRewrite     Absolute paths of the files that were touched. We will search for
   *                           these in `candidateFiles`. Obtained as the output of the
   *                           `input_file_name` function.
   * @param operationTimestamp The timestamp of the operation
   */
  protected def removeFilesFromPaths(nameToFileMap: Map[String, DataFileInfo],
                                     filesToRewrite: Seq[String],
                                     operationTimestamp: Long): Seq[DataFileInfo] = {
    filesToRewrite.map { absolutePath =>
      val file = getTouchedFile(absolutePath, nameToFileMap)
      // expire() presumably marks the entry as removed as of the operation
      // timestamp — confirm against DataFileInfo's definition.
      file.expire(operationTimestamp)
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/livy/ExecuteWithLivy.scala
|
<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.livy
import java.io.FileNotFoundException
import java.net.URI
import com.engineplus.star.tables.StarTable
import org.apache.livy.{Job, JobContext, LivyClient, LivyClientBuilder}
object ExecuteWithLivy {
  /**
   * Builds a [[LivyClient]] pointing at the `spark.livy.host` entry of `conf`
   * (default http://localhost:8998); every entry of `conf` is also forwarded as a
   * client configuration key/value.
   */
  def getLivyClient(conf: Map[String, String]): LivyClient = {
    val livyClientBuilder = new LivyClientBuilder()
      .setURI(new URI(conf.getOrElse("spark.livy.host", "http://localhost:8998")))
    conf.foreach { case (key, value) => livyClientBuilder.setConf(key, value) }
    livyClientBuilder.build()
  }

  /**
   * Returns the filesystem path of the jar that contains `obj`'s class, so it can
   * be uploaded to the Livy session.
   *
   * @throws FileNotFoundException if the class has no resolvable code source or
   *                               location (e.g. it was loaded by the bootstrap
   *                               class loader).
   */
  def getSourcePath(obj: Object): String = {
    // Both getCodeSource and getLocation are documented as nullable; the original
    // dereferenced them unconditionally and could NPE instead of reporting
    // "jar not found". Treat null at either step the same as an empty path.
    Option(obj.getClass.getProtectionDomain.getCodeSource)
      .flatMap(source => Option(source.getLocation))
      .map(_.getPath)
      .filter(_.nonEmpty)
      .getOrElse(
        throw new FileNotFoundException(s"Jar containing ${obj.getClass.getName} not found."))
  }
}
/** Livy job that compacts the star table at `tableName`; `force` is passed through. */
class CompactionJob(tableName: String, force: Boolean)
  extends Job[Unit] {
  override def call(jobContext: JobContext): Unit = {
    val sparkSession = jobContext.sqlctx().sparkSession
    StarTable.forPath(sparkSession, tableName).compaction(force)
  }
}
/** Livy job that compacts the star table at `tableName`, restricted by `condition`. */
class CompactionJobWithCondition(tableName: String, condition: String, force: Boolean)
  extends Job[Unit] {
  override def call(jobContext: JobContext): Unit = {
    val sparkSession = jobContext.sqlctx().sparkSession
    StarTable.forPath(sparkSession, tableName).compaction(condition, force)
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/DataFrameWriterV2Suite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import com.engineplus.star.tables.StarTable
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.connector.catalog.{CatalogV2Util, Identifier, Table, TableCatalog}
import org.apache.spark.sql.connector.expressions._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.star.catalog.{StarLakeCatalog, StarLakeTableV2}
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{LongType, StringType, StructType}
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.scalatest.BeforeAndAfter
import scala.collection.JavaConverters._
// These tests are copied from Apache Spark (minus partition by expressions) and should work exactly
// the same with Star minus some writer options
trait DataFrameWriterV2Tests
extends QueryTest
with SharedSparkSession
with BeforeAndAfter {
import testImplicits._
// Registers the two source views every test writes from: "source" with ids 1-3
// and "source2" with ids 4-6.
before {
val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
df.createOrReplaceTempView("source")
val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data")
df2.createOrReplaceTempView("source2")
}
// Best-effort cleanup after each test: drop every table in `default`, and remove
// the backing star-table data when the table had a resolvable location.
after {
  spark.sessionState.catalog.listTables("default").foreach { ti =>
    val location = try {
      Option(spark.sessionState.catalog.getTableMetadata(ti).location)
    } catch {
      // Metadata lookup can fail for tables created mid-test; no data to clean then.
      case _: Exception => None
    }
    spark.sessionState.catalog.dropTable(ti, ignoreIfNotExists = false, purge = true)
    if (location.isDefined) {
      try {
        StarTable.forPath(location.get.toString).dropTable()
      } catch {
        // Deliberately swallowed: the data may already be gone and cleanup must
        // not fail the test that just ran.
        case _: Exception =>
      }
    }
  }
}
/** The current catalog, which these suites configure to be a [[StarLakeCatalog]]. */
def catalog: TableCatalog = {
  spark.sessionState.catalogManager.currentCatalog.asInstanceOf[StarLakeCatalog]
}

/** Table properties minus the catalog-reserved keys (not user-settable). */
protected def getProperties(table: Table): Map[String, String] = {
  // .filter instead of Map.filterKeys: filterKeys is deprecated in Scala 2.13
  // (where it also returns a lazy MapView); this form is strict and portable.
  table.properties().asScala.toMap
    .filter { case (key, _) => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(key) }
}
// append() must accumulate rows across calls.
test("Append: basic append") {
spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
checkAnswer(spark.table("table_name"), Seq.empty)
spark.table("source").writeTo("table_name").append()
checkAnswer(
spark.table("table_name"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
spark.table("source2").writeTo("table_name").append()
checkAnswer(
spark.table("table_name"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"), Row(4L, "d"), Row(5L, "e"), Row(6L, "f")))
}
// A renamed column must be rejected as a schema mismatch (append resolves columns
// by name, not position) and must leave the table empty.
test("Append: by name not position") {
spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
checkAnswer(spark.table("table_name"), Seq.empty)
val exc = intercept[AnalysisException] {
spark.table("source").withColumnRenamed("data", "d").writeTo("table_name").append()
}
assert(exc.getMessage.contains("schema mismatch"))
checkAnswer(
spark.table("table_name"),
Seq())
}
// Appending to a nonexistent table fails analysis, naming the table.
test("Append: fail if table does not exist") {
val exc = intercept[AnalysisException] {
spark.table("source").writeTo("table_name").append()
}
assert(exc.getMessage.contains("table_name"))
}
// overwrite(lit(true)) replaces the entire table contents.
test("Overwrite: overwrite by expression: true") {
spark.sql(
"CREATE TABLE table_name (id bigint, data string) USING star PARTITIONED BY (id)")
checkAnswer(spark.table("table_name"), Seq.empty)
spark.table("source").writeTo("table_name").append()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
spark.table("source2").writeTo("table_name").overwrite(lit(true))
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f")))
}
// Overwriting partition id = 3 with rows whose ids (4..6) fall outside that
// partition must be rejected, leaving the table unchanged.
test("Overwrite: overwrite by expression: id = 3") {
spark.sql(
"CREATE TABLE table_name (id bigint, data string) USING star PARTITIONED BY (id)")
checkAnswer(spark.table("table_name"), Seq.empty)
spark.table("source").writeTo("table_name").append()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val e = intercept[AnalysisException] {
spark.table("source2").writeTo("table_name").overwrite($"id" === 3)
}
assert(e.getMessage.contains("Invalid data would be written to partitions"))
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
}
// Like append, overwrite resolves columns by name and rejects a renamed column.
test("Overwrite: by name not position") {
spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
checkAnswer(spark.table("table_name"), Seq.empty)
val exc = intercept[AnalysisException] {
spark.table("source").withColumnRenamed("data", "d")
.writeTo("table_name").overwrite(lit(true))
}
assert(exc.getMessage.contains("schema mismatch"))
checkAnswer(
spark.table("table_name"),
Seq())
}
// Overwriting a nonexistent table fails analysis, naming the table.
test("Overwrite: fail if table does not exist") {
val exc = intercept[AnalysisException] {
spark.table("source").writeTo("table_name").overwrite(lit(true))
}
assert(exc.getMessage.contains("table_name"))
}
// Star tables do not support dynamic partition overwrite: every
// overwritePartitions() call must fail analysis and leave the table untouched.
test("OverwritePartitions: overwrite conflicting partitions") {
spark.sql(
"CREATE TABLE table_name (id bigint, data string) USING star PARTITIONED BY (id)")
checkAnswer(spark.table("table_name"), Seq.empty)
spark.table("source").writeTo("table_name").append()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val e = intercept[AnalysisException] {
spark.table("source2").withColumn("id", $"id" - 2)
.writeTo("table_name").overwritePartitions()
}
assert(e.getMessage.contains("Table default.table_name does not support dynamic overwrite"))
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
}
// Unsupported even when the table is not partitioned at all.
test("OverwritePartitions: overwrite all rows if not partitioned") {
spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
checkAnswer(spark.table("table_name"), Seq.empty)
spark.table("source").writeTo("table_name").append()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val e = intercept[AnalysisException] {
spark.table("source2").writeTo("table_name").overwritePartitions()
}
assert(e.getMessage.contains("Table default.table_name does not support dynamic overwrite"))
}
// The dynamic-overwrite rejection fires before any by-name schema check.
test("OverwritePartitions: by name not position") {
spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
checkAnswer(spark.table("table_name"), Seq.empty)
val e = intercept[AnalysisException] {
spark.table("source").withColumnRenamed("data", "d")
.writeTo("table_name").overwritePartitions()
}
assert(e.getMessage.contains("Table default.table_name does not support dynamic overwrite"))
checkAnswer(
spark.table("table_name"),
Seq())
}
// And a nonexistent table still fails analysis, naming the table.
test("OverwritePartitions: fail if table does not exist") {
val exc = intercept[AnalysisException] {
spark.table("source").writeTo("table_name").overwritePartitions()
}
assert(exc.getMessage.contains("table_name"))
}
// writeTo(...).create() materializes the source's data, schema, (no) partitioning
// and (no) user properties.
test("Create: basic behavior") {
spark.table("source").writeTo("table_name").using("star").create()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val table = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
assert(table.name === "default.table_name")
assert(table.schema === new StructType().add("id", LongType).add("data", StringType))
assert(table.partitioning.isEmpty)
assert(getProperties(table).isEmpty)
}
// Identical behavior when the provider is given explicitly via using("star").
test("Create: with using") {
spark.table("source").writeTo("table_name").using("star").create()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val table = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
assert(table.name === "default.table_name")
assert(table.schema === new StructType().add("id", LongType).add("data", StringType))
assert(table.partitioning.isEmpty)
assert(getProperties(table).isEmpty)
}
// partitionedBy($"id") yields an identity transform; note the partition column is
// moved to the end of the schema and becomes non-nullable.
test("Create: identity partitioned table") {
spark.table("source").writeTo("table_name").using("star")
.partitionedBy($"id").create()
checkAnswer(
spark.table("table_name").select("id", "data"),
Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
val table = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
assert(table.name === "default.table_name")
assert(table.schema === new StructType().add("data", StringType).add("id", LongType, false))
assert(table.partitioning === Seq(IdentityTransform(FieldReference("id"))))
assert(getProperties(table).isEmpty)
}
// create() on an existing table fails and must not alter the existing definition.
test("Create: fail if table already exists") {
spark.sql(
"CREATE TABLE table_name (id bigint, data string) USING star PARTITIONED BY (id)")
val exc = intercept[TableAlreadyExistsException] {
spark.table("source").writeTo("table_name").using("star").create()
}
assert(exc.getMessage.contains("table_name"))
val table = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
// table should not have been changed
assert(table.name === "default.table_name")
assert(table.schema === new StructType().add("data", StringType).add("id", LongType, false))
assert(table.partitioning === Seq(IdentityTransform(FieldReference("id"))))
assert(getProperties(table).isEmpty)
}
// replace() is rejected for star tables: the table is first created and
// populated via SQL, then a replace attempt must fail with AnalysisException.
test("Replace: not support") {
  spark.sql(
    "CREATE TABLE table_name (id bigint, data string) USING star PARTITIONED BY (id)")
  spark.sql("INSERT INTO TABLE table_name SELECT data,id FROM source")
  val expectedRows = Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"))
  checkAnswer(spark.table("table_name").select("id", "data"), expectedRows)
  // Sanity-check the freshly created table before attempting the replace.
  val initial = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
  assert(initial.name === "default.table_name")
  val expectedSchema = new StructType().add("data", StringType).add("id", LongType, false)
  assert(initial.schema === expectedSchema)
  assert(initial.partitioning === Seq(IdentityTransform(FieldReference("id"))))
  assert(getProperties(initial).isEmpty)
  val err = intercept[AnalysisException] {
    spark.table("source2")
      .withColumn("even_or_odd", when(($"id" % 2) === 0, "even").otherwise("odd"))
      .writeTo("table_name").using("star")
      .replace()
  }
  assert(err.getMessage().contains("`replaceTable` is not supported for Star tables"))
}
// createOrReplace() behaves as a plain create on a fresh name; a second call
// on the same table takes the (unsupported) replace path and must fail.
test("CreateOrReplace: failed when table exist") {
  spark.table("source").writeTo("table_name").using("star").createOrReplace()
  val expectedRows = Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"))
  checkAnswer(spark.table("table_name").select("id", "data"), expectedRows)
  val err = intercept[AnalysisException] {
    spark.table("source2").writeTo("table_name").using("star").createOrReplace()
  }
  assert(err.getMessage().contains("`replaceTable` is not supported for Star tables"))
}
// Partitioning by the years() transform expression is not supported by the
// star source: table creation must fail during analysis.
test("Create: partitioned by years(ts) - not supported") {
  val sourceWithTs = spark.table("source")
    .withColumn("ts", lit("2019-06-01 10:00:00.000000").cast("timestamp"))
  val err = intercept[AnalysisException] {
    sourceWithTs
      .writeTo("table_name")
      .partitionedBy(years($"ts"))
      .using("star")
      .create()
  }
  assert(err.getMessage.contains("Partitioning by expressions"))
}
// Partitioning by the months() transform expression is not supported by the
// star source: table creation must fail during analysis.
test("Create: partitioned by months(ts) - not supported") {
  val sourceWithTs = spark.table("source")
    .withColumn("ts", lit("2019-06-01 10:00:00.000000").cast("timestamp"))
  val err = intercept[AnalysisException] {
    sourceWithTs
      .writeTo("table_name")
      .partitionedBy(months($"ts"))
      .using("star")
      .create()
  }
  assert(err.getMessage.contains("Partitioning by expressions"))
}
// Partitioning by the days() transform expression is not supported by the
// star source: table creation must fail during analysis.
test("Create: partitioned by days(ts) - not supported") {
  val sourceWithTs = spark.table("source")
    .withColumn("ts", lit("2019-06-01 10:00:00.000000").cast("timestamp"))
  val err = intercept[AnalysisException] {
    sourceWithTs
      .writeTo("table_name")
      .partitionedBy(days($"ts"))
      .using("star")
      .create()
  }
  assert(err.getMessage.contains("Partitioning by expressions"))
}
// Partitioning by the hours() transform expression is not supported by the
// star source: table creation must fail during analysis.
test("Create: partitioned by hours(ts) - not supported") {
  val sourceWithTs = spark.table("source")
    .withColumn("ts", lit("2019-06-01 10:00:00.000000").cast("timestamp"))
  val err = intercept[AnalysisException] {
    sourceWithTs
      .writeTo("table_name")
      .partitionedBy(hours($"ts"))
      .using("star")
      .create()
  }
  assert(err.getMessage.contains("Partitioning by expressions"))
}
// Bucket transforms are not supported as star partitioning; creation must
// fail with an error message mentioning "Bucketing".
test("Create: partitioned by bucket(4, id) - not supported") {
  val writer = spark.table("source").writeTo("table_name")
  val err = intercept[AnalysisException] {
    writer
      .partitionedBy(bucket(4, $"id"))
      .using("star")
      .create()
  }
  assert(err.getMessage.contains("Bucketing"))
}
}
/**
 * Concrete [[DataFrameWriterV2Tests]] bound to the star data source, plus a
 * few scenarios that address a table by its physical path (star.`<path>`).
 */
class DataFrameWriterV2Suite
  extends DataFrameWriterV2Tests
    with StarLakeSQLCommandTest {

  import testImplicits._

  // Append by physical path: resolve the table's storage location from the
  // catalog, then write and read through the star.`<path>` identifier.
  test("Append: basic append by path") {
    spark.sql("CREATE TABLE table_name (id bigint, data string) USING star")
    checkAnswer(spark.table("table_name"), Seq.empty)
    val location = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
      .asInstanceOf[StarLakeTableV2].path
    spark.table("source").writeTo(s"star.`$location`").append()
    checkAnswer(
      spark.table(s"star.`$location`").select("id", "data"),
      Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c")))
  }

  // Passing a path through star.`...` to create() is expected to fail with an
  // AssertionError whose message mentions "Short Table name" / "can't be a
  // path" — presumably create() resolves the identifier as a short table
  // name; confirm against the catalog implementation.
  test("Create: basic behavior by path - short table name can't be a path") {
    withTempDir { tempDir =>
      val dir = tempDir.getCanonicalPath
      val e = intercept[AssertionError] {
        spark.table("source").writeTo(s"star.`$dir`").using("star").create()
      }
      assert(e.getMessage.contains("Short Table name") && e.getMessage.contains("can't be a path"))
    }
  }

  // Creating from an empty DataFrame still records schema and partitioning
  // metadata even though no rows are written.
  test("Create: using empty dataframe") {
    spark.table("source").where("false")
      .writeTo("table_name").using("star")
      .partitionedBy($"id").create()
    checkAnswer(spark.table("table_name"), Seq.empty[Row])
    val table = catalog.loadTable(Identifier.of(Array("default"), "table_name"))
    assert(table.name === "default.table_name")
    assert(table.schema === new StructType().add("data", StringType).add("id", LongType, false))
    assert(table.partitioning === Seq(IdentityTransform(FieldReference("id"))))
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/StarLakePartFileMerge.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.exception.{MetaRerunException, StarLakeErrors}
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.utils.DataFileInfo
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import scala.collection.JavaConversions._
import scala.util.control.Breaks.{break, breakable}
/**
 * Incremental ("part") merging of delta files before/while compacting, used to
 * avoid reading too many delta files at once. Files are processed per bucket,
 * merged in chunks once a size/count threshold is exceeded, and the merged
 * results are fed back into the remaining work set.
 */
object StarLakePartFileMerge {

  /**
   * Repeatedly merges chunks of files until no bucket group exceeds the size
   * threshold, then returns the resulting file list.
   *
   * @param groupAndSortedFiles file groups, one per bucket id, each sorted by
   *                            write_version. NOTE(review): `.head` below
   *                            assumes this collection is non-empty — confirm
   *                            all callers guarantee at least one group.
   * @param mergeOperatorInfo   per-column merge-operator mapping passed to the scan.
   * @param isCompactionCommand true when invoked from an explicit compaction
   *                            command; forces commit and makes a failed
   *                            commit fatal.
   * @return the files remaining after all part merges (merged + untouched).
   */
  def partMergeCompaction(sparkSession: SparkSession,
                          snapshotManagement: SnapshotManagement,
                          groupAndSortedFiles: Iterable[Seq[DataFileInfo]],
                          mergeOperatorInfo: Map[String, String],
                          isCompactionCommand: Boolean): Seq[DataFileInfo] = {
    val conf = sparkSession.sessionState.conf
    val minimumNum = conf.getConf(StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM)
    // Threshold = minimumNum * 128MB * sizeFactor (see PART_MERGE_FILE_SIZE_FACTOR doc).
    val limitMergeSize = minimumNum * 128 * 1024 * 1024 * conf.getConf(StarLakeSQLConf.PART_MERGE_FILE_SIZE_FACTOR)

    // Accumulators for the current scan over one bucket group; reset after
    // each part merge. The loop is order-sensitive: do not restructure.
    var currentVersion: Long = 0
    var currentSize: Long = 0
    var currentFiles: Int = 0
    var notFinish = true
    // Commits are forced for explicit compaction; otherwise controlled by config.
    var commitFlag = isCompactionCommand || conf.getConf(StarLakeSQLConf.PART_MERGE_COMPACTION_COMMIT_ENABLE)

    var needMergeFiles = groupAndSortedFiles

    while (notFinish) {
      //take first iter(a group of files with same bucket id)
      val iter = needMergeFiles.head.iterator
      breakable {
        while (iter.hasNext) {
          val file = iter.next()
          // Cap each file's contribution at 128MB (134217728) plus the
          // configured per-file open cost.
          currentSize += (Math.min(file.size, 134217728) + conf.filesOpenCostInBytes)
          currentVersion = file.write_version
          currentFiles += 1
          if (currentSize > limitMergeSize && currentFiles > minimumNum) {
            snapshotManagement.withNewPartMergeTransaction(pmtc => {
              //merge part of files
              // Across ALL bucket groups, merge every file strictly older
              // than the version we stopped at.
              val partFiles = needMergeFiles.flatMap(_.filter(_.write_version < currentVersion)).toSeq
              val (flag, newFiles) = executePartFileCompaction(
                sparkSession,
                snapshotManagement,
                pmtc,
                partFiles,
                mergeOperatorInfo,
                commitFlag)
              //compaction should commit success
              if (isCompactionCommand && !flag) {
                throw StarLakeErrors.compactionFailedWithPartMergeException()
              } else {
                commitFlag = flag
              }
              // Rebuild the work set: merged output (write_version reset to 0
              // so it sorts first) plus the not-yet-merged files, regrouped by
              // bucket and re-sorted by write_version.
              val notMergedFiles = needMergeFiles.flatMap(_.filter(_.write_version >= currentVersion)).toSeq
              val newFilesChangeWriteVersion = newFiles.map(_.copy(write_version = 0))
              needMergeFiles = (newFilesChangeWriteVersion ++ notMergedFiles)
                .groupBy(_.file_bucket_id).values.map(m => m.sortBy(_.write_version))
              currentSize = 0
              currentVersion = 0
              currentFiles = 0
            })
            // Restart the outer loop over the rebuilt work set.
            break
          }
        }
        // Inner scan finished without hitting the threshold: we are done.
        notFinish = false
      }
    }
    needMergeFiles.flatten.toSeq
  }

  /**
   * Runs one part-merge compaction over `files` inside transaction `pmtc`.
   *
   * @param commitFlag whether to attempt committing the result; once a commit
   *                   fails because files were "deleted by another job", later
   *                   part merges skip committing (flag returned false).
   * @return (commit succeeded or was skipped without conflict, newly written files)
   */
  def executePartFileCompaction(spark: SparkSession,
                                snapshotManagement: SnapshotManagement,
                                pmtc: PartMergeTransactionCommit,
                                files: Seq[DataFileInfo],
                                mergeOperatorInfo: Map[String, String],
                                commitFlag: Boolean): (Boolean, Seq[DataFileInfo]) = {
    val fileIndex = BatchDataFileIndexV2(spark, snapshotManagement, files)
    val table = StarLakeTableV2(
      spark,
      new Path(snapshotManagement.table_name),
      None,
      None,
      Option(fileIndex),
      Option(mergeOperatorInfo)
    )
    // NOTE(review): the Scala Map is implicitly converted to a java.util.Map
    // via the deprecated `scala.collection.JavaConversions._` import at the
    // top of this file — consider migrating to JavaConverters/.asJava.
    val option = new CaseInsensitiveStringMap(
      Map("basePath" -> pmtc.tableInfo.table_name, "isCompaction" -> "true"))
    val compactDF = Dataset.ofRows(
      spark,
      DataSourceV2Relation(
        table,
        table.schema().toAttributes,
        None,
        None,
        option
      )
    )

    pmtc.setReadFiles(files)
    pmtc.setCommitType("part_compaction")
    val newFiles = pmtc.writeFiles(compactDF, isCompaction = true)

    //if part compaction failed before, it will not commit later
    var flag = commitFlag
    if (flag) {
      try {
        pmtc.commit(newFiles, files)
      } catch {
        case e: MetaRerunException =>
          // A concurrent job removed files we read: disable further commits
          // instead of failing the whole part-merge pass.
          if (e.getMessage.contains("deleted by another job")) {
            flag = false
          }
        case e: Exception => throw e
      }
    }

    (flag, newFiles)
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/rules/PreprocessTableDelete.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.rules
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, StarDelete}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.StarLakeTableRelationV2
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.commands.DeleteCommand
import org.apache.spark.sql.star.exception.StarLakeErrors
/**
* Preprocess the [[StarDelete]] plan to convert to [[DeleteCommand]].
*/
/**
 * Preprocess the [[StarDelete]] plan to convert to [[DeleteCommand]].
 *
 * Rejects DELETE conditions containing subqueries and DELETE targets that are
 * not star-lake tables.
 */
case class PreprocessTableDelete(sqlConf: SQLConf) extends Rule[LogicalPlan] {

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case delete: StarDelete if delete.resolved =>
      // Subqueries in the DELETE condition are unsupported.
      for (cond <- delete.condition if SubqueryExpression.hasSubquery(cond)) {
        throw StarLakeErrors.subqueryNotSupportedException("DELETE", cond)
      }
      toCommand(delete)
  }

  /** Converts a resolved [[StarDelete]] into an executable [[DeleteCommand]]. */
  def toCommand(d: StarDelete): DeleteCommand = {
    val unaliasedChild = EliminateSubqueryAliases(d.child)
    unaliasedChild match {
      case StarLakeTableRelationV2(tbl: StarLakeTableV2) =>
        DeleteCommand(tbl.snapshotManagement, d.child, d.condition)
      case other =>
        throw StarLakeErrors.notAnStarLakeSourceException("DELETE", Some(other))
    }
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/commands/UpdateScalaSuite.scala
|
<filename>src/test/scala/org/apache/spark/sql/star/commands/UpdateScalaSuite.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import java.util.Locale
import com.engineplus.star.tables
import com.engineplus.star.tables.{StarTable, StarTableTestUtils}
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.{Row, functions}
/**
 * Scala-API flavour of the UPDATE test suite: drives the shared
 * [[UpdateSuiteBase]] scenarios through [[StarTable.updateExpr]] /
 * [[StarTable.update]] instead of SQL.
 */
class UpdateScalaSuite extends UpdateSuiteBase with StarLakeSQLCommandTest {

  import testImplicits._

  // An UPDATE must be visible through a previously cached read of the table.
  test("update cached table") {
    Seq((2, 2), (1, 4)).toDF("key", "value")
      .write.mode("overwrite").format("star").save(tempPath)

    spark.read.format("star").load(tempPath).cache()
    spark.read.format("star").load(tempPath).collect()

    executeUpdate(s"star.`$tempPath`", set = "key = 3")
    checkAnswer(spark.read.format("star").load(tempPath), Row(3, 2) :: Row(3, 4) :: Nil)
  }

  // updateExpr with no condition updates every row.
  test("update usage test - without condition") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.updateExpr(Map("key" -> "100"))
    checkAnswer(readStarLakeTable(tempPath),
      Row(100, 10) :: Row(100, 20) :: Row(100, 30) :: Row(100, 40) :: Nil)
  }

  // Same as above, but assignments are given as Column expressions.
  test("update usage test - without condition, using Column") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.update(Map("key" -> functions.expr("100")))
    checkAnswer(readStarLakeTable(tempPath),
      Row(100, 10) :: Row(100, 20) :: Row(100, 30) :: Row(100, 40) :: Nil)
  }

  // updateExpr with a condition only touches matching rows.
  test("update usage test - with condition") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.updateExpr("key = 1 or key = 2", Map("key" -> "100"))
    checkAnswer(readStarLakeTable(tempPath),
      Row(100, 10) :: Row(100, 20) :: Row(3, 30) :: Row(4, 40) :: Nil)
  }

  // Conditional update with Column-based condition and multiple assignments.
  test("update usage test - with condition, using Column") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.update(functions.expr("key = 1 or key = 2"),
      Map("key" -> functions.expr("100"), "value" -> functions.expr("101")))
    checkAnswer(readStarLakeTable(tempPath),
      Row(100, 101) :: Row(100, 101) :: Row(3, 30) :: Row(4, 40) :: Nil)
  }

  // Adapter for the base suite: splits a comma-separated SET clause into the
  // Seq-based overload below.
  override protected def executeUpdate(target: String,
                                       set: String,
                                       where: String = null): Unit = {
    executeUpdate(target, set.split(","), where)
  }

  /**
   * Executes an update through the StarTable Scala API.
   *
   * @param target table name or star.`path` identifier, optionally followed
   *               by an alias (with or without AS).
   * @param set    "col = expr" assignment strings.
   * @param where  optional condition; null means update all rows.
   */
  override protected def executeUpdate(target: String,
                                       set: Seq[String],
                                       where: String): Unit = {

    // Splits "name [AS] alias" into (name, Some(alias)). A second token made
    // of non-alphanumeric characters (e.g. a backquoted path fragment) is
    // treated as part of the name rather than an alias.
    def parse(tableNameWithAlias: String): (String, Option[String]) = {
      tableNameWithAlias.split(" ").toList match {
        case tableName :: Nil => tableName -> None // just the table name
        case tableName :: alias :: Nil =>
          // two parts - table name and alias OR star.`path`-style name
          val ordinary = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')).toSet
          if (alias.forall(ordinary.contains(_))) {
            tableName -> Some(alias)
          } else {
            tableName + " " + alias -> None
          }
        case list if list.size >= 3 && list(list.size - 2).toLowerCase(Locale.ROOT) == "as" =>
          // spaces in the name, plus an explicit "AS alias" suffix
          list.dropRight(2).mkString(" ").trim() -> Some(list.last)
        case list if list.size >= 2 =>
          // spaces in the name, plus a trailing alias without AS
          list.dropRight(1).mkString(" ").trim() -> Some(list.last)
        case _ =>
          fail(s"Could not build parse '$tableNameWithAlias' for table and optional alias")
      }
    }

    val starTable: StarTable = {
      val (tableNameOrPath, optionalAlias) = parse(target)
      val isPath: Boolean = tableNameOrPath.startsWith("star.")
      val table = if (isPath) {
        // Strip the star.`...` wrapper to recover the raw filesystem path.
        val path = tableNameOrPath.stripPrefix("star.`").stripSuffix("`")
        tables.StarTable.forPath(spark, path)
      } else {
        StarTableTestUtils.createTable(spark.table(tableNameOrPath),
          SnapshotManagement(tableNameOrPath))
      }
      optionalAlias.map(table.as(_)).getOrElse(table)
    }

    // Parse "col = expr" assignment strings into a column -> expression map.
    val setColumns = set.map { assign =>
      val kv = assign.split("=")
      require(kv.size == 2)
      kv(0).trim -> kv(1).trim
    }.toMap

    if (where == null) {
      starTable.updateExpr(setColumns)
    } else {
      starTable.updateExpr(where, setColumns)
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/sources/StarLakeSQLConf.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.sources
import org.apache.spark.internal.config.{ConfigBuilder, ConfigEntry}
import org.apache.spark.sql.internal.SQLConf
/**
 * All star-lake SQL configuration entries, namespaced under
 * `spark.engineplus.star.*`. Grouped below into: schema/write behavior,
 * compaction & cleanup, Cassandra meta-store connection, meta-store commit
 * protocol, parquet, part merge, and materialized-view settings.
 */
object StarLakeSQLConf {

  def buildConf(key: String): ConfigBuilder = SQLConf.buildConf(s"spark.engineplus.star.$key")

  def buildStaticConf(key: String): ConfigBuilder =
    SQLConf.buildStaticConf(s"spark.engineplus.star.$key")

  // ---- schema / write behavior ----

  val SCHEMA_AUTO_MIGRATE: ConfigEntry[Boolean] =
    buildConf("schema.autoMerge.enabled")
      .doc("If true, enables schema merging on appends and on overwrites.")
      .booleanConf
      .createWithDefault(false)

  val USE_DELTA_FILE: ConfigEntry[Boolean] =
    buildConf("deltaFile.enabled")
      .doc("If true, enables delta files on specific scene(e.g. upsert).")
      .booleanConf
      .createWithDefault(true)

  val MAX_DELTA_FILE_NUM: ConfigEntry[Int] =
    buildConf("deltaFile.max.num")
      .doc("Maximum delta files allowed, default is 5.")
      .intConf
      .createWithDefault(5)

  // ---- compaction & cleanup ----

  val COMPACTION_TIME: ConfigEntry[Long] =
    buildConf("compaction.interval")
      .doc("If the last update time exceeds the set interval, compaction will be triggered, default is 12 hours.")
      .longConf
      .createWithDefault(12 * 60 * 60 * 1000L)

  // History file cleanup interval
  val OLD_VERSION_RETENTION_TIME: ConfigEntry[Long] =
    buildConf("cleanup.interval")
      .doc("Retention time of old version data, default is 5 hours.")
      .longConf
      .createWithDefault(5 * 60 * 60 * 1000L)

  val CLEANUP_PARALLELISM: ConfigEntry[Int] =
    buildConf("cleanup.parallelism")
      .doc("The number of parallelism to list a collection of path recursively when cleanup, default is 200.")
      .intConf
      .createWithDefault(200)

  val CLEANUP_CONCURRENT_DELETE_ENABLE: ConfigEntry[Boolean] =
    buildConf("cleanup.concurrent.delete.enable")
      .doc("If enable delete files concurrently.")
      .booleanConf
      .createWithDefault(true)

  // ---- Cassandra meta-store connection ----

  // default meta database name
  val META_DATABASE_NAME: ConfigEntry[String] =
    buildConf("meta.database.name")
      .doc(
        """
          |Default database of meta tables in Cassandra.
          |User should not change it unless you know what you are going to do.
        """.stripMargin)
      .stringConf
      .createWithDefault("star_meta")

  val META_HOST: ConfigEntry[String] =
    buildConf("meta.host")
      .doc(
        """
          |Contact point to connect to the Cassandra cluster.
          |A comma separated list may also be used.("127.0.0.1,192.168.0.1")
        """.stripMargin)
      .stringConf
      .createWithDefault("localhost")

  val META_PORT: ConfigEntry[Int] =
    buildConf("meta.port")
      .doc("Cassandra native connection port.")
      .intConf
      .createWithDefault(9042)

  val META_USERNAME: ConfigEntry[String] =
    buildConf("meta.username")
      .doc(
        """
          |Cassandra username, default is `cassandra`.
        """.stripMargin)
      .stringConf
      .createWithDefault("cassandra")

  val META_PASSWORD: ConfigEntry[String] =
    buildConf("meta.password")
      .doc(
        """
          |Cassandra password, default is `cassandra`.
        """.stripMargin)
      .stringConf
      .createWithDefault("cassandra")

  val META_CONNECT_FACTORY: ConfigEntry[String] =
    buildConf("meta.connect.factory")
      .doc(
        """
          |CassandraConnectionFactory providing connections to the Cassandra cluster.
        """.stripMargin)
      .stringConf
      .createWithDefault("com.engineplus.star.meta.CustomConnectionFactory")

  val META_CONNECT_TIMEOUT: ConfigEntry[Int] =
    buildConf("meta.connect.timeout")
      .doc(
        """
          |Timeout for connecting to cassandra, default is 60s.
        """.stripMargin)
      .intConf
      .createWithDefault(60 * 1000)

  val META_READ_TIMEOUT: ConfigEntry[Int] =
    buildConf("meta.read.timeout")
      .doc(
        """
          |Timeout for reading to cassandra, default is 30s.
        """.stripMargin)
      .intConf
      .createWithDefault(30 * 1000)

  val META_MAX_CONNECT_PER_EXECUTOR: ConfigEntry[Int] =
    buildConf("meta.connections_per_executor_max")
      .doc(
        """
          |Maximum number of connections per Host set on each Executor JVM. Will be
          |updated to DefaultParallelism / Executors for Spark Commands. Defaults to 1
          | if not specifying and not in a Spark Env.
        """.stripMargin)
      .intConf
      .createWithDefault(600)

  // ---- meta-store commit protocol ----

  val META_GET_LOCK_MAX_ATTEMPTS: ConfigEntry[Int] =
    buildConf("meta.get.lock.max.attempts")
      .doc(
        """
          |The maximum times for a commit attempts to acquire the lock.
        """.stripMargin)
      .intConf
      .createWithDefault(5)

  val META_GET_LOCK_WAIT_INTERVAL: ConfigEntry[Int] =
    buildConf("meta.acquire.write.lock.wait.interval")
      .doc(
        """
          |The wait time when a commit failed to get write lock because another job is committing.
        """.stripMargin)
      .intConf
      .createWithDefault(5)

  val META_GET_LOCK_RETRY_INTERVAL: ConfigEntry[Int] =
    buildConf("meta.acquire.write.lock.retry.interval")
      .doc(
        """
          |The interval time when a commit failed to get write lock.
          |The commit will wait a random time between 0 and RETRY_INTERVAL seconds.
        """.stripMargin)
      .intConf
      .createWithDefault(20)

  val META_COMMIT_TIMEOUT: ConfigEntry[Long] =
    buildConf("meta.commit.timeout")
      .doc(
        """
          |The maximum timeout for a committer.
        """.stripMargin)
      .longConf
      .createWithDefault(20 * 1000L)

  val META_UNDO_LOG_TIMEOUT: ConfigEntry[Long] =
    buildConf("meta.undo_log.timeout")
      .doc(
        """
          |The maximum timeout for undo log(exclude Commit type).
          |This parameter will only be used in Cleanup operation.
        """.stripMargin)
      .longConf
      .createWithDefault(30 * 60 * 1000L)

  val META_STREAMING_INFO_TIMEOUT: ConfigEntry[Long] =
    buildConf("meta.streaming_info.timeout")
      .doc(
        """
          |The maximum timeout for streaming info.
          |This parameter will only be used in Cleanup operation.
        """.stripMargin)
      .longConf
      .createWithDefault(12 * 60 * 60 * 1000L)

  val META_MAX_COMMIT_ATTEMPTS: ConfigEntry[Int] =
    buildConf("meta.commit.max.attempts")
      .doc(
        """
          |The maximum times for a job attempts to commit.
        """.stripMargin)
      .intConf
      .createWithDefault(5)

  val META_MAX_SIZE_PER_VALUE: ConfigEntry[Int] =
    buildConf("meta.max.size.per.value")
      .doc(
        """
          |The maximum size for undo log value(e.g. table_info.table_schema).
          |If value size exceed this limit, it will be split into some fragment values.
        """.stripMargin)
      .intConf
      .createWithDefault(50 * 1024)

  // drop table await time
  val DROP_TABLE_WAIT_SECONDS: ConfigEntry[Int] =
    buildConf("drop.table.wait.seconds")
      .doc(
        """
          |When dropping table or partition, we need wait a few seconds for the other commits to be completed.
        """.stripMargin)
      .intConf
      .createWithDefault(1)

  val ALLOW_FULL_TABLE_UPSERT: ConfigEntry[Boolean] =
    buildConf("full.partitioned.table.scan.enabled")
      .doc("If true, enables full table scan when upsert.")
      .booleanConf
      .createWithDefault(false)

  // ---- parquet ----

  val PARQUET_BLOCK_SIZE: ConfigEntry[Long] =
    buildConf("parquet.block.size")
      .doc("Parquet block size.")
      .longConf
      .createWithDefault(32 * 1024 * 1024L)

  val PARQUET_COMPRESSION: ConfigEntry[String] =
    buildConf("parquet.compression")
      .doc(
        """
          |Parquet compression type.
        """.stripMargin)
      .stringConf
      .createWithDefault("snappy")

  val PARQUET_COMPRESSION_ENABLE: ConfigEntry[Boolean] =
    buildConf("parquet.compression.enable")
      .doc(
        """
          |Whether to use parquet compression.
        """.stripMargin)
      .booleanConf
      .createWithDefault(true)

  val BUCKET_SCAN_MULTI_PARTITION_ENABLE: ConfigEntry[Boolean] =
    buildConf("bucket.scan.multi.partition.enable")
      .doc(
        """
          |Hash partitioned table can read multi-partition data partitioned by hash keys without shuffle,
          |this parameter controls whether this feature is enabled or not.
          |Using this feature, the parallelism will equal to hash bucket num.
        """.stripMargin)
      .booleanConf
      .createWithDefault(false)

  // ---- part merge (see StarLakePartFileMerge) ----

  val PART_MERGE_ENABLE: ConfigEntry[Boolean] =
    buildConf("part.merge.enable")
      .doc(
        """
          |If true, part files merging will be used to avoid OOM when it has too many delta files.
        """.stripMargin)
      .booleanConf
      .createWithDefault(false)

  val PART_MERGE_COMPACTION_COMMIT_ENABLE: ConfigEntry[Boolean] =
    buildConf("part.merge.compaction.commit.enable")
      .doc(
        """
          |If true, it will commit the compacted files into meta store, and the later reader can read faster.
          |Note that if you read a column by self-defined merge operator, the compacted result should also use
          |this merge operator, make sure that the result is expected or disable compaction commit.
        """.stripMargin)
      .booleanConf
      .createWithDefault(true)

  val PART_MERGE_FILE_MINIMUM_NUM: ConfigEntry[Int] =
    buildConf("part.merge.file.minimum.num")
      .doc(
        """
          |If delta file num more than this count, we will check for part merge.
        """.stripMargin)
      .intConf
      .createWithDefault(5)

  val PART_MERGE_FILE_SIZE_FACTOR: ConfigEntry[Double] =
    buildConf("part.merge.file.size.factor")
      .doc(
        """
          |File size factor to calculate part merge max size.
          |Expression: PART_MERGE_FILE_MINIMUM_NUM * PART_MERGE_FILE_SIZE_FACTOR * 128M
        """.stripMargin)
      .doubleConf
      .createWithDefault(0.1)

  val ASYNC_IO_ENABLE: ConfigEntry[Boolean] =
    buildConf("async.io.enable")
      .doc(
        """
          |Whether async reader/writer can be used.
        """.stripMargin)
      .booleanConf
      .createWithDefault(true)

  // ---- materialized views ----

  // NOTE(review): the doc string below ends mid-sentence ("associate with") —
  // complete the description in a follow-up change.
  val AUTO_UPDATE_MATERIAL_VIEW_ENABLE: ConfigEntry[Boolean] =
    buildConf("auto.update.materialView.enable")
      .doc(
        """
          |Whether update material views when data changed.
          |If true, it will check all material views associate with
        """.stripMargin)
      .booleanConf
      .createWithDefault(false)

  val ALLOW_STALE_MATERIAL_VIEW: ConfigEntry[Boolean] =
    buildConf("allow.stale.materialView")
      .doc(
        """
          |If true, material view with stale data will be read.
        """.stripMargin)
      .booleanConf
      .createWithDefault(false)

  val MATERIAL_QUERY_REWRITE_ENABLE: ConfigEntry[Boolean] =
    buildConf("material.query.rewrite.enable")
      .doc(
        """
          |If true, material view can be used to rewrite query plan.
        """.stripMargin)
      .booleanConf
      .createWithDefault(true)
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/DataOperation.scala
|
<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import org.apache.spark.internal.Logging
import org.apache.spark.sql.star.exception.{MetaRerunErrors, StarLakeErrors}
import org.apache.spark.sql.star.utils.{DataFileInfo, PartitionInfo, undoLogInfo}
import scala.collection.mutable.ArrayBuffer
object DataOperation extends Logging {
private val cassandraConnector = MetaUtils.cassandraConnector
private val database = MetaUtils.DATA_BASE
private val long_max_value: Long = Long.MaxValue
def getTableDataInfo(partition_info_arr: Array[PartitionInfo]): Array[DataFileInfo] = {
var file_info_buf = new ArrayBuffer[DataFileInfo]()
for (partition_info <- partition_info_arr) {
val table_id = partition_info.table_id
val range_id = partition_info.range_id
val range_value = partition_info.range_value
val read_version = partition_info.read_version
file_info_buf ++= getSinglePartitionDataInfo(
table_id,
range_id,
range_value,
read_version,
true)
}
file_info_buf.toArray
}
//get fies info in this partition that match the current read version
def getSinglePartitionDataInfo(table_id: String,
range_id: String,
range_value: String,
read_version: Long,
allow_filtering: Boolean = false): ArrayBuffer[DataFileInfo] = {
val partition_values = if (!range_value.equalsIgnoreCase(MetaUtils.DEFAULT_RANGE_PARTITION_VALUE)) {
MetaUtils.getPartitionMapFromKey(range_value)
} else {
Map.empty[String, String]
}
var file_arr_buf = new ArrayBuffer[DataFileInfo]()
cassandraConnector.withSessionDo(session => {
//allow cassandra to do its own filtering
if (allow_filtering) {
val res = session.executeAsync(
s"""
|select file_path,size,modification_time,file_exist_cols,write_version,is_base_file
|from $database.data_info
|where table_id='$table_id' and range_id='$range_id'
|and write_version<=$read_version and expire_version>$read_version allow filtering
""".stripMargin)
val itr = res.getUninterruptibly.iterator()
while (itr.hasNext) {
val row = itr.next()
file_arr_buf += DataFileInfo(
row.getString("file_path"),
partition_values,
row.getLong("size"),
row.getLong("modification_time"),
row.getLong("write_version"),
row.getBool("is_base_file"),
row.getString("file_exist_cols"))
}
file_arr_buf
} else {
val res = session.executeAsync(
s"""
|select file_path,size,modification_time,file_exist_cols,write_version,expire_version,
|write_version,is_base_file
|from $database.data_info
|where table_id='$table_id' and range_id='$range_id'
""".stripMargin)
val itr = res.getUninterruptibly.iterator()
while (itr.hasNext) {
val row = itr.next()
val write_version = row.getLong("write_version")
val expire_version = row.getLong("expire_version")
//write_version<=current_read_version and expire_version > current_read_version
if (write_version <= read_version && expire_version > read_version) {
file_arr_buf += DataFileInfo(
row.getString("file_path"),
partition_values,
row.getLong("size"),
row.getLong("modification_time"),
row.getLong("write_version"),
row.getBool("is_base_file"),
row.getString("file_exist_cols"))
}
}
file_arr_buf
}
})
}
//redo add file
def redoAddedNewDataFile(info: undoLogInfo): Unit = {
addNewDataFile(
info.table_id,
info.range_id,
info.file_path,
info.write_version,
info.commit_id,
info.size,
info.modification_time,
info.file_exist_cols,
info.is_base_file)
}
//add new data info to table data_info
def addNewDataFile(table_id: String,
range_id: String,
file_path: String,
write_version: Long,
commit_id: String,
size: Long,
modification_time: Long,
file_exist_cols: String,
is_base_file: Boolean): Unit = {
cassandraConnector.withSessionDo(session => {
session.execute(
s"""
|insert into $database.data_info
|(table_id,range_id,file_path,write_version,expire_version,
|commit_id,size,modification_time,file_exist_cols,is_base_file)
|values ('$table_id','$range_id','$file_path',$write_version,$long_max_value,'$commit_id',
|$size,$modification_time,'$file_exist_cols',$is_base_file)
""".stripMargin)
})
}
//redo expire file
def redoExpireDataFile(info: undoLogInfo): Unit = {
deleteExpireDataFile(
info.table_id,
info.range_id,
info.file_path,
info.write_version,
info.commit_id,
info.modification_time)
}
//delete expire file, update expire version to current write version without physical deletion
def deleteExpireDataFile(table_id: String,
range_id: String,
file_path: String,
write_version: Long,
commit_id: String,
modification_time: Long): Unit = {
cassandraConnector.withSessionDo(session => {
session.execute(
s"""
|update $database.data_info
|set expire_version=$write_version,commit_id='$commit_id',modification_time=$modification_time
|where table_id='$table_id' and range_id='$range_id' and file_path='$file_path'
""".stripMargin)
})
}
//delete file info in table data_info
def removeFileByName(table_id: String, range_id: String, read_version: Long): Unit = {
cassandraConnector.withSessionDo(session => {
val candidateFiles = new ArrayBuffer[String]()
val get = session.executeAsync(
s"""
|select file_path,expire_version
|from $database.data_info where table_id='$table_id' and range_id='$range_id'
""".stripMargin)
val itr = get.getUninterruptibly.iterator()
while (itr.hasNext) {
val file = itr.next()
if (file.getLong("expire_version") <= read_version) {
candidateFiles += file.getString("file_path")
}
}
candidateFiles.foreach(file => {
session.execute(
s"""
|delete from $database.data_info
|where table_id='$table_id' and range_id='$range_id'
|and file_path='$file'
""".stripMargin)
})
})
}
//check for files conflict
/**
 * Detects file-level conflicts between this commit and commits that landed concurrently.
 *
 * For each partition whose pre_write_version is not exactly read_version + 1 (meaning another
 * commit happened in between), it scans the partition's data_info rows and throws:
 *  - fileChangedException when `checkAddFile` is set and a file newer than our read_version
 *    appeared while we had read files (a concurrent writer added data we did not see);
 *  - fileDeletedException when `checkDeleteFile` is set and a file we read — or intend to
 *    expire in this commit — has already been expired by someone else.
 *
 * @param commit_id          id of the commit being validated (used in error reporting)
 * @param partition_info_arr partitions touched by this commit
 * @param checkAddFile       whether concurrently-added files are a conflict
 * @param checkDeleteFile    whether concurrently-deleted files are a conflict
 */
def checkDataInfo(commit_id: String,
                  partition_info_arr: Array[PartitionInfo],
                  checkAddFile: Boolean,
                  checkDeleteFile: Boolean): Unit = {
  cassandraConnector.withSessionDo(session => {
    for (partition_info <- partition_info_arr) {
      //if there have other commits between our read version and write version
      if (partition_info.read_version + 1 != partition_info.pre_write_version) {
        val res = session.executeAsync(
          s"""
             |select file_path,write_version,expire_version from $database.data_info
             |where table_id='${partition_info.table_id}' and range_id='${partition_info.range_id}'
          """.stripMargin)
        val itr = res.getUninterruptibly.iterator()
        while (itr.hasNext) {
          val row = itr.next()
          val file_path = row.getString("file_path")
          val write_version = row.getLong("write_version")
          //read once; used both for the liveness test and the exception payload
          val expire_version = row.getLong("expire_version")
          //check whether it added new files
          if (checkAddFile &&
            partition_info.read_files.nonEmpty &&
            write_version > partition_info.read_version) {
            logInfo("!!!!!!!!!!!!!!!!! new file added err, read files: "
              + partition_info.read_files.mkString(",") + ", commit id: " + commit_id)
            throw MetaRerunErrors.fileChangedException(
              partition_info,
              file_path,
              write_version,
              commit_id)
          }
          //check whether it deleted files which had been read or should be delete in this commit
          if (checkDeleteFile &&
            (partition_info.read_files.map(_.file_path).contains(file_path) ||
              partition_info.expire_files.map(_.file_path).contains(file_path))
          ) {
            //expire_version == long_max_value means the file is still live; anything else
            //means another commit already expired it -> conflict
            if (expire_version != long_max_value) {
              logInfo("!!!!!!!!!!!!!!!!! file deleted err, read files: "
                + partition_info.read_files.mkString(",") + " commit id: " + commit_id)
              throw MetaRerunErrors.fileDeletedException(
                partition_info,
                file_path,
                expire_version,
                commit_id)
            }
          }
        }
      }
    }
  })
}
/** Drops every data_info row belonging to the given table/range partition. */
def deleteDataInfoByRangeId(table_id: String, range_id: String): Unit = {
  val deleteCql =
    s"""
       |delete from $database.data_info
       |where table_id='$table_id' and range_id='$range_id'
      """.stripMargin
  cassandraConnector.withSessionDo(_.execute(deleteCql))
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/exception/MetaRetryErrors.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.exception
import com.engineplus.star.meta.MetaUtils
/** Factory for retryable meta-layer errors; callers catch MetaRetryException and retry. */
object MetaRetryErrors {

  /** Retryable: updating the commit timestamp timed out. */
  def failedUpdateCommitTimestampException(table_name: String,
                                           commit_id: String): MetaRetryException = {
    val message =
      s"""
         |Error: Failed to update commit timestamp because of timeout.
         |You may need to reset option COMMIT_TIMEOUT, default is ${MetaUtils.COMMIT_TIMEOUT}ms.
         |Error table: $table_name, commit_id: $commit_id .
      """.stripMargin
    new MetaRetryException(message, commit_id)
  }

  /** Retryable: marking the commit tag timed out. */
  def failedMarkCommitTagException(table_name: String,
                                   commit_id: String): MetaRetryException = {
    val message =
      s"""
         |Error: Failed to mark commit tag because of timeout.
         |You may need to reset option COMMIT_TIMEOUT, now is ${MetaUtils.COMMIT_TIMEOUT}ms.
         |Error table: $table_name, commit_id: $commit_id .
      """.stripMargin
    new MetaRetryException(message, commit_id)
  }

  /** Retryable: commit contention — too many concurrent commits on the same meta data. */
  def tooManyCommitException(): MetaRetryException = {
    val message =
      s"""
         |Warn: commit meta data failed, it may had too many commits at the same time.
      """.stripMargin
    new MetaRetryException(message)
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/CompactionCommand.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import com.engineplus.star.meta.MetaVersion
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.PredicateHelper
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.execution.datasources.v2.merge.MergeDeltaParquetScan
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.utils.{DataFileInfo, PartitionInfo}
import org.apache.spark.sql.star.{BatchDataFileIndexV2, SnapshotManagement, TransactionCommit}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import scala.collection.JavaConversions._
/**
 * Command that compacts the delta files of a star table into merged files.
 *
 * @param snapshotManagement snapshot/transaction manager of the target table
 * @param conditionString    optional partition predicate ("" means whole table); when set,
 *                           it must resolve to exactly one range partition
 * @param force              compact every not-yet-compacted partition regardless of thresholds
 * @param mergeOperatorInfo  merge-operator configuration forwarded to the merge scan
 */
case class CompactionCommand(snapshotManagement: SnapshotManagement,
conditionString: String,
force: Boolean,
mergeOperatorInfo: Map[String, String])
extends RunnableCommand with PredicateHelper with Logging {
/**
 * Decides whether a partition needs compaction. A partition qualifies when:
 * 1. its delta file num exceeds the threshold value, or
 * 2. it has not been compacted and its last update time exceeds the threshold value.
 * With `force`, every not-yet-compacted partition qualifies unconditionally.
 */
def filterPartitionNeedCompact(spark: SparkSession,
force: Boolean,
partitionInfo: PartitionInfo): Boolean = {
// partitions untouched since this cutoff (and never compacted) are due for compaction
val timestampLimit = System.currentTimeMillis() - spark.conf.get(StarLakeSQLConf.COMPACTION_TIME)
if (force) {
!partitionInfo.be_compacted
} else {
if (partitionInfo.delta_file_num >= spark.conf.get(StarLakeSQLConf.MAX_DELTA_FILE_NUM)) {
true
} else if (partitionInfo.last_update_timestamp <= timestampLimit
&& !partitionInfo.be_compacted) {
true
} else {
false
}
}
}
// Reads `files` through a merge scan and rewrites them as compacted files inside the
// supplied transaction, then commits the new files against the freshly-resolved read set.
def executeCompaction(spark: SparkSession, tc: TransactionCommit, files: Seq[DataFileInfo]): Unit = {
// file index restricted to exactly the files selected for this compaction round
val fileIndex = BatchDataFileIndexV2(spark, snapshotManagement, files)
val table = StarLakeTableV2(
spark,
new Path(snapshotManagement.table_name),
None,
None,
Option(fileIndex),
Option(mergeOperatorInfo)
)
// "isCompaction" switches the scan into compaction mode
val option = new CaseInsensitiveStringMap(
Map("basePath" -> tc.tableInfo.table_name, "isCompaction" -> "true"))
val scan = table.newScanBuilder(option).build()
// re-resolve the read set from the scan's own index so the commit conflict-checks
// against exactly what the merge scan will read
val newReadFiles = scan.asInstanceOf[MergeDeltaParquetScan].newFileIndex.getFileInfo(Nil)
val v2Relation = DataSourceV2Relation(
table,
table.schema().toAttributes,
None,
None,
option
)
// DataFrame over the merge scan: rows come out already merged across delta files
val compactDF = Dataset.ofRows(
spark,
DataSourceV2ScanRelation(
v2Relation,
scan,
table.schema().toAttributes
)
)
tc.setReadFiles(newReadFiles)
tc.setCommitType("compaction")
val newFiles = tc.writeFiles(compactDF, isCompaction = true)
// commit replaces newReadFiles with newFiles atomically
tc.commit(newFiles, newReadFiles)
logInfo("=========== Compaction Success!!! ===========")
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val condition = conditionString match {
case "" => None
case _: String => Option(expr(conditionString).expr)
}
//when condition is defined, only one partition need compaction,
//else we will check whole table
if (condition.isDefined) {
val targetOnlyPredicates =
splitConjunctivePredicates(condition.get)
snapshotManagement.withNewTransaction(tc => {
val files = tc.filterFiles(targetOnlyPredicates)
//ensure only one partition execute compaction command
val partitionSet = files.map(_.range_key).toSet
if (partitionSet.isEmpty) {
throw StarLakeErrors.partitionColumnNotFoundException(condition.get, 0)
} else if (partitionSet.size > 1) {
throw StarLakeErrors.partitionColumnNotFoundException(condition.get, partitionSet.size)
}
val range_value = partitionSet.head
val table_id = tc.tableInfo.table_id
val range_id = tc.snapshot.getPartitionInfoArray
.filter(part => part.range_value.equals(range_value))
.head.range_id
val partitionInfo = MetaVersion.getSinglePartitionInfo(table_id, range_value, range_id)
// lazy: only evaluated when be_compacted is false; a partition where every bucket has
// a single file has no delta files to merge
lazy val hasNoDeltaFile = if (force) {
false
} else {
files.groupBy(_.file_bucket_id).forall(_._2.size == 1)
}
if (partitionInfo.be_compacted || hasNoDeltaFile) {
logInfo("== Compaction: This partition has been compacted or has no delta file.")
} else {
executeCompaction(sparkSession, tc, files)
}
})
} else {
// whole-table path: compact each qualifying partition in its own transaction
val allInfo = MetaVersion.getAllPartitionInfo(snapshotManagement.getTableInfoOnly.table_id)
val partitionsNeedCompact = allInfo
.filter(filterPartitionNeedCompact(sparkSession, force, _))
partitionsNeedCompact.foreach(part => {
snapshotManagement.withNewTransaction(tc => {
val files = tc.getCompactionPartitionFiles(part)
val hasNoDeltaFile = if (force) {
false
} else {
files.groupBy(_.file_bucket_id).forall(_._2.size == 1)
}
if (hasNoDeltaFile) {
logInfo(s"== Partition ${part.range_value} has no delta file.")
} else {
executeCompaction(sparkSession, tc, files)
}
})
})
}
Seq.empty
}
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/NotSupportedDDLSuite.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import java.util.Locale
import com.engineplus.star.tables.StarTable
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.test.{SQLTestUtils, SharedSparkSession}
import org.apache.spark.sql.{AnalysisException, QueryTest}
import scala.util.control.NonFatal
/** Concrete suite wiring the shared not-supported-DDL tests to a local Spark session. */
class NotSupportedDDLSuite
extends NotSupportedDDLBase
with SharedSparkSession
with StarLakeSQLCommandTest
/**
 * Shared tests asserting that DDL statements unsupported by star tables fail
 * (or are ignored) with a clear error message. Creates one non-partitioned and
 * one partitioned table for all tests and drops them afterwards.
 */
abstract class NotSupportedDDLBase extends QueryTest
with SQLTestUtils {
val format = "star"
val nonPartitionedTableName = "starTbl"
val partitionedTableName = "partitionedStarLakeTbl"
protected override def beforeAll(): Unit = {
super.beforeAll()
try {
sql(
s"""
|CREATE TABLE $nonPartitionedTableName
|USING $format
|AS SELECT 1 as a, 'a' as b
""".stripMargin)
sql(
s"""
|CREATE TABLE $partitionedTableName (a INT, b STRING, p1 INT)
|USING $format
|PARTITIONED BY (p1)
""".stripMargin)
sql(s"INSERT INTO $partitionedTableName SELECT 1, 'A', 2")
} catch {
// if fixture creation fails partway, clean up what was created before rethrowing
case NonFatal(e) =>
afterAll()
throw e
}
}
protected override def afterAll(): Unit = {
try {
// capture table locations BEFORE dropping, so the star meta data can be removed too
val location = Seq(nonPartitionedTableName, partitionedTableName).map(tbl => {
try {
Option(spark.sessionState.catalog.getTableMetadata(TableIdentifier(tbl)).location)
} catch {
// best-effort: table may not exist if beforeAll failed early
case e: Exception => None
}
})
sql(s"DROP TABLE IF EXISTS $nonPartitionedTableName")
sql(s"DROP TABLE IF EXISTS $partitionedTableName")
location.foreach(loc => {
if (loc.isDefined) {
try {
StarTable.forPath(loc.get.toString).dropTable()
} catch {
// best-effort cleanup; ignore failures so super.afterAll still runs
case e: Exception =>
}
}
})
} finally {
super.afterAll()
}
}
// Runs `query` and asserts it fails with "operation not allowed" or one of `messages`.
private def assertUnsupported(query: String, messages: String*): Unit = {
val allErrMessages = "operation not allowed" +: messages
val e = intercept[AnalysisException] {
sql(query)
}
assert(allErrMessages.exists(err => e.getMessage.toLowerCase(Locale.ROOT).contains(err)))
}
// Runs `query` and asserts it succeeds but prints an "ignored" notice to stdout.
private def assertIgnored(query: String): Unit = {
val outputStream = new java.io.ByteArrayOutputStream()
Console.withOut(outputStream) {
sql(query)
}
assert(outputStream.toString.contains("The request is ignored"))
}
test("bucketing is not supported for star tables") {
withTable("tbl") {
assertUnsupported(
s"""
|CREATE TABLE tbl(a INT, b INT)
|USING $format
|CLUSTERED BY (a) INTO 5 BUCKETS
""".stripMargin)
}
}
test("CREATE TABLE LIKE") {
withTable("tbl") {
assertUnsupported(s"CREATE TABLE tbl LIKE $nonPartitionedTableName")
}
}
test("ANALYZE TABLE PARTITION") {
assertUnsupported(s"ANALYZE TABLE $partitionedTableName PARTITION (p1) COMPUTE STATISTICS",
"analyze table is not supported for v2 tables")
}
test("ALTER TABLE ADD PARTITION") {
assertUnsupported(s"ALTER TABLE $partitionedTableName ADD PARTITION (p1=3)",
"can not alter partitions")
}
test("ALTER TABLE DROP PARTITION") {
assertUnsupported(s"ALTER TABLE $partitionedTableName DROP PARTITION (p1=2)",
"can not alter partitions")
}
test("ALTER TABLE RECOVER PARTITIONS") {
assertUnsupported(s"ALTER TABLE $partitionedTableName RECOVER PARTITIONS")
assertUnsupported(s"MSCK REPAIR TABLE $partitionedTableName")
}
test("ALTER TABLE SET SERDEPROPERTIES") {
assertUnsupported(s"ALTER TABLE $nonPartitionedTableName SET SERDEPROPERTIES (s1=3)")
}
test("ALTER TABLE RENAME TO") {
assertUnsupported(s"ALTER TABLE $nonPartitionedTableName RENAME TO newTbl")
}
test("LOAD DATA") {
assertUnsupported(
s"""LOAD DATA LOCAL INPATH '/path/to/home' INTO TABLE $nonPartitionedTableName""",
"load data is not supported for v2 tables")
}
test("INSERT OVERWRITE DIRECTORY") {
assertUnsupported(s"INSERT OVERWRITE DIRECTORY '/path/to/home' USING $format VALUES (1, 'a')")
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/schema/InvariantCheckerExec.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.schema
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, BindReferences, Expression, GetStructField, Literal, SortOrder}
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.schema.Invariants.NotNull
import org.apache.spark.sql.types.{NullType, StructType}
/**
 * A physical operator that validates records, before they are written into StarTable. Each row
 * is left unchanged after validations.
 *
 * For every invariant, an extractor expression locating the (possibly nested) column is built
 * and wrapped in a CheckInvariant, which is evaluated per row as an unsafe projection.
 */
case class InvariantCheckerExec(child: SparkPlan,
invariants: Seq[Invariant]) extends UnaryExecNode {
override def output: Seq[Attribute] = child.output
// NOT NULL invariants must fail loudly when the column cannot be found at all
private def isNullNotOkay(invariant: Invariant): Boolean = invariant.rule match {
case NotNull => true
case _ => false
}
/** Build extractors to access the column an invariant is defined on. */
private def buildExtractors(invariant: Invariant): Option[Expression] = {
assert(invariant.column.nonEmpty)
val topLevelColumn = invariant.column.head
val topLevelRefOpt = output.collectFirst {
case a: AttributeReference if SchemaUtils.COL_RESOLVER(a.name, topLevelColumn) => a
}
val rejectColumnNotFound = isNullNotOkay(invariant)
if (topLevelRefOpt.isEmpty) {
if (rejectColumnNotFound) {
throw StarLakeErrors.notNullInvariantException(invariant)
}
}
if (invariant.column.length == 1) {
// simple top-level column: bind it directly against this operator's output
topLevelRefOpt.map(BindReferences.bindReference[Expression](_, output))
} else {
// nested column: walk down the struct path, building GetStructField accessors
topLevelRefOpt.flatMap { topLevelRef =>
val boundTopLevel = BindReferences.bindReference[Expression](topLevelRef, output)
try {
val nested = invariant.column.tail.foldLeft(boundTopLevel) { case (e, fieldName) =>
e.dataType match {
case StructType(fields) =>
val ordinal = fields.indexWhere(f =>
SchemaUtils.COL_RESOLVER(f.name, fieldName))
if (ordinal == -1) {
// IndexOutOfBoundsException is used as control flow and mapped below
throw new IndexOutOfBoundsException(s"Not nullable column not found in struct: " +
s"${fields.map(_.name).mkString("[", ",", "]")}")
}
GetStructField(e, ordinal, Some(fieldName))
case _ =>
throw new UnsupportedOperationException(
"Invariants on nested fields other than StructTypes are not supported.")
}
}
Some(nested)
} catch {
// missing field is fatal only for NOT NULL invariants; otherwise skip the invariant
case i: IndexOutOfBoundsException if rejectColumnNotFound =>
throw InvariantViolationException(invariant, i.getMessage)
case _: IndexOutOfBoundsException if !rejectColumnNotFound =>
None
}
}
}
}
override protected def doExecute(): RDD[InternalRow] = {
// no invariants -> pass rows through untouched
if (invariants.isEmpty) return child.execute()
// an unresolvable (but allowed-missing) column degrades to a null literal check
val boundRefs = invariants.map { invariant =>
CheckInvariant(buildExtractors(invariant).getOrElse(Literal(null, NullType)), invariant)
}
child.execute().mapPartitionsInternal { rows =>
// projection is generated once per partition; evaluating it throws on violation
val assertions = GenerateUnsafeProjection.generate(boundRefs)
rows.map { row =>
assertions(row)
row
}
}
}
// ordering and partitioning are untouched: this operator only validates rows
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def outputPartitioning: Partitioning = child.outputPartitioning
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/StarSinkSuite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import java.io.File
import java.util.Locale
import com.engineplus.star.meta.{MetaVersion, StreamingRecord}
import org.apache.spark.sql.streaming.StreamTest
import org.apache.commons.io.FileUtils
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql._
import org.apache.spark.sql.execution.DataSourceScanExec
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceRDD}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions._
import org.apache.spark.sql.star.test.StarLakeTestUtils
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.types._
/**
 * End-to-end tests for the StarLake streaming sink ("star" format): output modes,
 * range/hash partitioning options, schema compatibility checks, streaming-info
 * bookkeeping and watermarked aggregations.
 */
class StarSinkSuite extends StreamTest with StarLakeTestUtils {
// each test drives a real micro-batch query to completion, so allow a generous timeout
override val streamingTimeout = 60.seconds
import testImplicits._
// runs `f` with two fresh temp dirs: (sink output dir, checkpoint dir)
private def withTempDirs(f: (File, File) => Unit): Unit = {
withTempDir { file1 =>
withTempDir { file2 =>
f(file1, file2)
}
}
}
// append mode: each batch adds rows; StreamingRecord batch id advances 0,1,2
test("append mode") {
failAfter(streamingTimeout) {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val df = inputData.toDF()
val query = df.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1)
query.processAllAvailable()
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
checkDatasetUnorderly(outputDf.as[Int], 1)
val snapshotManagement = SnapshotManagement(outputDir.getCanonicalPath)
val tableId = snapshotManagement.snapshot.getTableInfo.table_id
// streaming info records (query id, last committed batch id)
var info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 0L)
inputData.addData(2)
query.processAllAvailable()
checkDatasetUnorderly(outputDf.as[Int], 1, 2)
info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 1L)
inputData.addData(3)
query.processAllAvailable()
checkDatasetUnorderly(outputDf.as[Int], 1, 2, 3)
info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 2L)
} finally {
query.stop()
}
}
}
}
// complete mode: each batch rewrites the full aggregate result
test("complete mode") {
failAfter(streamingTimeout) {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val df = inputData.toDF()
val query =
df.groupBy().count()
.writeStream
.outputMode("complete")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1)
query.processAllAvailable()
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
checkDatasetUnorderly(outputDf.as[Long], 1L)
val snapshotManagement = SnapshotManagement(outputDir.getCanonicalPath)
val tableId = snapshotManagement.snapshot.getTableInfo.table_id
var info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 0L)
inputData.addData(2)
query.processAllAvailable()
// count is replaced, not appended: table now holds the single value 2
checkDatasetUnorderly(outputDf.as[Long], 2L)
info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 1L)
inputData.addData(3)
query.processAllAvailable()
checkDatasetUnorderly(outputDf.as[Long], 3L)
info = StreamingRecord.getStreamingInfo(tableId)
assert(info._1.equals(query.id.toString) && info._2 == 2L)
} finally {
query.stop()
}
}
}
}
// update mode requires hashPartitions/hashBucketNum; without them the start fails
test("update mode: only supported with hash partition") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val e = intercept[AnalysisException] {
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.outputMode("update")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
}
assert(e.getMessage().contains("only support Update output mode with hash partition"))
val query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.outputMode("update")
.option("hashPartitions", "id")
.option("hashBucketNum", "2")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
.select("id", "value")
// hash partition column becomes non-nullable in the stored schema
val expectedSchema = new StructType()
.add(StructField("id", IntegerType, false))
.add(StructField("value", IntegerType))
assert(outputDf.schema === expectedSchema)
// Verify the data is correctly read
checkDatasetUnorderly(
outputDf.as[(Int, Int)],
(1, 1000), (2, 2000), (3, 3000))
val snapshotManagement = SnapshotManagement(outputDir.getCanonicalPath)
val tableInfo = MetaVersion.getTableInfo(snapshotManagement.table_name)
assert(tableInfo.hash_column.equals("id")
&& tableInfo.range_column.isEmpty
&& tableInfo.bucket_num == 2)
} finally {
if (query != null) {
query.stop()
}
}
}
}
// start() without a path must fail fast with a clear message
test("path not specified") {
failAfter(streamingTimeout) {
withTempDir { checkpointDir =>
val inputData = MemoryStream[Int]
val df = inputData.toDF()
val e = intercept[IllegalArgumentException] {
df.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start()
}
Seq("path", " not specified").foreach { msg =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(msg))
}
}
}
}
// partitionBy sets the range column; table meta reflects range-only partitioning
test("range partitioned writing and batch reading") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
.select("id", "value")
val expectedSchema = new StructType()
.add(StructField("id", IntegerType, false))
.add(StructField("value", IntegerType))
assert(outputDf.schema === expectedSchema)
// Verify the data is correctly read
checkDatasetUnorderly(
outputDf.as[(Int, Int)],
(1, 1000), (2, 2000), (3, 3000))
val snapshotManagement = SnapshotManagement(outputDir.getCanonicalPath)
val tableInfo = MetaVersion.getTableInfo(snapshotManagement.table_name)
// bucket_num == -1 means no hash bucketing configured
assert(tableInfo.range_column.equals("id")
&& tableInfo.hash_column.isEmpty
&& tableInfo.bucket_num == -1)
} finally {
if (query != null) {
query.stop()
}
}
}
}
// combined range + hash partitioning via explicit options
test("range and hash partitioned writing and batch reading") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val query =
ds.map(i => (i, i, i * 1000))
.toDF("range", "hash", "value")
.writeStream
.option("rangePartitions", "range")
.option("hashPartitions", "hash")
.option("hashBucketNum", "2")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
// note the duplicate 2: update-by-hash semantics collapse it to a single row
inputData.addData(1, 2, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
.select("range", "hash", "value")
val expectedSchema = new StructType()
.add(StructField("range", IntegerType, false))
.add(StructField("hash", IntegerType, false))
.add(StructField("value", IntegerType))
assert(outputDf.schema === expectedSchema)
// Verify the data is correctly read
checkDatasetUnorderly(
outputDf.as[(Int, Int, Int)],
(1, 1, 1000), (2, 2, 2000), (3, 3, 3000))
val snapshotManagement = SnapshotManagement(outputDir.getCanonicalPath)
val tableInfo = MetaVersion.getTableInfo(snapshotManagement.table_name)
assert(tableInfo.range_column.equals("range")
&& tableInfo.hash_column.equals("hash")
&& tableInfo.bucket_num == 2)
} finally {
if (query != null) {
query.stop()
}
}
}
}
// windowed count with watermark: results are emitted only once the watermark passes
test("work with aggregation + watermark") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Long]
val inputDF = inputData.toDF.toDF("time")
val outputDf = inputDF
.selectExpr("CAST(time AS timestamp) AS timestamp")
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds"))
.count()
.select("window.start", "window.end", "count")
val query =
outputDf.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
def addTimestamp(timestampInSecs: Int*): Unit = {
inputData.addData(timestampInSecs.map(_ * 1L): _*)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
def check(expectedResult: ((Long, Long), Long)*): Unit = {
val outputDf = spark.read.format("star").load(outputDir.getCanonicalPath)
.selectExpr(
"CAST(start as BIGINT) AS start",
"CAST(end as BIGINT) AS end",
"count")
checkDatasetUnorderly(
outputDf.as[(Long, Long, Long)],
expectedResult.map(x => (x._1._1, x._1._2, x._2)): _*)
}
addTimestamp(100) // watermark = None before this, watermark = 100 - 10 = 90 after this
addTimestamp(104, 123) // watermark = 90 before this, watermark = 123 - 10 = 113 after this
addTimestamp(140) // wm = 113 before this, emit results on 100-105, wm = 130 after this
check((100L, 105L) -> 2L, (120L, 125L) -> 1L) // no-data-batch emits results on 120-125
} finally {
if (query != null) {
query.stop()
}
}
}
}
// batch append with a different partitionBy than the existing table must be rejected
test("throw exception when users are trying to write in batch with different partitioning") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val e = intercept[AnalysisException] {
spark.range(100)
.select('id.cast("integer"), 'id % 4 as 'by4, 'id.cast("integer") * 1000 as 'value)
.write
.format("star")
.partitionBy("id", "by4")
.mode("append")
.save(outputDir.getCanonicalPath)
}
assert(e.getMessage.contains("Range partition column `id` was already set"))
} finally {
query.stop()
}
}
}
// column type conflict (int vs string) detected when batch write follows streaming
test("incompatible schema merging throws errors - first streaming then batch") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
try {
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val e = intercept[AnalysisException] {
spark.range(100).select('id, ('id * 3).cast("string") as 'value)
.write
.partitionBy("id")
.format("star")
.mode("append")
.save(outputDir.getCanonicalPath)
}
assert(e.getMessage.contains("incompatible"))
} finally {
query.stop()
}
}
}
// same conflict in the opposite order surfaces through StreamingQueryException
test("incompatible schema merging throws errors - first batch then streaming") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val dsWriter =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
spark.range(100).select('id, ('id * 3).cast("string") as 'value)
.write
.format("star")
.mode("append")
.save(outputDir.getCanonicalPath)
val wrapperException = intercept[StreamingQueryException] {
val q = dsWriter.start(outputDir.getCanonicalPath)
inputData.addData(1, 2, 3)
q.processAllAvailable()
}
// streaming wraps the underlying AnalysisException
assert(wrapperException.cause.isInstanceOf[AnalysisException])
assert(wrapperException.cause.getMessage.contains("incompatible"))
}
}
// partitioning on every column leaves no data columns and must be rejected
test("can't write out with all columns being partition columns") {
withTempDirs { (outputDir, checkpointDir) =>
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id", "value")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("star")
.start(outputDir.getCanonicalPath)
val e = intercept[StreamingQueryException] {
inputData.addData(1)
query.awaitTermination(10000)
}
assert(e.cause.isInstanceOf[AnalysisException]
&& e.getMessage.contains("Cannot use all columns for partition columns"))
}
}
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/CustomConnectionFactory.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import com.datastax.driver.core._
import com.datastax.driver.core.policies.{ConstantReconnectionPolicy, DowngradingConsistencyRetryPolicy, RoundRobinPolicy, TokenAwarePolicy}
import com.datastax.spark.connector.cql.{CassandraConnectionFactory, CassandraConnectorConf}
/**
 * Cassandra connection factory with pooling, timeout and consistency settings tuned
 * for StarLake meta-store access; plugged into the spark-cassandra-connector.
 */
object CustomConnectionFactory extends CassandraConnectionFactory {
// Assembles a Cluster.Builder from the connector conf plus MetaUtils-supplied
// credentials and timeouts.
protected def clusterBuilder(conf: CassandraConnectorConf): Cluster.Builder = {
val builder = Cluster.builder()
// large per-host connection/request limits: meta operations are many small queries
val poolingOpts = new PoolingOptions
poolingOpts.setMaxRequestsPerConnection(HostDistance.LOCAL, 600)
poolingOpts.setMaxRequestsPerConnection(HostDistance.REMOTE, 600)
poolingOpts.setMaxConnectionsPerHost(HostDistance.LOCAL, 600)
poolingOpts.setMaxConnectionsPerHost(HostDistance.REMOTE, 600)
poolingOpts.setIdleTimeoutSeconds(600)
poolingOpts.setCoreConnectionsPerHost(HostDistance.LOCAL, 20)
poolingOpts.setCoreConnectionsPerHost(HostDistance.REMOTE, 20)
poolingOpts.setHeartbeatIntervalSeconds(60)
poolingOpts.setPoolTimeoutMillis(1000 * 60)
builder.addContactPoints(conf.hosts.toSeq: _*)
.withPort(conf.port)
// NOTE(review): DowngradingConsistencyRetryPolicy is deprecated in driver 3.5+;
// it silently lowers consistency on retry — confirm this is intended
.withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE)
.withReconnectionPolicy(new ConstantReconnectionPolicy(100L))
.withSocketOptions(new SocketOptions()
.setTcpNoDelay(true)
.setConnectTimeoutMillis(MetaUtils.META_CONNECT_TIMEOUT)
.setReadTimeoutMillis(MetaUtils.META_READ_TIMEOUT))
.withCredentials(MetaUtils.META_USERNAME, MetaUtils.META_PASSWORD)
.withCompression(ProtocolOptions.Compression.LZ4)
// ALL-consistency writes/reads: meta correctness is preferred over availability
.withQueryOptions(new QueryOptions()
.setConsistencyLevel(ConsistencyLevel.ALL)
.setSerialConsistencyLevel(ConsistencyLevel.SERIAL))
.withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()))
}
override def createCluster(conf: CassandraConnectorConf): Cluster = {
clusterBuilder(conf).build()
}
}
|
engine-plus/StarLake
|
src/test/scala/com/engineplus/star/meta/MetaCommitSuite.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import java.util.concurrent.{Executors, TimeUnit}
import com.engineplus.star.meta.MetaCommit.generateCommitIdToAddUndoLog
import com.engineplus.star.tables.StarTable
import org.apache.spark.sql.star.exception.MetaRerunException
import org.apache.spark.sql.star.test.StarLakeTestUtils
import org.apache.spark.sql.star.utils.{CommitOptions, DataFileInfo, MetaInfo, PartitionInfo}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest}
import scala.collection.mutable.ArrayBuffer
// Shared fixtures and helpers for the meta-commit test suites.
trait MetaCommitSuiteBase extends QueryTest
with SharedSparkSession with StarLakeTestUtils {
override def beforeAll(): Unit = {
super.beforeAll()
// Create the meta database/tables once before any test in the suite runs.
MetaTableManage.initDatabaseAndTables()
}
import testImplicits._
// Writes a small range-partitioned (by "key") star table at `tablePath`.
def initTable(tablePath: String): Unit = {
Seq(("a", 1), ("b", 2), ("c", 3)).toDF("key", "value")
.write.partitionBy("key").format("star").mode("append")
.save(tablePath)
}
// Writes a small range+hash partitioned star table (hash column "hash",
// single bucket) at `tablePath`.
def initHashTable(tablePath: String): Unit = {
Seq(("a", 1, 1), ("b", 1, 2), ("c", 1, 3)).toDF("key", "hash", "value")
.write.partitionBy("key")
.option("hashPartitions", "hash")
.option("hashBucketNum", "1")
.format("star").mode("append")
.save(tablePath)
}
// Fabricates a single synthetic added file named "addFile:<key>_<num>" for
// partition `key`; the numbers (size 233, modification_time 456, write_version -1)
// are arbitrary test values.
def addDataFileInfo(key: String, num: Int): Seq[DataFileInfo] = {
Seq(DataFileInfo(
"addFile:" + key + "_" + num,
MetaUtils.getPartitionMapFromKey(key),
233,
456,
-1,
true,
"test_value"
))
}
// Returns copies of each PartitionInfo with the current files recorded as
// read_files and one synthetic file appended via add_files (no expirations).
def getNewPartInfoWithAddFile(partitionInfoArr: Array[PartitionInfo],
num: Int): Array[PartitionInfo] = {
partitionInfoArr.map(m => {
val files = DataOperation
.getSinglePartitionDataInfo(m.table_id, m.range_id, m.range_value, m.read_version)
.toArray
m.copy(
read_files = files,
add_files = addDataFileInfo(m.range_value, num).toArray,
)
})
}
// Like getNewPartInfoWithAddFile, but also expires every currently-read file,
// modeling a compaction-style rewrite.
def getNewPartInfoWithAddAndExpireFile(partitionInfoArr: Array[PartitionInfo],
num: Int): Array[PartitionInfo] = {
partitionInfoArr.map(m => {
val files = DataOperation
.getSinglePartitionDataInfo(m.table_id, m.range_id, m.range_value, m.read_version)
.toArray
m.copy(
read_files = files,
add_files = addDataFileInfo(m.range_value, num).toArray,
expire_files = files
)
})
}
// Registers a test that performs one commit of the given type and checks that
// every partition advanced to read_version 2 and now reads the synthetic file.
def commitTest(commitType: String, changeSchema: Boolean): Unit = {
test(s"$commitType commit, change schema: $changeSchema") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
var partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val newPartitionInfoArr = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 1)
val metaInfo = MetaInfo(
tableInfo,
newPartitionInfoArr,
CommitType(commitType))
MetaCommit.doMetaCommit(metaInfo, changeSchema, CommitOptions(None, None))
partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
assert(partitionInfoArr.map(_.read_version).forall(_ == 2))
assert(partitionInfoArr.forall(m => {
val fileName = "addFile:" + m.range_value + "_" + 1
DataOperation
.getSinglePartitionDataInfo(m.table_id, m.range_id, m.range_value, m.read_version)
.head
.file_path
.equals(fileName)
}))
})
}
}
// Registers a test that fires `taskNum` commits of the same type concurrently
// from a fixed thread pool; when `derange` is set the multi-partition commits
// are shuffled to exercise out-of-order lock acquisition. Success is judged by
// the first partition reaching read_version == taskNum + 1.
def concurrentCommit(commitType: String, partitionNum: String, taskNum: Int, derange: Boolean): Unit = {
test(s"concurrent $commitType commit, change schema: false - $partitionNum partition, derange: $derange") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
var partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val arrMetaInfo = new ArrayBuffer[MetaInfo]()
for (i <- 0 until taskNum) {
val newPartitionInfoArr = partitionNum match {
case "single" => getNewPartInfoWithAddFile(Array(partitionInfoArr.head), i)
case "multiple" =>
if (derange) {
//disturb order
getNewPartInfoWithAddFile(partitionInfoArr, i)
.map(m => (m, scala.util.Random.nextInt(partitionInfoArr.length * 3)))
.sortBy(_._2)
.map(_._1)
} else {
getNewPartInfoWithAddFile(partitionInfoArr, i)
}
case _ => throw new Exception("Illegal partitionNum, it must be 'single' or 'multiple'")
}
//
arrMetaInfo += MetaInfo(
tableInfo,
newPartitionInfoArr,
CommitType(commitType)
)
}
val pool = Executors.newFixedThreadPool(taskNum)
for (i <- 0 until taskNum) {
pool.execute(new Runnable {
override def run(): Unit = {
// NOTE(review): exceptions thrown here are swallowed by the executor;
// failures only surface through the read_version assertion below.
MetaCommit.doMetaCommit(arrMetaInfo(i), false, CommitOptions(None, None))
}
})
}
pool.shutdown()
pool.awaitTermination(20, TimeUnit.MINUTES)
partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
assert(partitionInfoArr.head.read_version == taskNum + 1)
})
}
}
// Builds `num` one-row DataFrames all targeting the new range partition key "d".
def getNewPartitionDFSeq(num: Int): Seq[DataFrame] = {
(0 until num).map(i => {
Seq(("d", 1, i)).toDF("key", "hash", "value")
})
}
}
// Concrete meta-commit suite: instantiates the parameterized tests from the
// base trait and adds lock/rollback/conflict scenarios.
class MetaCommitSuite extends MetaCommitSuiteBase {
// One plain commit test per commit type, without schema change.
Seq("simple", "delta", "compaction")
.foreach(commitTest(_, false))
// Concurrency matrix for delta commits: single vs. multiple partitions, with
// one deranged (shuffled partition order) variant.
Seq("delta")
.foreach(t => {
Seq("single", "multiple")
.foreach(f => {
if (f.equals("single")) {
concurrentCommit(t, f, 5, false)
} else if (f.equals("multiple")) {
concurrentCommit(t, f, 5, false)
concurrentCommit(t, f, 3, true)
}
})
})
// A commit that takes the partition write lock but never completes should not
// block a later commit: the new task must win and its files must be visible.
test("Committing state will roll back when timeout") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
var partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val newPartitionInfoArr1 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 1)
val metaInfo1 = MetaInfo(
tableInfo,
newPartitionInfoArr1,
CommitType("simple"))
val oldReadVersion = partitionInfoArr.map(_.read_version).max
val commit_id = generateCommitIdToAddUndoLog(
metaInfo1.table_info.table_name,
metaInfo1.table_info.table_id,
"",
-1L)
//only get partition lock
val newMetaInfo = MetaCommit.takePartitionsWriteLock(metaInfo1, commit_id)
val newMetaInfo1 = MetaCommit.updatePartitionInfoAndGetNewMetaInfo(newMetaInfo)
assert(newMetaInfo1.partitionInfoArray.map(_.pre_write_version).max == oldReadVersion + 1)
//new task will commit successful anyway
val newPartitionInfoArr2 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 2)
val metaInfo2 = MetaInfo(
tableInfo,
newPartitionInfoArr2,
CommitType("simple"))
MetaCommit.doMetaCommit(metaInfo2, false, CommitOptions(None, None))
partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
assert(
partitionInfoArr.map(_.read_version).max == oldReadVersion + 1 &&
partitionInfoArr.forall(m => {
val fileName = "addFile:" + m.range_value + "_" + 2
DataOperation
.getSinglePartitionDataInfo(m.table_id, m.range_id, m.range_value, m.read_version)
.head
.file_path
.equals(fileName)
})
)
})
}
// Two commits built from the same base version: the second must fail with
// MetaRerunException because the first already added files it did not read.
test("check files conflict - files change while commit will throw MetaRerunException") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
val partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val newPartitionInfoArr1 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 1)
val newPartitionInfoArr2 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 2)
val metaInfo1 = MetaInfo(
tableInfo,
newPartitionInfoArr1,
CommitType("simple"))
val metaInfo2 = MetaInfo(
tableInfo,
newPartitionInfoArr2,
CommitType("simple"))
MetaCommit.doMetaCommit(metaInfo1, false, CommitOptions(None, None))
val e = intercept[MetaRerunException](
MetaCommit.doMetaCommit(metaInfo2, false, CommitOptions(None, None)))
assert(e.getMessage.contains("Another job added file"))
})
}
// Same base version, compaction commits: the second must fail because the
// files it wants to expire were already deleted by the first.
test("check files conflict - can't delete file twice") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
val partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val newPartitionInfoArr1 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 1)
val newPartitionInfoArr2 = getNewPartInfoWithAddAndExpireFile(partitionInfoArr, 2)
val metaInfo1 = MetaInfo(
tableInfo,
newPartitionInfoArr1,
CommitType("compaction"))
val metaInfo2 = MetaInfo(
tableInfo,
newPartitionInfoArr2,
CommitType("compaction"))
MetaCommit.doMetaCommit(metaInfo1, false, CommitOptions(None, None))
val e = intercept[MetaRerunException](
MetaCommit.doMetaCommit(metaInfo2, false, CommitOptions(None, None)))
assert(e.getMessage.contains("deleted by another job during write_version="))
})
}
// A held schema lock is tolerated by the next schema-changing commit, but a
// further schema change based on the stale schema version must be rejected.
test("take schema lock concurrently") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initTable(tableName)
val tableInfo = MetaVersion.getTableInfo(tableName)
val partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
val newPartitionInfoArr1 = getNewPartInfoWithAddFile(partitionInfoArr, 1)
val metaInfo1 = MetaInfo(
tableInfo,
newPartitionInfoArr1,
CommitType("delta"),
"commitId1")
val newSchema2 = new StructType()
.add("key", "string")
.add("add_col2", "string")
.add("value", "integer").json
val tableInfo2 = tableInfo.copy(table_schema = newSchema2)
val newPartitionInfoArr2 = getNewPartInfoWithAddFile(partitionInfoArr, 2)
val metaInfo2 = MetaInfo(
tableInfo2,
newPartitionInfoArr2,
CommitType("delta"))
MetaCommit.takeSchemaLock(metaInfo1)
MetaCommit.doMetaCommit(metaInfo2, true, CommitOptions(None, None))
val currentTableInfo = MetaVersion.getTableInfo(tableName)
assert(currentTableInfo.schema_version == tableInfo.schema_version + 1 &&
currentTableInfo.table_schema.equals(newSchema2))
val newSchema3 = new StructType()
.add("key", "string")
.add("add_col3", "string")
.add("value", "integer").json
val tableInfo3 = tableInfo.copy(table_schema = newSchema3)
val newPartitionInfoArr3 = getNewPartInfoWithAddFile(partitionInfoArr, 3)
val metaInfo3 = MetaInfo(
tableInfo3,
newPartitionInfoArr3,
CommitType("delta"))
val e = intercept[AnalysisException] {
MetaCommit.doMetaCommit(metaInfo3, true, CommitOptions(None, None))
}
assert(e.getMessage().contains("Schema has been changed for table"))
})
}
// Concurrent upserts that all create the same new range partition ("key=d")
// must serialize correctly: its read_version ends up equal to the task count.
test("create range partition concurrently") {
withTempDir(tmpDir => {
val tableName = MetaUtils.modifyTableString(tmpDir.getCanonicalPath)
initHashTable(tableName)
val taskNum = 5
val dfArr = getNewPartitionDFSeq(taskNum)
val table = StarTable.forPath(tableName)
val pool = Executors.newFixedThreadPool(taskNum)
for (i <- 0 until taskNum) {
pool.execute(new Runnable {
override def run(): Unit = {
table.upsert(dfArr(i))
}
})
}
pool.shutdown()
pool.awaitTermination(20, TimeUnit.MINUTES)
val tableInfo = MetaVersion.getTableInfo(tableName)
val partitionInfoArr = MetaVersion.getAllPartitionInfo(tableInfo.table_id)
assert(partitionInfoArr.filter(_.range_value.equals("key=d")).head.read_version == taskNum)
})
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/StarLakeConfig.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import java.util.{HashMap, Locale}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.utils.TableInfo
/**
 * A single table-level configuration entry.
 *
 * @param key                fully-qualified config key (prefixed with "star.")
 * @param defaultValue       string form of the default, used when the table has no explicit value
 * @param fromString         parser from the stored string to the typed value
 * @param validationFunction predicate the parsed value must satisfy
 * @param helpMessage        appended to the key in validation error messages
 */
case class StarLakeConfig[T](key: String,
                             defaultValue: String,
                             fromString: String => T,
                             validationFunction: T => Boolean,
                             helpMessage: String) {

  /**
   * Recover the saved value of this configuration from `TableInfo`, falling
   * back to the default when the table never changed it.
   */
  def fromTableInfo(table_info: TableInfo): T = {
    val stored = table_info.configuration.getOrElse(key, defaultValue)
    fromString(stored)
  }

  /**
   * Validate a candidate setting. A value that fails the predicate raises
   * IllegalArgumentException via `require`; a value that cannot even be parsed
   * (NumberFormatException) is wrapped in IllegalArgumentException with the
   * same message and the parse failure as cause.
   */
  private def validate(value: String): Unit = {
    val onErrorMessage = s"$key $helpMessage"
    try {
      val parsed = fromString(value)
      require(validationFunction(parsed), onErrorMessage)
    } catch {
      case e: NumberFormatException =>
        throw new IllegalArgumentException(onErrorMessage, e)
    }
  }

  /**
   * Validate `value` and return the key/value pair to persist in the metadata.
   */
  def apply(value: String): (String, String) = {
    validate(value)
    (key, value)
  }
}
/**
* Contains list of reservoir configs and validation checks.
*/
object StarLakeConfig extends Logging {
/**
 * A global default value set as a SQLConf will overwrite the default value of a StarLakeConfig.
 * For example, user can run:
 * set spark.engineplus.star.properties.defaults.appendOnly = true
 * This setting will be populated to a StarTable during its creation time and overwrites
 * the default value of star.appendOnly.
 *
 * We accept these SQLConfs as strings and only perform validation in StarLakeConfig. All the
 * StarLakeConfigs set in SQLConf should adopt the same prefix.
 */
val sqlConfPrefix = "spark.engineplus.star.properties.defaults."
// Registry of all known configs, keyed by the lower-cased key WITHOUT the
// "star." prefix. Populated as a side effect of buildConfig at class-load time.
private val entries = new HashMap[String, StarLakeConfig[_]]
// Creates a config (prefixing the key with "star.") and registers it in `entries`.
private def buildConfig[T](key: String,
defaultValue: String,
fromString: String => T,
validationFunction: T => Boolean,
helpMessage: String): StarLakeConfig[T] = {
val starConfig = StarLakeConfig(s"star.$key",
defaultValue,
fromString,
validationFunction,
helpMessage)
entries.put(key.toLowerCase(Locale.ROOT), starConfig)
starConfig
}
/**
 * Validates specified configurations and returns the normalized key -> value map.
 * Keys starting with "star." must name a registered config (otherwise an
 * unknown-configuration error is thrown); other keys pass through unchanged,
 * with a log hint when they collide with a known config name.
 */
def validateConfigurations(configurations: Map[String, String]): Map[String, String] = {
configurations.map {
case (key, value) if key.toLowerCase(Locale.ROOT).startsWith("star.") =>
Option(entries.get(key.toLowerCase(Locale.ROOT).stripPrefix("star.")))
.map(_ (value))
.getOrElse {
throw StarLakeErrors.unknownConfigurationKeyException(key)
}
case keyvalue@(key, _) =>
if (entries.containsKey(key.toLowerCase(Locale.ROOT))) {
logInfo(
s"""
|You are trying to set a property the key of which is the same as star config: $key.
|If you are trying to set a star config, prefix it with "star.", e.g. 'star.$key'.
""".stripMargin)
}
keyvalue
}
}
/**
 * Fetch global default values from SQLConf and merge them under the
 * table-level configuration (table settings win on key collisions).
 */
def mergeGlobalConfigs(sqlConfs: SQLConf,
tableConf: Map[String, String]): Map[String, String] = {
import collection.JavaConverters._
val globalConfs = entries.asScala.flatMap { case (key, config) =>
val sqlConfKey = sqlConfPrefix + config.key.stripPrefix("star.")
Option(sqlConfs.getConfString(sqlConfKey, null)) match {
case Some(default) => Some(config(default))
case _ => None
}
}
// tableConf is applied second, so explicit table settings override globals.
val updatedConf = globalConfs.toMap ++ tableConf
updatedConf
}
/**
 * Normalize the specified property keys if the key is for a star config:
 * replaces each recognized "star.*" key with its canonical casing.
 */
def normalizeConfigKeys(propKeys: Seq[String]): Seq[String] = {
propKeys.map {
case key if key.toLowerCase(Locale.ROOT).startsWith("star.") =>
Option(entries.get(key.toLowerCase(Locale.ROOT).stripPrefix("star.")))
.map(_.key).getOrElse(key)
case key => key
}
}
/**
 * Normalize the specified property key if the key is for a star config.
 * Single-key variant of [[normalizeConfigKeys]].
 */
def normalizeConfigKey(propKey: Option[String]): Option[String] = {
propKey.map {
case key if key.toLowerCase(Locale.ROOT).startsWith("star.") =>
Option(entries.get(key.toLowerCase(Locale.ROOT).stripPrefix("star.")))
.map(_.key).getOrElse(key)
case key => key
}
}
/**
 * Whether this star table is append-only. Files can't be deleted, or values can't be updated.
 */
val IS_APPEND_ONLY = buildConfig[Boolean](
"appendOnly",
"false",
_.toBoolean,
_ => true,
"needs to be a boolean.")
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/StarLakeOptions.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.StarLakeOptions._
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.sources.{StarLakeDataSource, StarLakeSQLConf}
import scala.util.Try
/** Base parsing utilities for StarLake reader/writer options. */
trait StarLakeOptionParser {
  /** Session configuration used for option fallbacks. */
  protected def sqlConf: SQLConf

  /** The raw, case-insensitive option map supplied by the user. */
  protected def options: CaseInsensitiveMap[String]

  /** Parse `input` as a boolean; `name` identifies the option in the error message. */
  def toBoolean(input: String, name: String): Boolean =
    Try(input.toBoolean).fold(
      _ => throw StarLakeErrors.illegalStarLakeOptionException(name, input, "must be 'true' or 'false'"),
      identity)
}
// Write-side options; combines the shared write-option accessors with parsing.
trait StarLakeWriteOptions
extends StarLakeWriteOptionsImpl
with StarLakeOptionParser {
import StarLakeOptions._
// Predicate over partition columns: only matching data is replaced on overwrite.
val replaceWhere: Option[String] = options.get(REPLACE_WHERE_OPTION)
}
/** Accessors for all StarLake writer options, with session-config fallbacks. */
trait StarLakeWriteOptionsImpl extends StarLakeOptionParser {

  /**
   * Whether the user has enabled auto schema merging in writes, via either the
   * DataFrame writer option or the SQL session configuration. The writer
   * option always takes precedence over the session config.
   */
  def canMergeSchema: Boolean =
    options.get(MERGE_SCHEMA_OPTION)
      .fold(sqlConf.getConf(StarLakeSQLConf.SCHEMA_AUTO_MIGRATE))(toBoolean(_, MERGE_SCHEMA_OPTION))

  /**
   * Whether overwriting the schema of a Star table is allowed in an
   * overwrite-mode operation. Defaults to false when the option is absent.
   */
  def canOverwriteSchema: Boolean =
    options.get(OVERWRITE_SCHEMA_OPTION) match {
      case Some(raw) => toBoolean(raw, OVERWRITE_SCHEMA_OPTION)
      case None => false
    }

  /**
   * Range partition columns as a comma-separated string. Compatible with
   * df.write.partitionBy; an explicit "rangePartitions" option wins over the
   * partitionBy-derived column list.
   */
  def rangePartitions: String =
    options.get(RANGE_PARTITIONS) match {
      case Some(explicit) => explicit
      case None =>
        options.get(PARTITION_BY)
          .map(StarLakeDataSource.decodePartitioningColumns)
          .getOrElse(Nil)
          .mkString(",")
    }

  /** Hash partition columns (comma separated); empty string when unset. */
  def hashPartitions: String = options.getOrElse(HASH_PARTITIONS, "")

  /** Number of hash buckets; -1 when unset. */
  def hashBucketNum: Int = options.getOrElse(HASH_BUCKET_NUM, "-1").toInt

  /** Whether delta files may be written; falls back to the session config. */
  def allowDeltaFile: Boolean =
    options.get(AllowDeltaFile)
      .fold(sqlConf.getConf(StarLakeSQLConf.USE_DELTA_FILE))(toBoolean(_, AllowDeltaFile))

  /** Optional short table name; None when the option is absent or empty. */
  def shortTableName: Option[String] =
    options.get(SHORT_TABLE_NAME).filter(_.nonEmpty)

  /** Whether this write creates a material view. Defaults to false. */
  def createMaterialView: Boolean =
    options.get(CREATE_MATERIAL_VIEW).fold(false)(toBoolean(_, CREATE_MATERIAL_VIEW))

  /** Whether this write updates a material view. Defaults to false. */
  def updateMaterialView: Boolean =
    options.get(UPDATE_MATERIAL_VIEW).fold(false)(toBoolean(_, UPDATE_MATERIAL_VIEW))

  /** SQL text backing a material view; empty string when unset. */
  def materialSQLText: String = options.getOrElse(MATERIAL_SQL_TEXT, "")

  /** Whether the material view auto-updates. Defaults to false. */
  def materialAutoUpdate: Boolean =
    options.get(MATERIAL_AUTO_UPDATE).fold(false)(toBoolean(_, MATERIAL_AUTO_UPDATE))
}
/**
* Options for the star lake source.
*/
// Concrete options holder for the star lake source; Serializable so it can be
// captured by Spark tasks, with the transient maps re-derived on the driver.
class StarLakeOptions(@transient protected[star] val options: CaseInsensitiveMap[String],
@transient protected val sqlConf: SQLConf)
extends StarLakeWriteOptions with StarLakeOptionParser with Serializable {
// Convenience constructor wrapping a plain Map in a CaseInsensitiveMap.
def this(options: Map[String, String], conf: SQLConf) = this(CaseInsensitiveMap(options), conf)
}
// Option-key constants recognized by the star lake reader/writer.
object StarLakeOptions {
/** An option to overwrite only the data that matches predicates over partition columns. */
val REPLACE_WHERE_OPTION = "replaceWhere"
/** An option to allow automatic schema merging during a write operation. */
val MERGE_SCHEMA_OPTION = "mergeSchema"
/** An option to allow overwriting schema and partitioning during an overwrite write operation. */
val OVERWRITE_SCHEMA_OPTION = "overwriteSchema"
// Internal key Spark's DataFrameWriter uses to pass partitionBy columns.
val PARTITION_BY = "__partition_columns"
val RANGE_PARTITIONS = "rangePartitions"
val HASH_PARTITIONS = "hashPartitions"
val HASH_BUCKET_NUM = "hashBucketNum"
val SHORT_TABLE_NAME = "shortTableName"
val CREATE_MATERIAL_VIEW = "createStarLakeMaterialView"
val UPDATE_MATERIAL_VIEW = "updateStarLakeMaterialView"
val MATERIAL_SQL_TEXT = "materialSQLText"
val MATERIAL_AUTO_UPDATE = "materialAutoUpdate"
/** whether it is allowed to use delta file */
val AllowDeltaFile = "allowDeltaFile"
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/utils/MetaData.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.utils
import com.engineplus.star.meta.{CommitState, CommitType, MetaUtils}
import com.fasterxml.jackson.annotation.JsonIgnore
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.execution.datasources.BucketingUtils
import org.apache.spark.sql.star.material_view.QueryInfo
import org.apache.spark.sql.types.{DataType, StructType}
// One commit's worth of metadata: the table, the per-partition changes, and
// the commit type, plus optional streaming identity (query_id/batch_id).
case class MetaInfo(table_info: TableInfo,
partitionInfoArray: Array[PartitionInfo],
commit_type: CommitType,
commit_id: String = "",
query_id: String = "",
batch_id: Long = -1L)
// State of a single range partition: the files read at commit time, the files
// being added, and the files being expired, together with version bookkeeping.
case class PartitionInfo(table_id: String,
range_id: String,
table_name: String,
range_value: String,
read_version: Long,
pre_write_version: Long,
read_files: Array[DataFileInfo] = Array.empty[DataFileInfo],
add_files: Array[DataFileInfo] = Array.empty[DataFileInfo],
expire_files: Array[DataFileInfo] = Array.empty[DataFileInfo],
last_update_timestamp: Long = -1L,
delta_file_num: Int = 0, //approximate value of delta files
be_compacted: Boolean = false) {
// Human-readable summary; intentionally omits the file arrays.
override def toString: String = {
s"partition info: {\ntable_name: $table_name,\nrange_value: $range_value,\nread_version: $read_version," +
s"\ndelta_file_num: $delta_file_num,\nbe_compacted: $be_compacted\n}"
}
}
// table_schema is JSON-serialized StructType data.
// range_column and hash_column are plain strings (not JSON); each may contain
// multiple column names concatenated with `,`.
case class TableInfo(table_name: String,
table_id: String,
table_schema: String = null,
range_column: String = "",
hash_column: String = "",
bucket_num: Int = -1,
configuration: Map[String, String] = Map.empty,
schema_version: Int = 1,
short_table_name: Option[String] = None,
is_material_view: Boolean = false) {
lazy val table_path: Path = new Path(table_name)
lazy val range_partition_columns: Seq[String] = range_partition_schema.fieldNames
lazy val hash_partition_columns: Seq[String] = hash_partition_schema.fieldNames
/** Returns the schema as a [[StructType]] */
//full table schema which contains partition columns; empty when table_schema is null
@JsonIgnore
lazy val schema: StructType =
Option(table_schema).map { s =>
DataType.fromJson(s).asInstanceOf[StructType]
}.getOrElse(StructType.apply(Nil))
//range partition columns, resolved against the full schema
@JsonIgnore
lazy val range_partition_schema: StructType =
if (range_column.equalsIgnoreCase("")) {
StructType.apply(Nil)
} else {
StructType(range_column.split(",").map(c => schema(c)))
}
//hash partition columns, resolved against the full schema
@JsonIgnore
lazy val hash_partition_schema: StructType =
if (hash_column.equalsIgnoreCase("")) {
StructType.apply(Nil)
} else {
StructType(hash_column.split(",").map(c => schema(c)))
}
//all partition columns
lazy val partition_schema: StructType = range_partition_schema.merge(hash_partition_schema)
//hash is belong to data_schema !!!
private lazy val range_partition_set: Set[String] = range_column.split(",").toSet
//all data schema except range partition columns (hash columns stay in data)
@JsonIgnore
lazy val data_schema: StructType = StructType(schema.filterNot(f => range_partition_set.contains(f.name)))
// All partition column names, range columns first then hash columns.
lazy val partition_cols: Seq[String] = {
var seq = Seq.empty[String]
if (range_column.nonEmpty) {
seq = seq ++ range_column.split(",")
}
if (hash_column.nonEmpty) {
seq = seq ++ hash_column.split(",")
}
seq
}
lazy val format: Format = Format()
}
//single file info
case class DataFileInfo(file_path: String,
range_partitions: Map[String, String],
size: Long,
modification_time: Long,
write_version: Long,
is_base_file: Boolean,
file_exist_cols: String = "") {
// Canonical partition key string derived from the partition value map.
lazy val range_key: String = MetaUtils.getPartitionKeyFromMap(range_partitions)
//identify for merge read: partition key plus the version that wrote this file
lazy val range_version: String = range_key + "-" + write_version.toString
// Hash bucket id parsed from the file name; fails fast on a malformed name.
lazy val file_bucket_id: Int = BucketingUtils
.getBucketId(new Path(file_path).getName)
.getOrElse(sys.error(s"Invalid bucket file $file_path"))
//trans to files which need to delete: marks the deletion time on a copy
def expire(deleteTime: Long): DataFileInfo = this.copy(modification_time = deleteTime)
}
// Lightweight projection of a partition used when filtering partitions by
// their range values.
case class PartitionFilterInfo(range_id: String,
range_value: String,
range_partitions: Map[String, String],
read_version: Long)
/**
 * commit state info
 *
 * @param state      commit state
 * @param table_name name of the table being committed
 * @param table_id   id of the table being committed
 * @param commit_id  commit id
 * @param tag        identifier to redo or rollback
 * @param timestamp  timestamp of commit
 */
case class commitStateInfo(state: CommitState.Value,
table_name: String,
table_id: String,
commit_id: String,
tag: Int,
timestamp: Long)
/**
 * undo log info — one row per undo-log entry; fields cover every commit type,
 * so most are unused for any given entry.
 *
 * @param tag commit identifier, 0 is committing, greater than 0 is rollback, -1 is redoing
 */
case class undoLogInfo(commit_type: String,
table_id: String,
commit_id: String,
range_id: String,
file_path: String,
table_name: String,
range_value: String,
tag: Int,
write_version: Long,
timestamp: Long,
size: Long,
modification_time: Long,
table_schema: String,
setting: Map[String, String],
file_exist_cols: String,
delta_file_num: Int,
be_compacted: Boolean,
is_base_file: Boolean,
query_id: String,
batch_id: Long,
short_table_name: String,
sql_text: String,
relation_tables: String,
auto_update: Boolean,
is_creating_view: Boolean,
view_info: String)
// Storage format descriptor; parquet with no extra options by default.
case class Format(provider: String = "parquet",
options: Map[String, String] = Map.empty)
// Extra per-commit options: an optional short table name to register and
// optional material-view metadata.
case class CommitOptions(shortTableName: Option[String],
materialInfo: Option[MaterialViewInfo])
/**
 * Material View Info
 *
 * @param viewName       name of the material view
 * @param sqlText        sql in text format to create material view
 * @param relationTables relation tables referenced by the view; see
 *                       [[RelationTable.toString]] for the serialized form
 * @param autoUpdate     whether the view refreshes automatically
 * @param isCreatingView true while the view is being created
 * @param info           parsed query information for the view
 */
case class MaterialViewInfo(viewName: String,
sqlText: String,
relationTables: Seq[RelationTable],
autoUpdate: Boolean,
isCreatingView: Boolean = false,
info: QueryInfo)
// A table referenced by a material view, with its partition versions.
// Serialized as: name \u0001 id \u0001 part->ver [\u0002 part->ver ...]
// (partition entries sorted by name); parsed back by [[RelationTable.build]].
case class RelationTable(tableName: String,
tableId: String,
partitionInfo: Seq[(String, String)]) {
override def toString: String = {
tableName + "\001" + tableId + "\001" + partitionInfo.sortBy(_._1).map(m => m._1 + "->" + m._2).mkString("\002")
}
}
object RelationTable {
  /**
   * Parse the string produced by [[RelationTable.toString]] back into a
   * [[RelationTable]].
   *
   * Expected layout: tableName \u0001 tableId \u0001 name->version
   * [\u0002 name->version ...].
   *
   * @throws IllegalArgumentException if the string does not match the layout
   */
  def build(relationTables: String): RelationTable = {
    val split = relationTables.split("\001")
    require(split.length >= 3, s"Malformed relation table string: $relationTables")
    val tableName = split(0)
    val tableId = split(1)
    val partitionInfo = split(2).split("\002").map(m => {
      // Split with limit 2 so a partition version that itself contains "->"
      // is not truncated (toString joins name and version with "->").
      val part = m.split("->", 2)
      require(part.length == 2, s"Malformed partition entry '$m' in: $relationTables")
      (part(0), part(1))
    })
    RelationTable(tableName, tableId, partitionInfo)
  }
}
|
engine-plus/StarLake
|
src/test/scala/com/engineplus/star/tables/StarTableSuite.scala
|
<reponame>engine-plus/StarLake<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.tables
import java.util.Locale
import org.apache.spark.sql.star.StarLakeUtils
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.{AnalysisException, QueryTest}
/** Tests for the [[StarTable]] entry points: forPath, forName, as, isStarLakeTable. */
class StarTableSuite extends QueryTest
  with SharedSparkSession
  with StarLakeSQLCommandTest {

  test("forPath") {
    withTempDir { dir =>
      testData.write.format("star").save(dir.getAbsolutePath)
      // Both the (spark, path) and the (path) overloads must resolve the table.
      checkAnswer(
        StarTable.forPath(spark, dir.getAbsolutePath).toDF,
        testData.collect().toSeq)
      checkAnswer(
        StarTable.forPath(dir.getAbsolutePath).toDF,
        testData.collect().toSeq)
    }
  }

  test("forName") {
    withTempDir { dir =>
      withTable("starTable") {
        testData.write.format("star").saveAsTable("starTable")
        checkAnswer(
          StarTable.forName(spark, "starTable").toDF,
          testData.collect().toSeq)
        checkAnswer(
          StarTable.forName("starTable").toDF,
          testData.collect().toSeq)
      }
    }
  }

  /** Asserts that both forName overloads reject `tableName` as not a Star table. */
  def testForNameOnNonStarLakeName(tableName: String): Unit = {
    val msg = "not an Star table"
    testError(msg) {
      StarTable.forName(spark, tableName)
    }
    testError(msg) {
      StarTable.forName(tableName)
    }
  }

  test("forName - with non-Star table name") {
    withTempDir { dir =>
      withTable("notAnStarLakeTable") {
        // Bug fix: the table name must match the name registered by withTable
        // and the one checked below. It previously read "notAStarLakeTable"
        // (missing "n"), so the assertion ran against a nonexistent table and
        // the parquet table escaped withTable's cleanup.
        testData.write.format("parquet").mode("overwrite")
          .saveAsTable("notAnStarLakeTable")
        testForNameOnNonStarLakeName("notAnStarLakeTable")
      }
    }
  }

  test("forName - with temp view name") {
    withTempDir { dir =>
      withTempView("viewOnStarLakeTable") {
        testData.write.format("star").save(dir.getAbsolutePath)
        // A temp view over a star table is still not a star *table* name.
        spark.read.format("star").load(dir.getAbsolutePath)
          .createOrReplaceTempView("viewOnStarLakeTable")
        testForNameOnNonStarLakeName("viewOnStarLakeTable")
      }
    }
  }

  test("forName - with star.`path`") {
    withTempDir { dir =>
      testData.write.format("star").save(dir.getAbsolutePath)
      // forName accepts catalog identifiers only, not path-style identifiers.
      testForNameOnNonStarLakeName(s"star.`$dir`")
    }
  }

  test("as") {
    withTempDir { dir =>
      testData.write.format("star").save(dir.getAbsolutePath)
      // The alias must be usable as a column qualifier.
      checkAnswer(
        StarTable.forPath(dir.getAbsolutePath).as("tbl").toDF.select("tbl.value"),
        testData.select("value").collect().toSeq)
    }
  }

  test("isStarLakeTable - path") {
    withTempDir { dir =>
      testData.write.format("star").save(dir.getAbsolutePath)
      assert(StarLakeUtils.isStarLakeTable(dir.getAbsolutePath))
    }
  }

  test("isStarLakeTable - with non-Star table path") {
    withTempDir { dir =>
      testData.write.format("parquet").mode("overwrite").save(dir.getAbsolutePath)
      assert(!StarLakeUtils.isStarLakeTable(dir.getAbsolutePath))
    }
  }

  /** Runs `thunk` and asserts it raises an AnalysisException containing `expectedMsg` (case-insensitive). */
  def testError(expectedMsg: String)(thunk: => Unit): Unit = {
    val e = intercept[AnalysisException] {
      thunk
    }
    assert(e.getMessage.toLowerCase(Locale.ROOT).contains(expectedMsg.toLowerCase(Locale.ROOT)))
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/StarLakeFileIndex.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import java.net.URI
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.spark.sql.catalyst.expressions.{Cast, Expression, GenericInternalRow, Literal}
import org.apache.spark.sql.execution.datasources.{PartitionDirectory, PartitionSpec, PartitioningAwareFileIndex}
import org.apache.spark.sql.star.StarLakeFileIndexUtils._
import org.apache.spark.sql.star.utils.DataFileInfo
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{AnalysisException, SparkSession}
import scala.collection.mutable
/** file index for data source v2 */
/**
 * file index for data source v2
 *
 * Base class for metadata-backed file indexes: matching files are resolved
 * from the StarLake snapshot instead of by listing the filesystem.
 */
abstract class StarLakeFileIndexV2(val spark: SparkSession,
                                   val snapshotManagement: SnapshotManagement)
  extends PartitioningAwareFileIndex(spark, Map.empty[String, String], None) {

  // Table root path/name taken from the snapshot management layer.
  lazy val tableName: String = snapshotManagement.table_name

  def getFileInfo(filters: Seq[Expression]): Seq[DataFileInfo] = matchingFiles(filters)

  override def rootPaths: Seq[Path] = snapshotManagement.snapshot.getTableInfo.table_path :: Nil

  // Nothing to refresh: state comes from the snapshot, not a filesystem scan.
  override def refresh(): Unit = {}

  /**
   * Returns all matching/valid files by the given `partitionFilters` and `dataFilters`
   */
  def matchingFiles(partitionFilters: Seq[Expression],
                    dataFilters: Seq[Expression] = Nil): Seq[DataFileInfo]

  override def partitionSchema: StructType = snapshotManagement.snapshot.getTableInfo.range_partition_schema

  /**
   * Groups the matching files by their range-partition values and converts each
   * group into a Spark PartitionDirectory with synthesized FileStatus entries.
   */
  override def listFiles(partitionFilters: Seq[Expression],
                         dataFilters: Seq[Expression]): Seq[PartitionDirectory] = {
    val timeZone = spark.sessionState.conf.sessionLocalTimeZone
    matchingFiles(partitionFilters, dataFilters)
      .groupBy(_.range_partitions).map {
      case (partitionValues, files) =>
        // Cast the string partition values to the partition schema's data types.
        val rowValues: Array[Any] = partitionSchema.map { p =>
          Cast(Literal(partitionValues(p.name)), p.dataType, Option(timeZone)).eval()
        }.toArray
        //file status
        val fileStats = files.map { f =>
          new FileStatus(
            /* length */ f.size,
            /* isDir */ false,
            /* blockReplication */ 0,
            /* blockSize */ 1,
            /* modificationTime */ f.modification_time,
            absolutePath(f.file_path, tableName))
        }.toArray
        PartitionDirectory(new GenericInternalRow(rowValues), fileStats)
    }.toSeq
  }

  // The filesystem-oriented accessors below are not meaningful for a
  // metadata-backed index and are deliberately unsupported.
  override def partitionSpec(): PartitionSpec = {
    throw new AnalysisException(
      s"Function partitionSpec() is not support in merge.")
  }

  override def leafFiles: mutable.LinkedHashMap[Path, FileStatus] = {
    throw new AnalysisException(
      s"Function leafFiles() is not support in merge.")
  }

  override def leafDirToChildrenFiles: Map[Path, Array[FileStatus]] = {
    throw new AnalysisException(
      s"Function leafDirToChildrenFiles() is not support in merge.")
  }
}
/**
 * File index resolving data files for a scan from the current snapshot,
 * with an optional set of pre-applied partition filters.
 */
case class DataFileIndexV2(override val spark: SparkSession,
                           override val snapshotManagement: SnapshotManagement,
                           partitionFilters: Seq[Expression] = Nil)
  extends StarLakeFileIndexV2(spark, snapshotManagement) {

  override def matchingFiles(partitionFilters: Seq[Expression],
                             dataFilters: Seq[Expression]): Seq[DataFileInfo] = {
    // Combine the index's own filters with those supplied by the scan.
    val combinedFilters = this.partitionFilters ++ partitionFilters ++ dataFilters
    PartitionFilter.filesForScan(snapshotManagement.snapshot, combinedFilters)
  }

  override def inputFiles: Array[String] = {
    val scanned = PartitionFilter.filesForScan(snapshotManagement.snapshot, partitionFilters)
    scanned.map(info => absolutePath(info.file_path, tableName).toString)
  }

  override def sizeInBytes: Long = snapshotManagement.snapshot.sizeInBytes(partitionFilters)
}
/**
* A [[StarLakeFileIndexV2]] that generates the list of files from a given list of files
* that are within a version range of SnapshotManagement.
*/
/**
 * A [[StarLakeFileIndexV2]] that generates the list of files from a given list of files
 * that are within a version range of SnapshotManagement.
 */
case class BatchDataFileIndexV2(override val spark: SparkSession,
                                override val snapshotManagement: SnapshotManagement,
                                files: Seq[DataFileInfo])
  extends StarLakeFileIndexV2(spark, snapshotManagement) {

  override def matchingFiles(partitionFilters: Seq[Expression],
                             dataFilters: Seq[Expression]): Seq[DataFileInfo] = {
    import spark.implicits._
    // Filter the fixed file list against the range-partition schema.
    val rangeSchema = snapshotManagement.snapshot.getTableInfo.range_partition_schema
    PartitionFilter
      .filterFileList(rangeSchema, files.toDF(), partitionFilters)
      .as[DataFileInfo]
      .collect()
  }

  override def inputFiles: Array[String] =
    files.map(file => absolutePath(file.file_path, tableName).toString).toArray

  override val sizeInBytes: Long = files.map(_.size).sum
}
object StarLakeFileIndexUtils {
  /** Resolves `child` against the table root when it is not already absolute. */
  def absolutePath(child: String, tableName: String): Path = {
    val candidate = new Path(new URI(child))
    if (candidate.isAbsolute) candidate
    else new Path(tableName, candidate)
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/Snapshot.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import com.engineplus.star.meta.{DataOperation, MetaUtils}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.execution.datasources.FileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.star.utils.{DataFileInfo, PartitionFilterInfo, PartitionInfo, TableInfo}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
/**
 * A point-in-time view of a StarLake table: its metadata ([[TableInfo]]) plus
 * the partition metadata read for this version. Derived datasets are lazily
 * built, persisted on first access, and released via uncache().
 */
class Snapshot(table_info: TableInfo,
               partition_info_arr: Array[PartitionInfo],
               is_first_commit: Boolean = false
              ) {
  lazy val spark: SparkSession = SparkSession.active

  def getTableName: String = table_info.table_name

  def getTableInfo: TableInfo = table_info

  /** Collects all data-file metadata of this snapshot to the driver. */
  def allDataInfo: Array[DataFileInfo] = {
    import spark.implicits._
    allDataInfoDS.as[DataFileInfo].collect()
  }

  // Track whether the lazy persisted datasets below were ever materialized,
  // so uncache() only unpersists what was actually persisted.
  private var dataInfoCached: Boolean = false
  private var partitionFilterInfoCached: Boolean = false

  // Persisted dataset of all data files across this snapshot's partitions.
  // Setting the flag is a deliberate side effect of first access.
  lazy val allDataInfoDS: Dataset[DataFileInfo] = {
    import spark.implicits._
    dataInfoCached = true
    spark.sparkContext.parallelize(DataOperation.getTableDataInfo(partition_info_arr)).toDS()
  }.persist()

  // Persisted DataFrame of per-partition filter info derived from partition_info_arr.
  lazy val allPartitionFilterInfoDF: DataFrame = {
    import spark.implicits._
    partitionFilterInfoCached = true
    val allPartitionFilterInfo: Seq[PartitionFilterInfo] = {
      partition_info_arr
        .map(part =>
          PartitionFilterInfo(
            part.range_id,
            part.range_value,
            MetaUtils.getPartitionMapFromKey(part.range_value),
            part.read_version))
    }
    spark.sparkContext.parallelize(allPartitionFilterInfo).toDF()
  }.persist()

  /** Total size in bytes of the files selected by `filters` (all files by default). */
  def sizeInBytes(filters: Seq[Expression] = Nil): Long = {
    PartitionFilter.filesForScan(this, filters).map(_.size).sum
  }

  /** Return the underlying Spark `FileFormat` of the StarTable. */
  def fileFormat: FileFormat = new ParquetFileFormat()

  def getConfiguration: Map[String, String] = table_info.configuration

  def isFirstCommit: Boolean = is_first_commit

  def getPartitionInfoArray: Array[PartitionInfo] = partition_info_arr

  /** Unpersists any cached dataset that was materialized through this snapshot. */
  def uncache(): Unit = {
    if (dataInfoCached) {
      allDataInfoDS.unpersist()
    }
    if (partitionFilterInfoCached) {
      allPartitionFilterInfoDF.unpersist()
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/merge_operator/MergeParquetFileWithOperatorPartitionByBatchFile.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.v2.merge.MergePartitionedFile
import org.apache.spark.sql.vectorized.ColumnarBatch
/**
 * A [[PartitionReader]] that merges rows from groups of files (one group per
 * range partition) using the supplied merge operators. Groups are consumed one
 * at a time through a [[MergeMultiFileWithOperator]] instance, which is
 * replaced when its heap runs empty.
 */
class MergeParquetFileWithOperatorPartitionByBatchFile[T](filesInfo: Seq[Seq[(MergePartitionedFile, PartitionReader[ColumnarBatch])]],
                                                          mergeOperatorInfo: Map[String, MergeOperator[Any]])
  extends PartitionReader[InternalRow] with Logging {

  // Iterator over the per-partition file groups.
  val filesItr: Iterator[Seq[(MergePartitionedFile, PartitionReader[ColumnarBatch])]] = filesInfo.iterator
  // Merge state for the group currently being read; null until the first next().
  var mergeLogic: MergeMultiFileWithOperator = _

  /**
   * Advances the merge to the next row.
   *
   * @return Boolean — false once every file group is fully consumed
   */
  override def next(): Boolean = {
    if (mergeLogic == null) {
      // First call: initialize merge state from the first group.
      if (filesItr.hasNext) {
        val nextFiles = filesItr.next()
        if (nextFiles.isEmpty) {
          return false
        } else {
          mergeLogic = new MergeMultiFileWithOperator(nextFiles, mergeOperatorInfo)
        }
      } else {
        return false
      }
    }
    if (mergeLogic.isHeapEmpty) {
      // Current group exhausted: advance to the next group, if any.
      // NOTE(review): unlike the first group, later groups are not checked for
      // emptiness before constructing the merge logic — confirm empty groups
      // cannot occur past the first position.
      if (filesItr.hasNext) {
        //close current file readers
        mergeLogic.closeReadFileReader()
        mergeLogic = new MergeMultiFileWithOperator(filesItr.next(), mergeOperatorInfo)
      } else {
        return false
      }
    }
    mergeLogic.merge()
    true
  }

  /**
   * Returns the current merged row: either a copy of the temporary row
   * produced by the merge operators, or the row proxied from the batch.
   *
   * @return InternalRow
   */
  override def get(): InternalRow = {
    if (mergeLogic.isTemporaryRow()) {
      mergeLogic.setTemporaryRowFalse()
      val temporaryRow = mergeLogic.getTemporaryRow()
      // clone() defends against the underlying array being overwritten —
      // presumably the merge logic reuses the temporary-row buffer; confirm.
      val arrayRow = new GenericInternalRow(temporaryRow.clone())
      arrayRow
    } else {
      mergeLogic.getRowByProxyMergeBatch()
    }
  }

  /** Closes every underlying file reader of every group. */
  override def close(): Unit = {
    if (filesInfo.nonEmpty) {
      filesInfo.foreach(f => f.foreach(_._2.close()))
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/CreateMaterialViewCommand.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import com.engineplus.star.meta.MetaVersion
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.{SnapshotManagement, StarLakeOptions, StarLakeUtils}
import org.apache.spark.sql.{Row, SaveMode, SparkSession}
/**
 * Creates a material view named `viewName`: runs `sqlText`, writes its result
 * to a StarLake table at `viewPath`, and records the view metadata
 * (sql text, partitioning, auto-update flag) through the write options.
 */
case class CreateMaterialViewCommand(viewName: String,
                                     viewPath: String,
                                     sqlText: String,
                                     rangePartitions: String,
                                     hashPartitions: String,
                                     hashBucketNum: String,
                                     autoUpdate: Boolean) extends RunnableCommand with Command {
  override def run(sparkSession: SparkSession): Seq[Row] = {
    // NOTE(review): presumably disables material-view query rewriting so the
    // view's own SQL executes against the base tables — confirm semantics.
    StarLakeUtils.executeWithoutQueryRewrite(sparkSession) {
      val snapshotManagement = SnapshotManagement(viewPath)
      snapshotManagement.withNewTransaction(tc => {
        //fast failed if view name already exists
        if (MetaVersion.isShortTableNameExists(viewName)._1) {
          throw StarLakeErrors.tableExistsException(viewName)
        }
        // Options marking the target table as a material view and carrying
        // its definition to the writer.
        val options = Map(
          StarLakeOptions.RANGE_PARTITIONS -> rangePartitions,
          StarLakeOptions.HASH_PARTITIONS -> hashPartitions,
          StarLakeOptions.HASH_BUCKET_NUM -> hashBucketNum,
          StarLakeOptions.SHORT_TABLE_NAME -> viewName,
          StarLakeOptions.CREATE_MATERIAL_VIEW -> "true",
          StarLakeOptions.MATERIAL_SQL_TEXT -> sqlText,
          StarLakeOptions.MATERIAL_AUTO_UPDATE -> autoUpdate.toString
        )
        // Materialize the query result into the target table and commit the
        // resulting file changes within this transaction.
        val data = sparkSession.sql(sqlText)
        val (newFiles, deletedFiles) = WriteIntoTable(
          snapshotManagement,
          SaveMode.ErrorIfExists,
          new StarLakeOptions(options, sparkSession.sessionState.conf),
          configuration = Map.empty, //table.properties,
          data).write(tc, sparkSession)
        tc.commit(newFiles, deletedFiles)
      })
    }
    Nil
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/MergeParquetSingletonFilePartitionByBatchFile.scala
|
<filename>src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/MergeParquetSingletonFilePartitionByBatchFile.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.v2.merge.MergePartitionedFile
import org.apache.spark.sql.vectorized.ColumnarBatch
/**
* merge on multi partition files
*
* @param filesInfo Seq(Seq()) => rangePartitions(filesInOnePartition())
* @tparam T
*/
/**
 * merge on multi partition files
 *
 * Reads per-partition file groups one at a time through a
 * [[MergeSingletonFile]], de-duplicating rows within the current group.
 *
 * @param filesInfo Seq(Seq()) => rangePartitions(filesInOnePartition())
 * @tparam T
 */
class MergeParquetSingletonFilePartitionByBatchFile[T](filesInfo: Seq[Seq[(MergePartitionedFile, PartitionReader[ColumnarBatch])]])
  extends PartitionReader[InternalRow] with Logging {

  // Iterator over the per-partition file groups.
  val filesItr: Iterator[Seq[(MergePartitionedFile, PartitionReader[ColumnarBatch])]] = filesInfo.iterator
  // Merge state for the current group; null until the first next().
  var mergeLogic: MergeSingletonFile = _

  /**
   * Advances to the next de-duplicated row.
   *
   * @return Boolean — false when all file groups are exhausted
   */
  override def next(): Boolean = {
    if (mergeLogic == null) {
      // First call: initialize from the first group, bailing out on an
      // empty group or an empty iterator.
      if (filesItr.hasNext) {
        val nextFiles = filesItr.next()
        if (nextFiles.isEmpty) {
          return false
        } else {
          mergeLogic = new MergeSingletonFile(nextFiles)
        }
      } else {
        return false
      }
    }
    if (mergeLogic.deDuplication()) {
      true
    } else if (filesItr.hasNext) {
      //close current file readers
      mergeLogic.closeReadFileReader()
      // NOTE(review): unlike the first group, subsequent groups are not checked
      // for emptiness before constructing MergeSingletonFile — confirm empty
      // groups cannot occur past the first position.
      mergeLogic = new MergeSingletonFile(filesItr.next())
      mergeLogic.deDuplication()
    } else {
      false
    }
  }

  /**
   * @return InternalRow — the current row of the merge
   */
  override def get(): InternalRow = {
    mergeLogic.getRow()
  }

  // FIX: declare the result type explicitly instead of relying on inference,
  // for consistency with the sibling reader
  // (MergeParquetFileWithOperatorPartitionByBatchFile.close(): Unit) and to
  // avoid inferred result types on a side-effecting public override.
  override def close(): Unit = {
    if (filesInfo.nonEmpty) {
      filesInfo.foreach(f => f.foreach(_._2.close()))
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/CleanupCommand.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import java.net.URI
import java.util.Date
import com.engineplus.star.meta.{DataOperation, MetaCommit}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions._
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.utils.FileOperation
import org.apache.spark.sql.star.{SnapshotManagement, StarLakeUtils}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.util.{Clock, SerializableConfiguration, SystemClock}
import scala.collection.JavaConverters._
object CleanupCommand extends CleanupCommandImpl with Serializable {

  /**
   * Clears all untracked files and folders within this table. First lists all the files and
   * directories in the table, and gets the relative paths with respect to the base of the
   * table. Then it gets the list of all tracked files for this table, which may or may not
   * be within the table base path, and gets the relative paths of all the tracked files with
   * respect to the base of the table. Files outside of the table path will be ignored.
   * Then we take a diff of the files and delete directories that were already empty, and all files
   * that are within the table that are no longer tracked.
   *
   * @param dryRun If set to true, no files will be deleted. Instead, we will list all files and
   *               directories that will be cleared.
   * @return A Dataset containing the paths of the files/folders to delete in dryRun mode. Otherwise
   *         returns the base path of the table.
   */
  def runCleanup(spark: SparkSession,
                 snapshotManagement: SnapshotManagement,
                 dryRun: Boolean = true,
                 clock: Clock = new SystemClock): DataFrame = {
    // Files modified before this timestamp are eligible for deletion.
    val retentionMillis = spark.conf.get(StarLakeSQLConf.OLD_VERSION_RETENTION_TIME)
    val deleteBeforeTimestamp = clock.getTimeMillis() - retentionMillis

    // Purge stale undo-log entries, then drop expired file metadata per partition.
    MetaCommit.cleanUndoLog(snapshotManagement.snapshot)
    snapshotManagement.updateSnapshot().getPartitionInfoArray.foreach(partitionInfo => {
      DataOperation.removeFileByName(
        partitionInfo.table_id,
        partitionInfo.range_id,
        partitionInfo.read_version)
    })

    val path = new Path(snapshotManagement.table_name)
    val sessionHadoopConf = spark.sessionState.newHadoopConf()
    val fs = path.getFileSystem(sessionHadoopConf)
    import spark.implicits._

    logInfo(s"Starting garbage collection (dryRun = $dryRun) of untracked files older than " +
      s"${new Date(deleteBeforeTimestamp).toGMTString} in $path")
    val hadoopConf = spark.sparkContext.broadcast(
      new SerializableConfiguration(sessionHadoopConf))
    val basePath = fs.makeQualified(path).toString
    val snapshot = snapshotManagement.snapshot
    val vPath = new Path(basePath)
    // Paths of every file still tracked by the snapshot, relativized to the table root.
    val validFiles = snapshot.allDataInfoDS
      .withColumn("path", transDataFilePathToRelativeUDF(vPath, hadoopConf.value)(col("file_path")))
      .select("path")

    val partitionColumns = snapshot.getTableInfo.range_partition_columns
    val parallelism = spark.conf.get(StarLakeSQLConf.CLEANUP_PARALLELISM)
    // Recursively list everything under the table root, skipping hidden entries.
    val allFilesAndDirs =
      FileOperation.recursiveListDirs(
        spark,
        Seq(basePath),
        hadoopConf,
        hiddenFileNameFilter = StarLakeUtils.isHiddenDirectory(partitionColumns, _),
        fileListingParallelism = Option(parallelism))
    try {
      allFilesAndDirs.cache()

      val dirCounts = allFilesAndDirs.where('isDir).count() + 1 // +1 for the base path

      // The logic below is as follows:
      //   1. We take all the files and directories listed in our reservoir
      //   2. We filter all files older than our tombstone retention period and directories
      //   3. We get the subdirectories of all files so that we can find non-empty directories
      //   4. We groupBy each path, and count to get how many files are in each sub-directory
      //   5. We subtract all the valid files and tombstones in our state
      //   6. We filter all paths with a count of 1, which will correspond to files not in the
      //      state, and empty directories. We can safely delete all of these
      val diff = allFilesAndDirs
        .where('modificationTime < deleteBeforeTimestamp || 'isDir)
        .mapPartitions { fileStatusIterator =>
          val reservoirBase = new Path(basePath)
          val fs = reservoirBase.getFileSystem(hadoopConf.value.value)
          fileStatusIterator.flatMap { fileStatus =>
            if (fileStatus.isDir) {
              Iterator.single(relativize(fileStatus.getPath, fs, reservoirBase, isDir = true))
            } else {
              // Emit each ancestor directory too, so empty directories can be detected.
              val dirs = getAllSubdirs(basePath, fileStatus.path, fs)
              val dirsWithSlash = dirs.map { p =>
                relativize(new Path(p), fs, reservoirBase, isDir = true)
              }
              dirsWithSlash ++ Iterator(
                relativize(new Path(fileStatus.path), fs, reservoirBase, isDir = false))
            }
          }
        }.groupBy($"value" as 'path)
        .count()
        .join(validFiles, Seq("path"), "leftanti")
        .where('count === 1)
        .select('path)
        .as[String]
        .map { relativePath =>
          assert(!stringToPath(relativePath).isAbsolute,
            "Shouldn't have any absolute paths for deletion here.")
          pathToString(FileOperation.absolutePath(basePath, relativePath))
        }

      if (dryRun) {
        // Report what would be deleted without touching the filesystem.
        val numFiles = diff.count()
        logInfo(s"Found $numFiles files and directories in a total of " +
          s"$dirCounts directories that are safe to delete.")

        return diff.map(f => stringToPath(f).toString).toDF("path")
      }

      logInfo(s"Deleting untracked files and empty directories in $path")

      // Delete either distributed across executors or serially on the driver.
      val canConcurrentDelete = spark.conf.get(StarLakeSQLConf.CLEANUP_CONCURRENT_DELETE_ENABLE)
      val filesDeleted = if (canConcurrentDelete) {
        deleteConcurrently(
          spark,
          diff,
          snapshotManagement.table_name,
          spark.sparkContext.broadcast(new SerializableConfiguration(sessionHadoopConf)))
      } else {
        delete(diff, fs)
      }

      logInfo(s"Deleted $filesDeleted files and directories in a total " +
        s"of $dirCounts directories.")

      spark.createDataset(Seq(basePath)).toDF("path")
    } finally {
      allFilesAndDirs.unpersist()
    }
  }
}
/** Helper operations shared by the cleanup command implementation. */
trait CleanupCommandImpl extends Logging {

  /** Relativizes a data-file path against the table base path. */
  protected def transDataFilePathToRelative(file: String, basePath: Path, fs: FileSystem): String =
    pathToString(FileOperation.tryRelativizePath(fs, basePath, new Path(file)))

  /** UDF wrapper around [[transDataFilePathToRelative]] for use in DataFrame columns. */
  protected def transDataFilePathToRelativeUDF(basePath: Path,
                                               hadoopConf: SerializableConfiguration): UserDefinedFunction =
    udf { file: String =>
      val fileSystem = basePath.getFileSystem(hadoopConf.value)
      transDataFilePathToRelative(file, basePath, fileSystem)
    }

  /**
   * Attempts to relativize the `path` with respect to the `reservoirBase` and converts the path to
   * a string.
   */
  protected def relativize(path: Path,
                           fs: FileSystem,
                           reservoirBase: Path,
                           isDir: Boolean): String =
    pathToString(FileOperation.tryRelativizePath(fs, reservoirBase, path))

  /**
   * Wrapper function for FileOperations.getAllSubDirectories
   * returns all subdirectories that `file` has with respect to `base`.
   */
  protected def getAllSubdirs(base: String, file: String, fs: FileSystem): Iterator[String] =
    FileOperation.getAllSubDirectories(base, file)._1

  /**
   * Attempts to delete the list of candidate files. Returns the number of files deleted.
   */
  protected def delete(diff: Dataset[String], fs: FileSystem): Long =
    diff.toLocalIterator().asScala
      .count(candidate => FileOperation.tryDeleteNonRecursive(fs, stringToPath(candidate)))

  /** Deletes candidates in parallel across executors; returns the total deleted. */
  protected def deleteConcurrently(spark: SparkSession,
                                   diff: Dataset[String],
                                   tablePath: String,
                                   hadoopConf: Broadcast[SerializableConfiguration]): Long = {
    import spark.implicits._
    diff.mapPartitions { pathsInPartition =>
      val fs = new Path(tablePath).getFileSystem(hadoopConf.value.value)
      val deletedInPartition =
        pathsInPartition.count(p => FileOperation.tryDeleteNonRecursive(fs, stringToPath(p)))
      Iterator.single(deletedInPartition)
    }.reduce(_ + _)
  }

  protected def stringToPath(path: String): Path = new Path(new URI(path))

  protected def pathToString(path: Path): String = path.toUri.toString
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/MaterialView.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import org.apache.spark.sql.star.material_view.ConstructQueryInfo
import org.apache.spark.sql.star.utils.{MaterialViewInfo, RelationTable}
import scala.util.matching.Regex
/**
 * Metadata access for material views, backed by the `material_view` and
 * `material_relation` Cassandra tables.
 *
 * NOTE(review): every statement below interpolates caller-supplied values
 * (view/table names, ids, serialized relation strings, SQL text) directly into
 * CQL. This is only safe if callers guarantee those values contain no quotes
 * or CQL syntax — consider prepared statements; confirm upstream sanitization.
 */
object MaterialView {
  private val cassandraConnector = MetaUtils.cassandraConnector
  private val database = MetaUtils.DATA_BASE

  //check whether material view exists or not
  def isMaterialViewExists(view_name: String): Boolean = {
    cassandraConnector.withSessionDo(session => {
      val res = session.execute(
        s"""
           |select table_name from $database.material_view
           |where view_name='$view_name'
      """.stripMargin)
      try {
        // res.one() is null when no row matched; getString then throws NPE,
        // which is translated into "does not exist".
        res.one().getString("table_name")
      } catch {
        case _: NullPointerException => return false
        case e: Exception => throw e
      }
      true
    })
  }

  /** Inserts a new material-view metadata row. */
  def addMaterialView(view_name: String,
                      table_name: String,
                      table_id: String,
                      relation_tables: String,
                      sql_text: String,
                      auto_update: Boolean,
                      view_info_index: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      // SQL text is encoded for safe storage in Cassandra.
      val format_sql_text = MetaUtils.formatSqlTextToCassandra(sql_text)
      session.execute(
        s"""
           |insert into $database.material_view
           |(view_name,table_name,table_id,relation_tables,sql_text,auto_update,view_info)
           |values ('$view_name', '$table_name', '$table_id', '$relation_tables', '$format_sql_text',
           |$auto_update, '$view_info_index')
      """.stripMargin)
    })
  }

  /** Updates the relation-table list and auto-update flag of an existing view. */
  def updateMaterialView(view_name: String,
                         relation_tables: String,
                         auto_update: Boolean): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |update $database.material_view
           |set relation_tables='$relation_tables',auto_update=$auto_update
           |where view_name='$view_name'
      """.stripMargin)
    })
  }

  /** Removes the metadata row of a material view. */
  def deleteMaterialView(view_name: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |delete from $database.material_view where view_name='$view_name'
      """.stripMargin)
    })
  }

  /** Looks up the full view info; returns None when the view does not exist. */
  def getMaterialViewInfo(view_name: String): Option[MaterialViewInfo] = {
    cassandraConnector.withSessionDo(session => {
      val res = session.execute(
        s"""
           |select table_id,sql_text,relation_tables,auto_update,view_info from $database.material_view
           |where view_name='$view_name'
      """.stripMargin).one()
      try {
        val table_id = res.getString("table_id")
        val view_info_index = res.getString("view_info")
        //if viewInfo is some fragment index, we should get true value and joint them
        // A UUID-shaped value with no '{' means view_info holds a fragment key
        // rather than the inline value; reassemble the full value from fragments.
        val parttern = new Regex("\\w{8}(-\\w{4}){3}-\\w{12}")
        val view_info =
          if (parttern.findFirstIn(view_info_index).isDefined
            && !view_info_index.contains("{")) {
            FragmentValue.getEntireValue(table_id, view_info_index)
          } else {
            view_info_index
          }
        Some(MaterialViewInfo(
          view_name,
          MetaUtils.formatSqlTextFromCassandra(res.getString("sql_text")),
          res.getString("relation_tables").split(",").map(m => RelationTable.build(m)),
          res.getBool("auto_update"),
          false,
          ConstructQueryInfo.buildInfo(view_info)))
      } catch {
        // res (or a required column) was null: the view does not exist.
        case _: NullPointerException => return None
        case e: Exception => throw e
      }
    })
  }

  /** Upserts the list of material views that depend on the given table. */
  def updateMaterialRelationInfo(table_id: String,
                                 table_name: String,
                                 new_views: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |insert into $database.material_relation
           |(table_id,table_name,material_views)
           |values ('$table_id', '$table_name', '$new_views')
      """.stripMargin)
    })
  }

  /** Returns the dependent-view list for a table, or "" when none is recorded. */
  def getMaterialRelationInfo(table_id: String): String = {
    cassandraConnector.withSessionDo(session => {
      val res = session.execute(
        s"""
           |select material_views from $database.material_relation where table_id='$table_id'
      """.stripMargin)
      try {
        res.one().getString("material_views")
      } catch {
        case _: NullPointerException => return ""
        case e: Exception => throw e
      }
    })
  }

  /** Removes the dependent-view record of a table. */
  def deleteMaterialRelationInfo(table_id: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |delete from $database.material_relation where table_id='$table_id'
      """.stripMargin)
    })
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/material_view/RangeInfo.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.material_view
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.material_view.RangeInfo.compareRange
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import scala.collection.mutable
/**
 * Accumulates the lower/upper bound of a single column from a conjunction of
 * range predicates, always keeping the tightest bound seen so far
 * (the predicates are intersected).
 */
class RangeInfo(dataType: DataType) {
  // Current tightest bounds; null means unbounded on that side.
  private var lower: Any = _
  private var upper: Any = _
  private var includeLower: Boolean = true
  private var includeUpper: Boolean = true

  /** Freezes the accumulated state into an immutable [[RangeDetail]]. */
  def buildDetail(): RangeDetail = {
    RangeDetail(dataType, lower, upper, includeLower, includeUpper)
  }

  /**
   * Tightens this column's range with one more predicate.
   *
   * @param colName   column name (not used by this method)
   * @param limit     the predicate's comparison value
   * @param rangeType one of "GreaterThan", "GreaterThanOrEqual", "LessThan",
   *                  "LessThanOrEqual"; any other value throws a MatchError
   *                  (the match has no default case)
   */
  def setRangeProperties(colName: String,
                         limit: Any,
                         rangeType: String): Unit = {
    rangeType match {
      case "GreaterThan" =>
        if (lower == null) {
          lower = limit
          includeLower = false
        } else {
          // Equal limit also applies: a strict bound tightens an inclusive one.
          val re = compareRange(limit, lower, dataType)
          if (re >= 0) {
            lower = limit
            includeLower = false
          }
        }
      case "GreaterThanOrEqual" =>
        if (lower == null) {
          lower = limit
        } else {
          val re = compareRange(limit, lower, dataType)
          if (re > 0) {
            lower = limit
            includeLower = true
          } else if (re == 0) {
            // Equal limit: keep the existing inclusiveness (a prior strict
            // bound at the same value stays strict under intersection).
            lower = limit
          }
        }
      case "LessThan" =>
        if (upper == null) {
          upper = limit
          includeUpper = false
        } else {
          // Equal limit also applies: a strict bound tightens an inclusive one.
          val re = compareRange(limit, upper, dataType)
          if (re <= 0) {
            upper = limit
            includeUpper = false
          }
        }
      case "LessThanOrEqual" =>
        if (upper == null) {
          upper = limit
        } else {
          val re = compareRange(limit, upper, dataType)
          if (re < 0) {
            upper = limit
            includeUpper = true
          } else if (re == 0) {
            // Equal limit: keep the existing inclusiveness.
            upper = limit
          }
        }
    }
  }
}
/** Helpers for [[RangeDetail]]: (de)serialization to the comma-separated meta
  * string format, typed comparison of range endpoints, and equality/subset
  * tests between a query's range and a materialized view's range.
  */
object RangeInfo {
  private val FIXED_DECIMAL = """decimal\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
  private val CHAR_TYPE = """char\(\s*(\d+)\s*\)""".r
  private val VARCHAR_TYPE = """varchar\(\s*(\d+)\s*\)""".r

  // Maps the DataType.toString form of parameterless types back to their
  // singleton instances; parameterized types are handled by the regexes above.
  private val otherTypes = {
    Seq(
      ("NullType", NullType),
      ("DateType", DateType),
      ("TimestampType", TimestampType),
      ("BinaryType", BinaryType),
      ("IntegerType", IntegerType),
      ("BooleanType", BooleanType),
      ("LongType", LongType),
      ("DoubleType", DoubleType),
      ("FloatType", FloatType),
      ("ShortType", ShortType),
      ("ByteType", ByteType),
      ("StringType", StringType),
      ("CalendarIntervalType", CalendarIntervalType))
      .toMap
  }

  /** Given the string representation of a type, return its DataType */
  private def nameToType(name: String): DataType = {
    name match {
      case "decimal" => DecimalType.USER_DEFAULT
      case FIXED_DECIMAL(precision, scale) => DecimalType(precision.toInt, scale.toInt)
      case CHAR_TYPE(length) => CharType(length.toInt)
      case VARCHAR_TYPE(length) => VarcharType(length.toInt)
      case other => otherTypes.getOrElse(
        other,
        throw new IllegalArgumentException(
          s"Failed to convert the JSON string '$name' to a data type."))
    }
  }

  // Decodes the serialized null sentinel back to a real null; any other string
  // is returned unchanged (endpoints stay Strings after deserialization).
  private def getValue(str: String): Any = {
    if (str.equals("_STAR_META_NULL_")) {
      null
    } else {
      str
    }
  }

  /** Parses a [[RangeDetail]] from its toString form:
    * "dataType,lower,upper,includeLower,includeUpper".
    *
    * NOTE(review): a plain split(",") breaks if a string-typed endpoint itself
    * contains a comma — confirm endpoint values can never contain one.
    */
  def buildDetail(str: String): RangeDetail = {
    val split = str.split(",")
    RangeDetail(
      nameToType(split(0)),
      getValue(split(1)),
      getValue(split(2)),
      split(3).toBoolean,
      split(4).toBoolean)
  }

  /** Folds one predicate into the per-column RangeInfo map, creating the
    * column's accumulator on first use.
    */
  def setRangeInfo(rangeInfo: mutable.Map[String, RangeInfo],
                   dataType: DataType,
                   colName: String,
                   limit: Any,
                   rangeType: String): Unit = {
    val info = rangeInfo.getOrElse(
      colName,
      new RangeInfo(dataType)
    )
    info.setRangeProperties(colName, limit, rangeType)
    // First predicate for this column: register the freshly created accumulator.
    if (!rangeInfo.contains(colName)) {
      rangeInfo.put(colName, info)
    }
  }

  /** True when the two ranges have identical bounds (compared after string
    * normalization) and identical inclusiveness on both sides; a null bound
    * only matches another null bound.
    */
  def matchEqual(a: RangeDetail, b: RangeDetail): Boolean = {
    val lowerMatch = if (a.lower == null && b.lower == null) {
      true
    } else if (a.lower != null && b.lower != null) {
      if (a.includeLower == b.includeLower && transAndCompareRange(a.lower, b.lower, a.dataType) == 0) {
        true
      } else {
        false
      }
    } else {
      false
    }
    // Only bother with the upper bound once the lower bound matched.
    if (lowerMatch) {
      if (a.upper == null && b.upper == null) {
        true
      } else if (a.upper != null && b.upper != null) {
        if (a.includeUpper == b.includeUpper && transAndCompareRange(a.upper, b.upper, a.dataType) == 0) {
          true
        } else {
          false
        }
      } else {
        false
      }
    } else {
      false
    }
  }

  /** Three-way comparison of two already-typed endpoint values. */
  def compareRange(left: Any, right: Any, dataType: DataType): Int = {
    dataType match {
      case BooleanType => left.asInstanceOf[Boolean].compareTo(right.asInstanceOf[Boolean])
      case ByteType => left.asInstanceOf[Byte].compareTo(right.asInstanceOf[Byte])
      case ShortType => left.asInstanceOf[Short].compareTo(right.asInstanceOf[Short])
      case IntegerType | DateType => left.asInstanceOf[Int].compareTo(right.asInstanceOf[Int])
      case LongType | TimestampType => left.asInstanceOf[Long].compareTo(right.asInstanceOf[Long])
      case FloatType => left.asInstanceOf[Float].compareTo(right.asInstanceOf[Float])
      case DoubleType => left.asInstanceOf[Double].compareTo(right.asInstanceOf[Double])
      case StringType => left.asInstanceOf[UTF8String].compareTo(right.asInstanceOf[UTF8String])
      case _ => throw StarLakeErrors.unsupportedDataTypeInMaterialRewriteQueryException(dataType)
    }
  }

  /** Three-way comparison that first coerces both endpoints from their string
    * form (endpoints read back from meta are Strings) into `dataType`.
    */
  def transAndCompareRange(left: Any, right: Any, dataType: DataType): Int = {
    dataType match {
      case BooleanType => left.toString.toBoolean.compareTo(right.toString.toBoolean)
      case ByteType => left.toString.toByte.compareTo(right.toString.toByte)
      case ShortType => left.toString.toShort.compareTo(right.toString.toShort)
      case IntegerType | DateType => left.toString.toInt.compareTo(right.toString.toInt)
      case LongType | TimestampType => left.toString.toLong.compareTo(right.toString.toLong)
      case FloatType => left.toString.toFloat.compareTo(right.toString.toFloat)
      case DoubleType => left.toString.toDouble.compareTo(right.toString.toDouble)
      case StringType => UTF8String.fromString(left.toString).compareTo(UTF8String.fromString(right.toString))
      case _ => throw StarLakeErrors.unsupportedDataTypeInMaterialRewriteQueryException(dataType)
    }
  }

  /** True when `value` falls inside `range`, honoring bound inclusiveness;
    * a null bound means that side is unbounded.
    */
  def valueInRange(value: String, range: RangeDetail): Boolean = {
    //check lower boundary
    val lowerIn = if (range.lower == null) {
      true
    } else {
      // result < 0 means lower < value
      val result = transAndCompareRange(range.lower, value, range.dataType)
      if (result < 0) {
        true
      } else if (result == 0 && range.includeLower) {
        true
      } else {
        false
      }
    }
    if (lowerIn) {
      //check upper boundary
      if (range.upper == null) {
        true
      } else {
        // result > 0 means upper > value
        val result = transAndCompareRange(range.upper, value, range.dataType)
        if (result > 0) {
          true
        } else if (result == 0 && range.includeUpper) {
          true
        } else {
          false
        }
      }
    } else {
      false
    }
  }

  //return -1 if queryRange is not in scope of viewRange
  //return 0 if queryRange is equal to viewRange
  //return 1 if queryRange is a subset of viewRange
  // NOTE(review): when the query has a bound and the view is unbounded on the
  // same side, this returns 0 ("equal") rather than 1 ("subset") — harmless if
  // callers treat 0 and 1 the same way, but confirm that assumption.
  def compareRangeDetail(queryRange: RangeDetail, viewRange: RangeDetail): Int = {
    var result = 0
    // Query unbounded below but view bounded: query reaches outside the view.
    if (queryRange.lower == null && viewRange.lower != null) {
      result = -1
    } else if (viewRange.lower != null) {
      // Both lower bounds present (the null/non-null case was handled above).
      val compare = transAndCompareRange(queryRange.lower, viewRange.lower, queryRange.dataType)
      if (compare < 0) {
        result = -1
      } else if (compare == 0) {
        // Same value: inclusive query over exclusive view leaks one point.
        if (queryRange.includeLower && !viewRange.includeLower) {
          result = -1
        } else if (!queryRange.includeLower.equals(viewRange.includeLower)) {
          result = 1
        }
      } else {
        result = 1
      }
    }
    // Only evaluate the upper bound if the lower bound did not already fail.
    if (result >= 0) {
      if (queryRange.upper == null && viewRange.upper != null) {
        result = -1
      } else if (viewRange.upper != null) {
        val compare = RangeInfo.transAndCompareRange(queryRange.upper, viewRange.upper, queryRange.dataType)
        if (compare > 0) {
          result = -1
        } else if (compare == 0) {
          if (queryRange.includeUpper && !viewRange.includeUpper) {
            result = -1
          } else if (!queryRange.includeUpper.equals(viewRange.includeUpper)) {
            result = 1
          }
        } else {
          result = 1
        }
      }
    }
    result
  }
}
/** Immutable range of a single column for a query or table, with optional
  * (nullable) bounds and per-side inclusiveness.
  *
  * Serialized as "dataType,lower,upper,includeLower,includeUpper", where a
  * null bound is written as the sentinel "_STAR_META_NULL_".
  */
case class RangeDetail(dataType: DataType,
                       lower: Any,
                       upper: Any,
                       includeLower: Boolean,
                       includeUpper: Boolean) {

  /** Meta-store string form; parsed back by RangeInfo.buildDetail. */
  override def toString: String =
    Seq(
      dataType.toString,
      encode(lower),
      encode(upper),
      includeLower.toString,
      includeUpper.toString).mkString(",")

  // Null-safe rendering of a bound: nulls become the reserved sentinel.
  private def encode(bound: Any): String =
    if (bound == null) "_STAR_META_NULL_" else bound.toString
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/StreamingRecord.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
/** Persists and queries streaming-sink progress (query id + last committed
  * batch id) per table in the `streaming_info` meta table, so a restarted
  * streaming query can detect batches that were already committed.
  */
object StreamingRecord {
  private val cassandraConnector = MetaUtils.cassandraConnector
  private val database = MetaUtils.DATA_BASE

  /** Returns the (query_id, batch_id) recorded for `tableId`, or ("", -1L)
    * when no record exists or the lookup fails.
    */
  def getStreamingInfo(tableId: String): (String, Long) = {
    cassandraConnector.withSessionDo(session => {
      try {
        val row = session.execute(
          s"""
             |select query_id,batch_id from $database.streaming_info
             |where table_id='$tableId' allow filtering
          """.stripMargin).one()
        // ResultSet.one() returns null when no row matches; handle that
        // explicitly instead of relying on the NPE being caught below.
        if (row == null) {
          ("", -1L)
        } else {
          (row.getString("query_id"), row.getLong("batch_id"))
        }
      } catch {
        // Best-effort lookup: treat non-fatal failures as "no record", but let
        // fatal errors (OOM, interrupts) propagate instead of swallowing them.
        case NonFatal(_) => ("", -1L)
      }
    })
  }

  /** Returns the batch id recorded for (`tableId`, `queryId`), or -1L when no
    * record exists or the lookup fails.
    */
  def getBatchId(tableId: String, queryId: String): Long = {
    cassandraConnector.withSessionDo(session => {
      try {
        val row = session.execute(
          s"""
             |select batch_id from $database.streaming_info
             |where table_id='$tableId' and query_id='$queryId'
          """.stripMargin).one()
        // Null row means this (table, query) pair has no committed batch yet.
        if (row == null) -1L else row.getLong("batch_id")
      } catch {
        case NonFatal(_) => -1L
      }
    })
  }

  /** Upserts the progress record for (`tableId`, `queryId`); a Cassandra
    * INSERT overwrites any existing row with the same primary key.
    */
  def updateStreamingInfo(tableId: String, queryId: String, batchId: Long, timestamp: Long): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |insert into $database.streaming_info(table_id,query_id,batch_id,timestamp)
           |values('$tableId','$queryId',$batchId,$timestamp)
        """.stripMargin)
    })
  }

  /** Removes every streaming record of `tableId` (used on table drop). */
  def deleteStreamingInfoByTableId(tableId: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |delete from $database.streaming_info
           |where table_id='$tableId'
        """.stripMargin)
    })
  }

  /** Removes streaming records of `tableId` older than `expireTimestamp`. */
  def deleteStreamingInfoByTimestamp(tableId: String, expireTimestamp: Long): Unit = {
    val expireInfo = getTimeoutStreamingInfo(tableId, expireTimestamp)
    expireInfo.foreach(info => deleteStreamingInfo(info._1, info._2))
  }

  /** Lists (table_id, query_id) pairs of `tableId` whose timestamp is older
    * than `expireTimestamp`.
    */
  private def getTimeoutStreamingInfo(tableId: String, expireTimestamp: Long): Seq[(String, String)] = {
    cassandraConnector.withSessionDo(session => {
      val res = session.execute(
        s"""
           |select table_id,query_id,batch_id from $database.streaming_info
           |where table_id='$tableId' and timestamp<$expireTimestamp allow filtering
        """.stripMargin)
      val itr = res.iterator()
      val arrBuf = new ArrayBuffer[(String, String)]()
      while (itr.hasNext) {
        val re = itr.next()
        val info = (re.getString("table_id"), re.getString("query_id"))
        arrBuf += info
      }
      arrBuf
    })
  }

  /** Deletes the single record identified by (`tableId`, `query_id`). */
  private def deleteStreamingInfo(tableId: String, query_id: String): Unit = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |delete from $database.streaming_info
           |where table_id='$tableId' and query_id='$query_id'
        """.stripMargin)
    })
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/storage/HDFSLogStore.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.storage
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import scala.util.control.NonFatal
/** HDFS-backed [[LogStore]].
  *
  * Relies on Hadoop's [[FileContext]] API for the guarantees the log needs:
  *  1. atomic file visibility — `FileContext.rename` is atomic on HDFS;
  *  2. consistent listing — HDFS directory listings are consistent.
  */
class HDFSLogStore(sparkConf: SparkConf, defaultHadoopConf: Configuration)
  extends HadoopFileSystemLogStore(sparkConf, defaultHadoopConf) with Logging {

  /** FileContext bound to the scheme of `path` and the current Hadoop conf. */
  protected def getFileContext(path: Path): FileContext =
    FileContext.getFileContext(path.toUri, getHadoopConfiguration)

  val noAbstractFileSystemExceptionMessage = "No AbstractFileSystem"

  // Writing through this store is intentionally unsupported.
  def write(path: Path, actions: Iterator[String], overwrite: Boolean = false): Unit =
    throw new Exception("We won't use this to write hdfs file.")

  /** Best-effort removal of the Hadoop checksum shadow file (".name.crc") for
    * `path`; any non-fatal failure is deliberately ignored.
    */
  private def tryRemoveCrcFile(fc: FileContext, path: Path): Unit =
    try {
      val crcPath = new Path(path.getParent, s".${path.getName}.crc")
      if (fc.util.exists(crcPath)) {
        fc.delete(crcPath, true)
      }
    } catch {
      case NonFatal(_) => // a leftover .crc file is harmless; keep going
    }

  override def isPartialWriteVisible(path: Path): Boolean = true
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/MergeFilePartitionReaderFactory.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.MergeParquetSingletonFilePartitionByBatchFile
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator.{MergeOperator, MergeParquetFileWithOperatorPartitionByBatchFile}
import org.apache.spark.sql.execution.datasources.v2.merge.{MergeFilePartition, MergeFilePartitionReader, MergePartitionedFile, MergePartitionedFileReader}
import org.apache.spark.sql.vectorized.ColumnarBatch
/** Base [[PartitionReaderFactory]] for merge-on-read scans.
  *
  * Builds one row-based reader per [[MergeFilePartition]]: each file group gets
  * its own per-file readers, which are then combined either trivially (single
  * file per group) or through the configured merge operators.
  *
  * @param mergeOperatorInfo column name -> operator used to merge multiple
  *                          versions of the same key across delta files
  */
abstract class MergeFilePartitionReaderFactory(mergeOperatorInfo: Map[String, MergeOperator[Any]])
  extends PartitionReaderFactory {

  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    assert(partition.isInstanceOf[MergeFilePartition])
    val filePartition = partition.asInstanceOf[MergeFilePartition]
    // One (file, reader) pair per file, preserving the partition's grouping.
    val iter = filePartition.files.toIterator.map { files =>
      assert(files.forall(_.isInstanceOf[MergePartitionedFile]))
      files.map(f => f -> buildColumnarReader(f)).toSeq
    }.toSeq
    val mergeReader =
      if (filePartition.isSingleFile) {
        // No overlapping versions per group: read through without operators.
        new MergeParquetSingletonFilePartitionByBatchFile[InternalRow](iter)
      } else {
        new MergeParquetFileWithOperatorPartitionByBatchFile[InternalRow](iter, mergeOperatorInfo)
      }
    new MergeFilePartitionReader[InternalRow](
      Iterator(MergePartitionedFileReader( //filePartition.files.head,
        mergeReader))
    )
  }

  /** Columnar reading of a whole partition is not supported for merge scans.
    * Throws UnsupportedOperationException for consistency with
    * [[buildColumnarReader]] (previously a bare Exception).
    */
  override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
    throw new UnsupportedOperationException("this function is not supported")
  }

  /** Builds a row-based reader for one file; implemented by concrete factories. */
  def buildReader(partitionedFile: MergePartitionedFile): PartitionReader[InternalRow]

  /** Builds the per-file reader used by [[createReader]]; concrete factories
    * override this when columnar reading of single files is supported.
    */
  def buildColumnarReader(partitionedFile: MergePartitionedFile): PartitionReader[ColumnarBatch] = {
    throw new UnsupportedOperationException("Cannot create columnar reader.")
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/DropTableCommand.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import java.util.concurrent.TimeUnit
import com.engineplus.star.meta._
import com.engineplus.star.tables.StarTable
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Expression, PredicateHelper}
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.utils.FileOperation
import org.apache.spark.sql.star.{PartitionFilter, Snapshot, SnapshotManagement}
/** Drops a StarLake table: removes its meta records, undo logs, partition and
  * data info, streaming records, and finally the table directory on disk.
  *
  * Concurrent drops are coordinated through a "DropTable" undo-log entry that
  * acts as a lock: only the process that creates the entry (or takes over a
  * stale one) performs the actual drop.
  */
object DropTableCommand {
  val MAX_ATTEMPTS: Int = MetaUtils.GET_LOCK_MAX_ATTEMPTS
  val WAIT_TIME: Int = MetaUtils.DROP_TABLE_WAIT_SECONDS

  /** Entry point: retries up to MAX_ATTEMPTS to acquire the drop lock and run
    * the drop; setting `i = MAX_ATTEMPTS` is how the loop signals completion.
    */
  def run(snapshot: Snapshot): Unit = {
    val table_id = snapshot.getTableInfo.table_id
    val table_name = snapshot.getTableInfo.table_name
    var i = 0
    while (i < MAX_ATTEMPTS) {
      // Creating the undo-log entry doubles as acquiring the drop lock.
      if (UndoLog.addDropTableUndoLog(table_name, table_id)) {
        dropTable(snapshot)
        i = MAX_ATTEMPTS
      } else {
        // Another process holds the lock: wait, or take over if it is stale.
        i = checkAndDropTable(snapshot, i)
      }
    }
  }

  /** Handles a contended drop lock. Returns the next attempt counter:
    * MAX_ATTEMPTS when the drop is finished (by us or by the other process),
    * otherwise i + 1 after a failed takeover.
    */
  private def checkAndDropTable(snapshot: Snapshot, i: Int): Int = {
    val table_id = snapshot.getTableInfo.table_id
    val (timestamp, _) = UndoLog.getCommitTimestampAndTag(
      UndoLogType.DropTable.toString,
      table_id,
      "dropTable")
    if (timestamp < 0) {
      // Lock entry is gone: the other process completed the drop.
      MAX_ATTEMPTS
    } else if (timestamp > System.currentTimeMillis() - MetaUtils.COMMIT_TIMEOUT) {
      // Holder is still within its timeout: back off and re-check
      // (tail-recursive, so this can wait indefinitely while the holder lives).
      TimeUnit.SECONDS.sleep(WAIT_TIME)
      checkAndDropTable(snapshot, i)
    } else {
      // Holder looks dead: try to take over by bumping the lock timestamp.
      val update_timestamp = UndoLog.updateUndoLogTimestamp(
        commit_type = UndoLogType.DropTable.toString,
        table_id = table_id,
        commit_id = "dropTable",
        last_timestamp = timestamp
      )
      if (update_timestamp._1) {
        dropTable(snapshot)
        MAX_ATTEMPTS
      }
      else {
        i + 1
      }
    }
  }

  /** Performs the actual drop. The caller must already hold the drop lock.
    *
    * NOTE(review): the deletion order (meta info -> undo logs -> sleep ->
    * partition/data info -> files -> lock entry) appears deliberate so that
    * readers fail fast before files disappear — confirm before reordering.
    */
  def dropTable(snapshot: Snapshot): Unit = {
    val tableInfo = snapshot.getTableInfo
    val table_id = tableInfo.table_id
    val table_name = tableInfo.table_name
    val short_table_name = tableInfo.short_table_name
    //delete material views associated with this table
    if (!tableInfo.is_material_view) {
      val materialViews = MaterialView.getMaterialRelationInfo(table_id)
      if (materialViews.nonEmpty) {
        materialViews.split(",").foreach(view => StarTable.forName(view).dropTable())
      }
    }
    MetaVersion.deleteTableInfo(table_name, table_id)
    if (short_table_name.isDefined) {
      MetaVersion.deleteShortTableName(short_table_name.get, table_name)
      //if this table is material view, clean relation info
      if (tableInfo.is_material_view) {
        //clean material_relation info
        val materialViewInfo = MaterialView.getMaterialViewInfo(short_table_name.get)
        if (materialViewInfo.isDefined) {
          materialViewInfo.get.relationTables.foreach(table => {
            val relationTableInfo = MetaVersion.getTableInfo(table.tableName)
            //drop view from relation info
            dropMaterialViewFromRelation(
              relationTableInfo.table_id,
              relationTableInfo.table_name,
              short_table_name.get)
          })
        }
        //clean material_view info
        MaterialView.deleteMaterialView(short_table_name.get)
      }
    }
    // Clear every undo-log category for this table except DropTable itself,
    // which is kept until the end as the still-held lock.
    UndoLog.deleteUndoLogByTableId(UndoLogType.Commit.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.Material.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.ShortTableName.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.Partition.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.Schema.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.AddFile.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.ExpireFile.toString, table_id)
    UndoLog.deleteUndoLogByTableId(UndoLogType.DropPartition.toString, table_id)
    // Grace period before removing partition/data info.
    TimeUnit.SECONDS.sleep(WAIT_TIME)
    val partition_info_arr = snapshot.getPartitionInfoArray
    MetaVersion.deletePartitionInfoByTableId(table_id)
    partition_info_arr.foreach(part => DataOperation.deleteDataInfoByRangeId(table_id, part.range_id))
    FragmentValue.deleteFragmentValueByTableId(table_id)
    StreamingRecord.deleteStreamingInfoByTableId(table_id)
    // Remove the table directory itself, then release the lock entry and drop
    // the cached snapshot.
    val path = new Path(table_name)
    val sessionHadoopConf = SparkSession.active.sessionState.newHadoopConf()
    val fs = path.getFileSystem(sessionHadoopConf)
    FileOperation.tryDeleteRecursive(fs, path)
    UndoLog.deleteUndoLogByTableId(UndoLogType.DropTable.toString, table_id)
    SnapshotManagement.invalidateCache(table_name)
  }

  /** Removes `view_name` from the comma-separated view list kept on a base
    * table's relation record, deleting the record if the list becomes empty.
    */
  private def dropMaterialViewFromRelation(table_id: String,
                                           table_name: String,
                                           view_name: String): Unit = {
    val existsViews = MaterialView.getMaterialRelationInfo(table_id)
    val newViews = existsViews.split(",").toSet.-(view_name).mkString(",")
    if (newViews.isEmpty) {
      MaterialView.deleteMaterialRelationInfo(table_id)
    } else {
      MaterialView.updateMaterialRelationInfo(table_id, table_name, newViews)
    }
  }
}
/** Drops a single range partition of a StarLake table, using the same
  * undo-log-as-lock retry/takeover protocol as [[DropTableCommand]].
  */
object DropPartitionCommand extends PredicateHelper {
  val MAX_ATTEMPTS: Int = MetaUtils.GET_LOCK_MAX_ATTEMPTS
  val WAIT_TIME: Int = MetaUtils.DROP_TABLE_WAIT_SECONDS

  /** Resolves `condition` to exactly one partition, then drops it under the
    * DropPartition lock.
    */
  def run(snapshot: Snapshot, condition: Expression): Unit = {
    val table_name = snapshot.getTableName
    val table_id = snapshot.getTableInfo.table_id
    val candidatePartitions = PartitionFilter.partitionsForScan(snapshot, Seq(condition))
    //only one partition is allowed to drop at a time
    if (candidatePartitions.isEmpty) {
      // NOTE(review): StarLakeErrors.partitionNotFoundException is invoked
      // without a `throw`; if it only *returns* an exception, execution falls
      // through to .head below and fails differently — confirm it throws
      // internally.
      StarLakeErrors.partitionNotFoundException(snapshot.getTableName, condition.toString())
    } else if (candidatePartitions.length > 1) {
      StarLakeErrors.tooMuchPartitionException(
        snapshot.getTableName,
        condition.toString(),
        candidatePartitions.length)
    }
    val range_id = candidatePartitions.head.range_id
    val range_value = candidatePartitions.head.range_value
    var i = 0
    while (i < MAX_ATTEMPTS) {
      // Creating the undo-log entry doubles as acquiring the drop lock.
      if (UndoLog.addDropPartitionUndoLog(table_name, table_id, range_value, range_id)) {
        dropPartition(table_name, table_id, range_value, range_id)
        i = MAX_ATTEMPTS
      } else {
        i = checkAndDropPartition(table_name, table_id, range_value, range_id, i)
      }
    }
  }

  /** Contended-lock path: waits while the holder is alive, takes over a stale
    * lock, or returns i + 1 after a failed takeover (MAX_ATTEMPTS when done).
    */
  private def checkAndDropPartition(table_name: String, table_id: String, range_value: String, range_id: String, i: Int): Int = {
    val (timestamp, _) = UndoLog.getCommitTimestampAndTag(
      UndoLogType.DropPartition.toString,
      table_id,
      UndoLogType.DropPartition.toString,
      range_id)
    if (timestamp < 0) {
      // Lock entry gone: the other process completed the drop.
      MAX_ATTEMPTS
    } else if (timestamp > System.currentTimeMillis() - MetaUtils.COMMIT_TIMEOUT) {
      // Holder still within its timeout: back off and re-check (tail call).
      TimeUnit.SECONDS.sleep(WAIT_TIME)
      checkAndDropPartition(table_name, table_id, range_value, range_id, i)
    } else {
      // Holder looks dead: attempt takeover by bumping the lock timestamp.
      val update_timestamp = UndoLog.updateUndoLogTimestamp(
        commit_type = UndoLogType.DropPartition.toString,
        table_id = table_id,
        commit_id = UndoLogType.DropPartition.toString,
        range_id = range_id,
        last_timestamp = timestamp
      )
      if (update_timestamp._1) {
        dropPartition(table_name, table_id, range_value, range_id)
        MAX_ATTEMPTS
      }
      else {
        i + 1
      }
    }
  }

  /** Performs the partition drop; caller must hold the DropPartition lock.
    * Meta info goes first, then (after a grace period) the data info, and the
    * lock entry is released last.
    */
  def dropPartition(table_name: String, table_id: String, range_value: String, range_id: String): Unit = {
    MetaVersion.deletePartitionInfoByRangeId(table_id, range_value, range_id)
    UndoLog.deleteUndoLogByRangeId(UndoLogType.Partition.toString, table_id, range_id)
    UndoLog.deleteUndoLogByRangeId(UndoLogType.AddFile.toString, table_id, range_id)
    UndoLog.deleteUndoLogByRangeId(UndoLogType.ExpireFile.toString, table_id, range_id)
    TimeUnit.SECONDS.sleep(WAIT_TIME)
    DataOperation.deleteDataInfoByRangeId(table_id, range_id)
    UndoLog.deleteUndoLogByRangeId(
      UndoLogType.DropPartition.toString,
      table_id,
      UndoLogType.DropPartition.toString,
      range_id)
    SnapshotManagement(table_name).updateSnapshot()
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/StarLakePartFileMergeSuite.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import com.engineplus.star.tables.StarTable
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.functions._
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.test.{MergeOpInt, MergeOpString02, StarLakeTestUtils}
import org.apache.spark.sql.test.SharedSparkSession
/** Tests for partial ("part") merging of delta files during reads and
  * compaction, driven by the PART_MERGE_* SQL confs: when the number of delta
  * files passes PART_MERGE_FILE_MINIMUM_NUM, a read merges and commits part of
  * them, reducing delta_file_num and bumping read_version.
  */
class StarLakePartFileMergeSuite extends QueryTest
  with SharedSparkSession with StarLakeTestUtils {

  import testImplicits._

  // A plain read over 5 versions of the same keys triggers a part-merge commit:
  // delta file count drops to (old % minimum + 1) and read_version advances by 1.
  test("simple part merge when there are to many delta files") {
    withTempDir(dir => {
      withSQLConf(
        StarLakeSQLConf.PART_MERGE_ENABLE.key -> "true",
        StarLakeSQLConf.PART_MERGE_COMPACTION_COMMIT_ENABLE.key -> "true",
        // Tiny size factor forces every file group to qualify for part merge.
        StarLakeSQLConf.PART_MERGE_FILE_SIZE_FACTOR.key -> "0.0000001",
        StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM.key -> "3") {
        val tablePath = dir.getCanonicalPath
        val data1 = Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value2"), ("range1", "hash3", "value3"))
          .toDF("range", "hash", "value")
        val data2 = Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value2"), ("range1", "hash3", "value3"))
          .toDF("range", "hash", "value")
        val data3 = Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value21"), ("range1", "hash3", "value31"))
          .toDF("range", "hash", "value")
        val data4 = Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value2"), ("range1", "hash3", "value3"))
          .toDF("range", "hash", "value")
        val data5 = Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value22"), ("range1", "hash3", "value33"))
          .toDF("range", "hash", "value")
        data1.write
          .mode("overwrite")
          .format("star")
          .option("rangePartitions", "range")
          .option("hashPartitions", "hash")
          .option("hashBucketNum", "1")
          .save(tablePath)
        val table = StarTable.forPath(tablePath)
        table.upsert(data2)
        table.upsert(data3)
        table.upsert(data4)
        table.upsert(data5)
        val snapshotManagement = SnapshotManagement(tablePath)
        var partitionInfo = snapshotManagement.snapshot.getPartitionInfoArray.head
        val oriDeltaNum = partitionInfo.delta_file_num
        val oriReadVersion = partitionInfo.read_version
        // Latest upsert wins per hash key.
        checkAnswer(table.toDF.select("range", "hash", "value"),
          Seq(("range1", "hash1", "value1"), ("range1", "hash2", "value22"), ("range1", "hash3", "value33"))
            .toDF("range", "hash", "value"))
        partitionInfo = snapshotManagement.updateSnapshot().getPartitionInfoArray.head
        assert(partitionInfo.delta_file_num ==
          (oriDeltaNum % spark.sessionState.conf.getConf(StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM) + 1))
        assert(partitionInfo.read_version == oriReadVersion + 1)
      }
    })
  }

  // With merge operators registered, the merged read still concatenates/sums
  // across all versions; the lower minimum (2) yields more part-merge commits
  // (read_version advances by 3).
  test("part merge with merge operator") {
    withTempDir(dir => {
      withSQLConf(
        StarLakeSQLConf.PART_MERGE_ENABLE.key -> "true",
        StarLakeSQLConf.PART_MERGE_COMPACTION_COMMIT_ENABLE.key -> "true",
        StarLakeSQLConf.PART_MERGE_FILE_SIZE_FACTOR.key -> "0.0000001",
        StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM.key -> "2") {
        val tablePath = dir.getCanonicalPath
        val data1 = Seq(("range1", "hash1", "a1", 1), ("range1", "hash2", "a2", 2), ("range1", "hash3", "a3", 3))
          .toDF("range", "hash", "value1", "value2")
        val data2 = Seq(("range1", "hash1", "b1", 11), ("range1", "hash2", "b2", 22), ("range1", "hash3", "b3", 33))
          .toDF("range", "hash", "value1", "value2")
        val data3 = Seq(("range1", "hash1", "c1", 111), ("range1", "hash2", "c2", 222), ("range1", "hash3", "c3", 333))
          .toDF("range", "hash", "value1", "value2")
        val data4 = Seq(("range1", "hash1", "d1", 1111), ("range1", "hash2", "d2", 2222), ("range1", "hash3", "d3", 3333))
          .toDF("range", "hash", "value1", "value2")
        val data5 = Seq(("range1", "hash1", "e1", 11111), ("range1", "hash2", "e2", 22222), ("range1", "hash3", "e3", 33333))
          .toDF("range", "hash", "value1", "value2")
        data1.write
          .mode("overwrite")
          .format("star")
          .option("rangePartitions", "range")
          .option("hashPartitions", "hash")
          .option("hashBucketNum", "1")
          .save(tablePath)
        val table = StarTable.forPath(tablePath)
        table.upsert(data2)
        table.upsert(data3)
        table.upsert(data4)
        table.upsert(data5)
        val snapshotManagement = SnapshotManagement(tablePath)
        var partitionInfo = snapshotManagement.snapshot.getPartitionInfoArray.head
        val oriDeltaNum = partitionInfo.delta_file_num
        val oriReadVersion = partitionInfo.read_version
        new MergeOpString02().register(spark, "stringOp")
        new MergeOpInt().register(spark, "intOp")
        // stringOp joins all versions with ';', intOp sums them.
        checkAnswer(table.toDF.select(
          col("range"),
          col("hash"),
          expr("stringOP(value1) as value1"),
          expr("intOp(value2) as value2")),
          Seq(
            ("range1", "hash1", "a1;b1;c1;d1;e1", 12345),
            ("range1", "hash2", "a2;b2;c2;d2;e2", 24690),
            ("range1", "hash3", "a3;b3;c3;d3;e3", 37035))
            .toDF("range", "hash", "value1", "value2"))
        partitionInfo = snapshotManagement.updateSnapshot().getPartitionInfoArray.head
        assert(partitionInfo.delta_file_num ==
          (oriDeltaNum % spark.sessionState.conf.getConf(StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM) + 1))
        assert(partitionInfo.read_version == oriReadVersion + 3)
      }
    })
  }

  // Explicit compaction with a column merge operator clears all delta files;
  // the compacted table keeps the operator-applied value for value2 but only
  // the latest value for value1 (no operator configured for it).
  test("compaction with part merge") {
    withTempDir(dir => {
      withSQLConf(
        StarLakeSQLConf.PART_MERGE_ENABLE.key -> "true",
        StarLakeSQLConf.PART_MERGE_COMPACTION_COMMIT_ENABLE.key -> "true",
        StarLakeSQLConf.PART_MERGE_FILE_SIZE_FACTOR.key -> "0.0000001",
        StarLakeSQLConf.PART_MERGE_FILE_MINIMUM_NUM.key -> "3") {
        val tablePath = dir.getCanonicalPath
        val data1 = Seq(("range1", "hash1", "a1", 1), ("range1", "hash2", "a2", 2), ("range1", "hash3", "a3", 3))
          .toDF("range", "hash", "value1", "value2")
        val data2 = Seq(("range1", "hash1", "b1", 11), ("range1", "hash2", "b2", 22), ("range1", "hash3", "b3", 33))
          .toDF("range", "hash", "value1", "value2")
        val data3 = Seq(("range1", "hash1", "c1", 111), ("range1", "hash2", "c2", 222), ("range1", "hash3", "c3", 333))
          .toDF("range", "hash", "value1", "value2")
        val data4 = Seq(("range1", "hash1", "d1", 1111), ("range1", "hash2", "d2", 2222), ("range1", "hash3", "d3", 3333))
          .toDF("range", "hash", "value1", "value2")
        val data5 = Seq(("range1", "hash1", "e1", 11111), ("range1", "hash2", "e2", 22222), ("range1", "hash3", "e3", 33333))
          .toDF("range", "hash", "value1", "value2")
        data1.write
          .mode("overwrite")
          .format("star")
          .option("rangePartitions", "range")
          .option("hashPartitions", "hash")
          .option("hashBucketNum", "1")
          .save(tablePath)
        val table = StarTable.forPath(tablePath)
        table.upsert(data2)
        table.upsert(data3)
        table.upsert(data4)
        table.upsert(data5)
        val snapshotManagement = SnapshotManagement(tablePath)
        var partitionInfo = snapshotManagement.snapshot.getPartitionInfoArray.head
        val oriDeltaNum = partitionInfo.delta_file_num
        val oriReadVersion = partitionInfo.read_version
        new MergeOpString02().register(spark, "stringOp")
        new MergeOpInt().register(spark, "intOp")
        val mergeOperatorInfo = Map("value2" -> new MergeOpInt())
        table.compaction(mergeOperatorInfo)
        partitionInfo = snapshotManagement.updateSnapshot().getPartitionInfoArray.head
        assert(partitionInfo.delta_file_num == 0)
        assert(partitionInfo.read_version == oriReadVersion + 2)
        checkAnswer(table.toDF.select(
          col("range"),
          col("hash"),
          expr("stringOP(value1) as value1"),
          expr("intOp(value2) as value2")),
          Seq(
            ("range1", "hash1", "e1", 12345),
            ("range1", "hash2", "e2", 24690),
            ("range1", "hash3", "e3", 37035))
            .toDF("range", "hash", "value1", "value2"))
      }
    })
  }
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/Redo.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
import java.util.concurrent.TimeUnit
import com.engineplus.star.meta.MetaCommit.{unlockMaterialRelation, unlockMaterialViewName}
import com.engineplus.star.meta.UndoLog.{deleteUndoLog, getUndoLogInfo, updateRedoTimestamp}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.utils.RelationTable
object Redo extends Logging {
  /** Replays a commit whose original writer died after claiming it.
    *
    * Returns true when the redo completed and the snapshot was refreshed;
    * returns false (after a 10s back-off) when another process won the race to
    * claim the redo timestamp.
    */
  def redoCommit(table_name: String, table_id: String, commit_id: String): Boolean = {
    logInfo("redo other commit~~~ ")
    // Winning the timestamp update makes this process the sole redo executor.
    if (updateRedoTimestamp(table_id, commit_id)) {
      // NOTE(review): the replay order below (schema -> files -> partition
      // locks -> streaming -> names -> material views) looks deliberate;
      // confirm against the commit protocol before reordering.
      redoSchemaLock(table_name, table_id, commit_id)
      redoAddedFile(table_id, commit_id)
      redoExpiredFile(table_id, commit_id)
      redoPartitionLock(table_id, commit_id)
      redoStreamingRecord(table_id, commit_id)
      redoShortTableName(table_id, commit_id)
      redoMaterialView(table_id, commit_id)
      // Deleting the commit marker finishes the redo.
      deleteUndoLog(UndoLogType.Commit.toString, table_id, commit_id)
      SnapshotManagement(table_name).updateSnapshot()
      true
    } else {
      TimeUnit.SECONDS.sleep(10)
      false
    }
  }
private def redoPartitionLock(table_id: String, commit_id: String): Unit = {
val partition_undo_arr = getUndoLogInfo(UndoLogType.Partition.toString, table_id, commit_id)
for (partition_undo <- partition_undo_arr) {
MetaVersion.updatePartitionInfo(partition_undo)
MetaLock.unlock(partition_undo.range_id, partition_undo.commit_id)
deleteUndoLog(UndoLogType.Partition.toString, table_id, commit_id, partition_undo.range_id)
}
}
private def redoSchemaLock(table_name: String, table_id: String, commit_id: String): Unit = {
val schema_undo_arr = getUndoLogInfo(UndoLogType.Schema.toString, table_id, commit_id)
for (schema_undo <- schema_undo_arr) {
MetaVersion.updateTableSchema(
table_name,
table_id,
schema_undo.table_schema,
schema_undo.setting,
schema_undo.write_version.toInt)
MetaLock.unlock(table_id, commit_id)
deleteUndoLog(UndoLogType.Schema.toString, table_id, commit_id)
}
}
private def redoAddedFile(table_id: String, commit_id: String): Unit = {
val add_file_undo_arr = getUndoLogInfo(UndoLogType.AddFile.toString, table_id, commit_id)
for (add_file_undo <- add_file_undo_arr) {
DataOperation.redoAddedNewDataFile(add_file_undo)
deleteUndoLog(
UndoLogType.AddFile.toString,
table_id,
commit_id,
add_file_undo.range_id,
add_file_undo.file_path)
}
}
private def redoExpiredFile(table_id: String, commit_id: String): Unit = {
val expire_file_undo_arr = getUndoLogInfo(UndoLogType.ExpireFile.toString, table_id, commit_id)
for (expire_file_undo <- expire_file_undo_arr) {
DataOperation.redoExpireDataFile(expire_file_undo)
deleteUndoLog(
UndoLogType.ExpireFile.toString,
table_id,
commit_id,
expire_file_undo.range_id,
expire_file_undo.file_path)
}
}
private def redoStreamingRecord(table_id: String, commit_id: String): Unit = {
val streaming_undo_arr = getUndoLogInfo(UndoLogType.Commit.toString, table_id, commit_id)
for (streaming_undo <- streaming_undo_arr) {
if (streaming_undo.query_id != null
&& streaming_undo.query_id.nonEmpty
&& !streaming_undo.query_id.equals(MetaUtils.UNDO_LOG_DEFAULT_VALUE)
&& streaming_undo.batch_id >= 0) {
StreamingRecord.updateStreamingInfo(
table_id,
streaming_undo.query_id,
streaming_undo.batch_id,
streaming_undo.timestamp)
}
}
}
private def redoShortTableName(table_id: String, commit_id: String): Unit = {
val info = getUndoLogInfo(UndoLogType.ShortTableName.toString, table_id, commit_id)
if (info.nonEmpty) {
//add short name to table_info
MetaVersion.updateTableShortName(
info.head.table_name,
table_id,
info.head.short_table_name)
deleteUndoLog(UndoLogType.ShortTableName.toString, table_id, commit_id)
}
}
private def redoMaterialView(table_id: String, commit_id: String): Unit = {
val undoInfo = getUndoLogInfo(UndoLogType.Material.toString, table_id, commit_id)
if (undoInfo.nonEmpty) {
val info = undoInfo.head
if (info.is_creating_view) {
//add material view
MaterialView.addMaterialView(
info.short_table_name,
info.table_name,
table_id,
info.relation_tables,
info.sql_text,
info.auto_update,
info.view_info)
//unlock material view
unlockMaterialViewName(commit_id, info.short_table_name)
//update material relation
info.relation_tables.split(",").map(m => RelationTable.build(m)).foreach(table => {
//update
MetaCommit.updateMaterialRelation(
table_id = table.tableId,
table_name = table.tableName,
view_name = info.short_table_name)
//unlock material relation
unlockMaterialRelation(commit_id = commit_id, table_id = table.tableId)
})
} else {
//update material view
MaterialView.updateMaterialView(info.short_table_name, info.relation_tables, info.auto_update)
//unlock material view
unlockMaterialViewName(commit_id, info.short_table_name)
}
//delete undo log
deleteUndoLog(
commit_type = UndoLogType.Material.toString,
table_id = table_id,
commit_id = commit_id)
}
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/utils/FileOperation.scala
|
package org.apache.spark.sql.star.utils
import java.io.{FileNotFoundException, IOException}
import java.net.URI
import java.util.Locale
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkEnv
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.sql.star.storage.LogStore
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.util.SerializableConfiguration
import scala.util.Random
import scala.util.control.NonFatal
object FileOperation extends Logging {

  /**
   * Create an absolute path from `child` using the `basePath` if the child is a relative path.
   * Return `child` if it is an absolute path.
   *
   * @param basePath Base path to prepend to `child` if child is a relative path.
   *                 Note: It is assumed that the basePath do not have any escaped characters and
   *                 is directly readable by Hadoop APIs.
   * @param child Child path to append to `basePath` if child is a relative path.
   *              Note: it is assumed that the child is escaped, that is, all special chars that
   *              need escaping by URI standards are already escaped.
   * @return Absolute path without escaped chars that is directly readable by Hadoop APIs.
   */
  def absolutePath(basePath: String, child: String): Path = {
    val p = new Path(new URI(child))
    if (p.isAbsolute) {
      p
    } else {
      val merged = new Path(basePath, p)
      // URI resolution strips the final `/` in `p` if it exists
      val mergedUri = merged.toUri.toString
      if (child.endsWith("/") && !mergedUri.endsWith("/")) {
        // Restore the trailing slash so directory-style children round-trip unchanged.
        new Path(new URI(mergedUri + "/"))
      } else {
        merged
      }
    }
  }

  /**
   * Given a path `child`:
   * 1. Returns `child` if the path is already relative
   * 2. Tries relativizing `child` with respect to `basePath`
   * a) If the `child` doesn't live within the same base path, returns `child` as is
   * b) If `child` lives in a different FileSystem, throws an exception
   * Note that `child` may physically be pointing to a path within `basePath`, but may logically
   * belong to a different FileSystem, e.g. DBFS mount points and direct S3 paths.
   */
  def tryRelativizePath(fs: FileSystem, basePath: Path, child: Path): Path = {
    // Child Paths can be absolute and use a separate fs
    // NOTE(review): `childUri` is never used below — looks like leftover code; confirm.
    val childUri = child.toUri
    // We can map multiple schemes to the same `FileSystem` class, but `FileSystem.getScheme` is
    // usually just a hard-coded string. Hence, we need to use the scheme of the URI that we use to
    // create the FileSystem here.
    if (child.isAbsolute) {
      try {
        new Path(fs.makeQualified(basePath).toUri.relativize(fs.makeQualified(child).toUri))
      } catch {
        case e: IllegalArgumentException =>
          throw new IllegalStateException(
            s"""Failed to relativize the path ($child). This can happen when absolute paths make
               |it into the transaction log, which start with the scheme s3://, wasbs:// or adls://.
               |This is a bug that has existed before DBR 5.0. To fix this issue, please upgrade
               |your writer jobs to DBR 5.0
            """.stripMargin)
      }
    } else {
      child
    }
  }

  /** Check if the thrown exception is a throttling error. */
  // Heuristic: matches the "slow down" text cloud stores put in throttling messages.
  private def isThrottlingError(t: Throwable): Boolean = {
    Option(t.getMessage).exists(_.toLowerCase(Locale.ROOT).contains("slow down"))
  }

  /** Sleep a random `base`..`base + jitter` ms before retrying the rate-limited operation `opName`. */
  private def randomBackoff(opName: String,
                            t: Throwable,
                            base: Int = 100,
                            jitter: Int = 1000): Unit = {
    val sleepTime = Random.nextInt(jitter) + base
    logWarning(s"Sleeping for $sleepTime ms to rate limit $opName", t)
    Thread.sleep(sleepTime)
  }

  /**
   * Iterate through the contents of directories.
   *
   * Lists each directory in `subDirs` lazily via the LogStore, filtering out
   * hidden names and retrying (with random backoff) up to 10 times per directory
   * on throttling. A vanished directory yields no entries. When `recurse` is set,
   * discovered sub-directories are expanded via [[recurseDirectories]].
   */
  private def listUsingLogStore(logStore: LogStore,
                                subDirs: Iterator[String],
                                recurse: Boolean,
                                hiddenFileNameFilter: String => Boolean): Iterator[SerializableFileStatus] = {

    def list(dir: String, tries: Int): Iterator[SerializableFileStatus] = {
      logInfo(s"Listing $dir")
      try {
        // "\u0000" sorts before any real file name, so listFrom covers the whole directory.
        logStore.listFrom(new Path(dir, "\u0000"))
          .filterNot(f => hiddenFileNameFilter(f.getPath.getName))
          .map(SerializableFileStatus.fromStatus)
      } catch {
        case NonFatal(e) if isThrottlingError(e) && tries > 0 =>
          randomBackoff("listing", e)
          list(dir, tries - 1)
        case e: FileNotFoundException =>
          // Can happen when multiple GCs are running concurrently or due to eventual consistency
          Iterator.empty
      }
    }

    val filesAndDirs = subDirs.flatMap { dir =>
      list(dir, tries = 10)
    }
    if (recurse) {
      recurseDirectories(logStore, filesAndDirs, hiddenFileNameFilter)
    } else {
      filesAndDirs
    }
  }

  /** Given an iterator of files and directories, recurse directories with its contents. */
  private def recurseDirectories(logStore: LogStore,
                                 filesAndDirs: Iterator[SerializableFileStatus],
                                 hiddenFileNameFilter: String => Boolean): Iterator[SerializableFileStatus] = {
    filesAndDirs.flatMap {
      case dir: SerializableFileStatus if dir.isDir =>
        // Emit the directory itself, then (lazily) everything beneath it.
        Iterator.single(dir) ++ listUsingLogStore(
          logStore, Iterator.single(dir.path), recurse = true, hiddenFileNameFilter)
      case file =>
        Iterator.single(file)
    }
  }

  /**
   * The default filter for hidden files. Files names beginning with _ or . are considered hidden.
   *
   * @param fileName the file name to test
   * @return true if the file is hidden
   */
  def defaultHiddenFileFilter(fileName: String): Boolean = {
    fileName.startsWith("_") || fileName.startsWith(".")
  }

  /**
   * Recursively lists all the files and directories for the given `subDirs` in a scalable manner.
   *
   * @param spark The SparkSession
   * @param subDirs Absolute path of the subdirectories to list
   * @param hadoopConf The Hadoop Configuration to get a FileSystem instance
   * @param hiddenFileNameFilter A function that returns true when the file should be considered
   *                             hidden and excluded from results. Defaults to checking for prefixes
   *                             of "." or "_".
   * @param fileListingParallelism Desired partition count for the distributed listing; defaults
   *                               to the cluster's default parallelism.
   */
  def recursiveListDirs(spark: SparkSession,
                        subDirs: Seq[String],
                        hadoopConf: Broadcast[SerializableConfiguration],
                        hiddenFileNameFilter: String => Boolean = defaultHiddenFileFilter,
                        fileListingParallelism: Option[Int] = None): Dataset[SerializableFileStatus] = {
    import spark.implicits._
    if (subDirs.isEmpty) return spark.emptyDataset[SerializableFileStatus]
    val listParallelism = fileListingParallelism.getOrElse(spark.sparkContext.defaultParallelism)
    // Stage 1: list the given directories (non-recursively) on the executors.
    val dirsAndFiles = spark.sparkContext.parallelize(subDirs).mapPartitions { dirs =>
      val logStore = LogStore(SparkEnv.get.conf, hadoopConf.value.value)
      listUsingLogStore(logStore, dirs, recurse = false, hiddenFileNameFilter)
    }.repartition(listParallelism) // Initial list of subDirs may be small
    // Stage 2: expand every discovered directory in parallel.
    val allDirsAndFiles = dirsAndFiles.mapPartitions { firstLevelDirsAndFiles =>
      val logStore = LogStore(SparkEnv.get.conf, hadoopConf.value.value)
      recurseDirectories(logStore, firstLevelDirsAndFiles, hiddenFileNameFilter)
    }
    spark.createDataset(allDirsAndFiles)
  }

  /**
   * Tries deleting a file or directory non-recursively. If the file/folder doesn't exist,
   * that's fine, a separate operation may be deleting files/folders. If a directory is non-empty,
   * we shouldn't delete it. FileSystem implementations throw an `IOException` in those cases,
   * which we return as a "we failed to delete".
   *
   * Listing on S3 is not consistent after deletes, therefore in case the `delete` returns `false`,
   * because the file didn't exist, then we still return `true`. Retries on S3 rate limits up to 3
   * times.
   */
  def tryDeleteNonRecursive(fs: FileSystem, path: Path, tries: Int = 3): Boolean = {
    try fs.delete(path, false) catch {
      case _: FileNotFoundException => true
      case _: IOException => false
      case NonFatal(e) if isThrottlingError(e) && tries > 0 =>
        randomBackoff("deletes", e)
        tryDeleteNonRecursive(fs, path, tries - 1)
    }
  }

  /**
   * Recursive variant of [[tryDeleteNonRecursive]]: deletes `path` and everything beneath it.
   * "Not found" counts as success, an `IOException` as failure, and throttling errors are
   * retried with backoff up to `tries` times.
   */
  def tryDeleteRecursive(fs: FileSystem, path: Path, tries: Int = 3): Boolean = {
    try fs.delete(path, true) catch {
      case _: FileNotFoundException => true
      case _: IOException => false
      case NonFatal(e) if isThrottlingError(e) && tries > 0 =>
        randomBackoff("deletes", e)
        tryDeleteRecursive(fs, path, tries - 1)
    }
  }

  /**
   * Returns all the levels of sub directories that `path` has with respect to `base`. For example:
   * getAllSubDirectories("/base", "/base/a/b/c") =>
   * (Iterator("/base/a", "/base/a/b"), "/base/a/b/c")
   */
  def getAllSubDirectories(base: String, path: String): (Iterator[String], String) = {
    val baseSplits = base.split(Path.SEPARATOR)
    // Components of `path` below `base`.
    val pathSplits = path.split(Path.SEPARATOR).drop(baseSplits.length)
    // Rebuild each intermediate ancestor, excluding `path` itself.
    val it = Iterator.tabulate(pathSplits.length - 1) { i =>
      (baseSplits ++ pathSplits.take(i + 1)).mkString(Path.SEPARATOR)
    }
    (it, path)
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/SnapshotManagement.scala
|
<filename>src/main/scala/org/apache/spark/sql/star/SnapshotManagement.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import java.io.File
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import com.engineplus.star.meta.{MetaUtils, MetaVersion}
import com.google.common.cache.{CacheBuilder, RemovalNotification}
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.functions._
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.sources.{StarLakeBaseRelation, StarLakeSourceUtils}
import org.apache.spark.sql.star.utils.{DataFileInfo, PartitionInfo, TableInfo}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, SparkSession}
import scala.collection.JavaConverters._
class SnapshotManagement(path: String) extends Logging {

  /** Normalized root path identifying this table in the meta-store. */
  val table_name: String = MetaUtils.modifyTableString(path)

  lazy private val spark: SparkSession = SparkSession.active

  // Serializes snapshot replacement in updateSnapshot(); plain reads bypass it.
  lazy private val lock = new ReentrantLock()

  // Fix: marked @volatile. This field is written under `lock` (updateSnapshot)
  // but read through `snapshot` without any synchronization, so without volatile
  // a reader thread could keep observing a stale snapshot.
  @volatile
  private var currentSnapshot: Snapshot = getCurrentSnapshot

  /** The most recently loaded snapshot of this table. */
  def snapshot: Snapshot = currentSnapshot

  /** Load a snapshot for an existing table; fails if its schema was never set. */
  private def createSnapshot: Snapshot = {
    val table_info = MetaVersion.getTableInfo(table_name)
    val partition_info_arr = MetaVersion.getAllPartitionInfo(table_info.table_id)
    if (table_info.table_schema.isEmpty) {
      throw StarLakeErrors.schemaNotSetException
    }
    new Snapshot(table_info, partition_info_arr)
  }

  /**
   * Build the initial snapshot for a table that is not yet registered.
   * Refuses to initialize over an existing non-empty directory, and allocates
   * fresh table/range ids plus the default range partition.
   */
  private def initSnapshot: Snapshot = {
    val table_path = new Path(table_name)
    val fs = table_path.getFileSystem(spark.sessionState.newHadoopConf())
    if (fs.exists(table_path) && fs.listStatus(table_path).nonEmpty) {
      throw StarLakeErrors.failedCreateTableException(table_name)
    }
    val table_id = "table_" + java.util.UUID.randomUUID().toString
    val range_id = "range_" + java.util.UUID.randomUUID().toString
    val table_info = TableInfo(table_name, table_id)
    val partition_arr = Array(
      PartitionInfo(table_id, range_id, table_name, MetaUtils.DEFAULT_RANGE_PARTITION_VALUE, 1, 1)
    )
    new Snapshot(table_info, partition_arr, true)
  }

  /** Existing table -> createSnapshot; otherwise validate the path and build an init snapshot. */
  private def getCurrentSnapshot: Snapshot = {
    if (StarLakeSourceUtils.isStarLakeTableExists(table_name)) {
      createSnapshot
    } else {
      //table_name in SnapshotManagement must be a root path, and its parent path shouldn't be star table
      if (StarLakeUtils.isStarLakeTable(table_name)) {
        throw new AnalysisException("table_name is expected as root path in SnapshotManagement")
      }
      initSnapshot
    }
  }

  /** Reload from the meta-store, uncache the previous snapshot, and publish the new one. */
  def updateSnapshot(): Snapshot = {
    lockInterruptibly {
      val new_snapshot = getCurrentSnapshot
      currentSnapshot.uncache()
      currentSnapshot = new_snapshot
      currentSnapshot
    }
  }

  //get table info only
  // For an unregistered table this fabricates a fresh table id without persisting anything.
  def getTableInfoOnly: TableInfo = {
    if (StarLakeSourceUtils.isStarLakeTableExists(table_name)) {
      MetaVersion.getTableInfo(table_name)
    } else {
      val table_id = "table_" + java.util.UUID.randomUUID().toString
      TableInfo(table_name, table_id)
    }
  }

  /** Refresh the snapshot, then open a new transaction on it. */
  def startTransaction(): TransactionCommit = {
    updateSnapshot()
    new TransactionCommit(this)
  }

  /**
   * Execute a piece of code within a new [[TransactionCommit]]. Reads/write sets will
   * be recorded for this table, and all other tables will be read
   * at a snapshot that is pinned on the first access.
   *
   * @note This uses thread-local variable to make the active transaction visible. So do not use
   *       multi-threaded code in the provided thunk.
   */
  def withNewTransaction[T](thunk: TransactionCommit => T): T = {
    try {
      val tc = startTransaction()
      TransactionCommit.setActive(tc)
      thunk(tc)
    } finally {
      TransactionCommit.clearActive()
    }
  }

  /**
   * using with part merge.
   *
   * @note This uses thread-local variable to make the active transaction visible. So do not use
   *       multi-threaded code in the provided thunk.
   */
  def withNewPartMergeTransaction[T](thunk: PartMergeTransactionCommit => T): T = {
    try {
      // Unlike withNewTransaction, the snapshot is intentionally not refreshed here.
      val tc = new PartMergeTransactionCommit(this)
      PartMergeTransactionCommit.setActive(tc)
      thunk(tc)
    } finally {
      PartMergeTransactionCommit.clearActive()
    }
  }

  /**
   * Checks whether this table only accepts appends. If so it will throw an error in operations that
   * can remove data such as DELETE/UPDATE/MERGE.
   */
  def assertRemovable(): Unit = {
    if (StarLakeConfig.IS_APPEND_ONLY.fromTableInfo(snapshot.getTableInfo)) {
      throw StarLakeErrors.modifyAppendOnlyTableException
    }
  }

  /** Build a BaseRelation over the files selected by the given partition filters. */
  def createRelation(partitionFilters: Seq[Expression] = Nil): BaseRelation = {
    val files: Array[DataFileInfo] = PartitionFilter.filesForScan(snapshot, partitionFilters)
    StarLakeBaseRelation(files, this)(spark)
  }

  /**
   * Build a DataFrame over `files`, optionally narrowed to the files matching
   * `predicts`, projected to `requiredColumns`.
   */
  def createDataFrame(files: Seq[DataFileInfo],
                      requiredColumns: Seq[String],
                      predicts: Option[Expression] = None): DataFrame = {
    val skipFiles = if (predicts.isDefined) {
      val predictFiles = PartitionFilter.filesForScan(snapshot, Seq(predicts.get))
      files.intersect(predictFiles)
    } else {
      files
    }
    val fileIndex = BatchDataFileIndexV2(spark, this, skipFiles)
    val table = StarLakeTableV2(
      spark,
      new Path(table_name),
      None,
      None,
      Option(fileIndex)
    )
    val option = new CaseInsensitiveStringMap(Map("basePath" -> table_name).asJava)
    Dataset.ofRows(
      spark,
      DataSourceV2Relation(
        table,
        table.schema().toAttributes,
        None,
        None,
        option
      )
    ).select(requiredColumns.map(col): _*)
  }

  /** Run `body` while holding `lock`, honoring thread interruption. */
  def lockInterruptibly[T](body: => T): T = {
    lock.lockInterruptibly()
    try {
      body
    } finally {
      lock.unlock()
    }
  }
}
object SnapshotManagement {

  /**
   * We create only a single [[SnapshotManagement]] for any given path to avoid wasted work
   * in reconstructing. Entries expire after 60 minutes of no access, at most 5 are kept,
   * and an evicted entry has its snapshot uncached.
   */
  private val snapshotManagementCache = {
    CacheBuilder.newBuilder()
      .expireAfterAccess(60, TimeUnit.MINUTES)
      .removalListener((removalNotification: RemovalNotification[String, SnapshotManagement]) => {
        val evicted = removalNotification.getValue
        try {
          evicted.snapshot.uncache()
        } catch {
          case _: java.lang.NullPointerException =>
          // Various layers will throw null pointer if the RDD is already gone.
        }
      })
      .maximumSize(5)
      .build[String, SnapshotManagement]()
  }

  /** Resolve a catalog table to its storage location and look it up in the cache. */
  def forTable(spark: SparkSession, tableName: TableIdentifier): SnapshotManagement = {
    val location = spark.sessionState.catalog.getTableMetadata(tableName).location
    apply(new Path(location))
  }

  /** Look up a SnapshotManagement by local file location. */
  def forTable(dataPath: File): SnapshotManagement = apply(new Path(dataPath.getAbsolutePath))

  def apply(path: Path): SnapshotManagement = apply(path.toString)

  /** Normalize the path, then fetch-or-create the cached SnapshotManagement for it. */
  def apply(path: String): SnapshotManagement = {
    val table_path = MetaUtils.modifyTableString(path)
    try {
      snapshotManagementCache.get(table_path, () => {
        AnalysisHelper.allowInvokingTransformsInAnalyzer {
          new SnapshotManagement(table_path)
        }
      })
    } catch {
      // Guava wraps exceptions thrown by the loader; surface the original cause.
      case e: com.google.common.util.concurrent.UncheckedExecutionException =>
        throw e.getCause
    }
  }

  /** Drop the cached entry for a single table path. */
  def invalidateCache(path: String): Unit = {
    snapshotManagementCache.invalidate(MetaUtils.modifyTableString(path))
  }

  /** Drop every cached entry. */
  def clearCache(): Unit = snapshotManagementCache.invalidateAll()
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/sources/StarLakeDataSource.scala
|
<filename>src/main/scala/org/apache/spark/sql/star/sources/StarLakeDataSource.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.sources
import com.engineplus.star.meta.MetaCommit
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.expressions.{EqualTo, Expression, Literal}
import org.apache.spark.sql.connector.catalog.{Table, TableProvider}
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.execution.streaming.Sink
import org.apache.spark.sql.sources._
import org.apache.spark.sql.star._
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.commands.WriteIntoTable
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.utils.PartitionUtils
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.json4s.jackson.Serialization
import org.json4s.{Formats, NoTypeHints}
class StarLakeDataSource
  extends DataSourceRegister
    with RelationProvider
    with CreatableRelationProvider
    with StreamSinkProvider
    with TableProvider
    with Logging {

  /** Format name used with `spark.read/write.format(...)`. */
  override def shortName(): String = StarLakeSourceUtils.NAME

  /** Create the streaming sink for this table path. */
  override def createSink(sqlContext: SQLContext,
                          parameters: Map[String, String],
                          partitionColumns: Seq[String],
                          outputMode: OutputMode): Sink = {
    val tablePath = parameters.get("path").getOrElse(throw StarLakeErrors.pathNotSpecifiedException)
    val snapshot = SnapshotManagement(tablePath).snapshot
    val tableInfo = snapshot.getTableInfo
    // Replay any unfinished commit before the sink starts writing.
    MetaCommit.checkAndRedoCommit(snapshot)
    // Update output mode is only supported for hash-partitioned tables.
    val hashPartitionDefined =
      tableInfo.hash_column.nonEmpty || parameters.getOrElse("hashpartitions", "").nonEmpty
    if (outputMode == OutputMode.Update() && !hashPartitionDefined) {
      throw StarLakeErrors.outputModeNotSupportedException(getClass.getName, outputMode)
    }
    // Propagate partitionBy() columns through the options so the sink can see them.
    val sinkParameters =
      if (partitionColumns.isEmpty) parameters
      else parameters ++ Map(
        DataSourceUtils.PARTITIONING_COLUMNS_KEY ->
          DataSourceUtils.encodePartitioningColumns(partitionColumns))
    val options = new StarLakeOptions(sinkParameters, sqlContext.sparkSession.sessionState.conf)
    new StarLakeSink(sqlContext, new Path(tablePath), outputMode, options)
  }

  /** Batch write path: run the write, then return a relation over the written table. */
  override def createRelation(sqlContext: SQLContext,
                              mode: SaveMode,
                              parameters: Map[String, String],
                              data: DataFrame): BaseRelation = {
    val tablePath = parameters.get("path").getOrElse(throw StarLakeErrors.pathNotSpecifiedException)
    val snapshotManagement = SnapshotManagement(tablePath)
    WriteIntoTable(
      snapshotManagement,
      mode = mode,
      new StarLakeOptions(parameters, sqlContext.sparkSession.sessionState.conf),
      Map.empty,
      data).run(sqlContext.sparkSession)
    snapshotManagement.createRelation()
  }

  /** Read path: resolve the table at `path` and expose it as a BaseRelation. */
  override def createRelation(sqlContext: SQLContext,
                              parameters: Map[String, String]): BaseRelation = {
    val tablePath = parameters.get("path").getOrElse(throw StarLakeErrors.pathNotSpecifiedException)
    StarLakeTableV2(sqlContext.sparkSession, new Path(tablePath)).toBaseRelation
  }

  // The real schema comes from the table metadata later; report an empty one here.
  def inferSchema: StructType = new StructType()

  override def inferSchema(options: CaseInsensitiveStringMap): StructType = inferSchema

  /** TableProvider entry point: build a StarLakeTableV2 for the configured path. */
  override def getTable(schema: StructType,
                        partitioning: Array[Transform],
                        properties: java.util.Map[String, String]): Table = {
    val caseInsensitive = new CaseInsensitiveStringMap(properties)
    Option(caseInsensitive.get("path")) match {
      case Some(p) => StarLakeTableV2(SparkSession.active, new Path(p))
      case None => throw StarLakeErrors.pathNotSpecifiedException
    }
  }
}
object StarLakeDataSource extends Logging {
  private implicit val formats: Formats = Serialization.formats(NoTypeHints)

  /** Serialize partition column names to a JSON array string. */
  def encodePartitioningColumns(columns: Seq[String]): String = {
    Serialization.write(columns)
  }

  /** Inverse of [[encodePartitioningColumns]]. */
  def decodePartitioningColumns(str: String): Seq[String] = {
    Serialization.read[Seq[String]](str)
  }

  /**
   * For StarTable, we allow certain magic to be performed through the paths that are provided by users.
   * Normally, a user specified path should point to the root of a StarTable. However, some users
   * are used to providing specific partition values through the path, because of how expensive it
   * was to perform partition discovery before. We treat these partition values as logical partition
   * filters, if a table does not exist at the provided path.
   *
   * For example, a path specified as:
   * /some/path/partition=1
   * will be parsed into the table root `/some/path` with the partition filter `partition=1`.
   *
   * NOTE(review): an earlier version of this doc also described an `@`-suffixed
   * time-travel specification (timestamp or `v`-prefixed version), but this method
   * neither parses nor returns one — it only yields the root path and partition
   * filters. Confirm whether time travel was dropped intentionally.
   *
   * @return A tuple of the root path of the StarTable and the partition filters
   */
  def parsePathIdentifier(spark: SparkSession,
                          path: String): (Path, Seq[(String, String)]) = {
    val hadoopPath = new Path(path)
    val rootPath = StarLakeUtils.findTableRootPath(spark, hadoopPath).getOrElse {
      throw StarLakeErrors.tableNotExistsException(path)
    }
    val partitionFilters = if (rootPath != hadoopPath) {
      logInfo(
        """
          |WARNING: loading partitions directly with star is not recommended.
          |If you are trying to read a specific partition, use a where predicate.
          |
          |CORRECT: spark.read.format("star").load("/data").where("part=1")
          |INCORRECT: spark.read.format("star").load("/data/part=1")
        """.stripMargin)
      // Everything after the table root is interpreted as key=value partition fragments.
      val fragment = hadoopPath.toString.substring(rootPath.toString.length() + 1)
      try {
        PartitionUtils.parsePathFragmentAsSeq(fragment)
      } catch {
        case _: ArrayIndexOutOfBoundsException =>
          throw StarLakeErrors.partitionPathParseException(fragment)
      }
    } else {
      Nil
    }
    (rootPath, partitionFilters)
  }

  /**
   * Verifies that the provided partition filters are valid and returns the corresponding
   * expressions.
   */
  def verifyAndCreatePartitionFilters(userPath: String,
                                      snapshot: Snapshot,
                                      partitionFilters: Seq[(String, String)]): Seq[Expression] = {
    if (partitionFilters.nonEmpty) {
      val table_info = snapshot.getTableInfo
      // Reject filters on columns that are not declared range partition columns.
      val badColumns = partitionFilters.map(_._1).filterNot(table_info.range_partition_columns.contains)
      if (badColumns.nonEmpty) {
        val fragment = partitionFilters.map(f => s"${f._1}=${f._2}").mkString("/")
        throw StarLakeErrors.partitionPathInvolvesNonPartitionColumnException(badColumns, fragment)
      }
      val filters = partitionFilters.map { case (key, value) =>
        // Nested fields cannot be partitions, so we pass the key as a identifier
        EqualTo(UnresolvedAttribute(Seq(key)), Literal(value))
      }
      // An empty scan result means the requested partition does not exist.
      val files = PartitionFilter.partitionsForScan(snapshot, filters)
      if (files.isEmpty) {
        throw StarLakeErrors.tableNotExistsException(userPath)
      }
      filters
    } else {
      Nil
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/StarLakeUtils.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star
import com.engineplus.star.meta.MetaUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.{Expression, PredicateHelper, SubqueryExpression}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator.MergeOperator
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.rules.StarLakeRelation
import org.apache.spark.sql.star.sources.{StarLakeBaseRelation, StarLakeSQLConf, StarLakeSourceUtils}
import org.apache.spark.sql.star.utils.DataFileInfo
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.util.Utils
object StarLakeUtils extends PredicateHelper {
// Internal marker names used to carry merge-operator information through query plans.
val MERGE_OP_COL = "_star_merge_col_name_"
val MERGE_OP = "_star_merge_op_"

// Session-conf key toggling material-view based query rewrite (see executeWithoutQueryRewrite).
lazy val USE_MATERIAL_REWRITE = "_star_use_material_rewrite_"
/**
 * Run `f` with material-view query rewrite disabled, re-enabling it afterwards.
 *
 * Fix: the flag is now restored in a `finally` block, so an exception thrown by
 * `f` can no longer leave rewrite permanently disabled for the session.
 */
def executeWithoutQueryRewrite[T](sparkSession: SparkSession)(f: => T): Unit = {
  sparkSession.conf.set(USE_MATERIAL_REWRITE, "false")
  try {
    f
  } finally {
    sparkSession.conf.set(USE_MATERIAL_REWRITE, "true")
  }
}
/** Async IO applies only to object-store paths (s3/oss) and only when enabled in the SQL conf. */
def enableAsyncIO(tablePath: String, conf: SQLConf): Boolean = {
  val objectStorePath = Seq("s3", "oss").exists(prefix => tablePath.startsWith(prefix))
  objectStorePath && conf.getConf(StarLakeSQLConf.ASYNC_IO_ENABLE)
}
// Load (and initialize, per the `true` flag) a class via the context or Spark classloader.
def getClass(className: String): Class[_] = {
  Class.forName(className, true, Utils.getContextOrSparkClassLoader)
}
/**
 * Try to load an async implementation class.
 *
 * @return (true, cls) when the class is available; (false, null) when it is not
 *         on the classpath. Any other loading failure propagates unchanged.
 *
 * Cleanup: removed the redundant `case e: Exception => throw e` arm (identical
 * to not catching at all) and the unused exception bindings.
 */
def getAsyncClass(className: String): (Boolean, Class[_]) = {
  try {
    val cls = Class.forName(className, true, Utils.getContextOrSparkClassLoader)
    (true, cls)
  } catch {
    case _: ClassNotFoundException => (false, null)
  }
}
/** Check whether this table is a StarTable based on information from the Catalog (its `provider` field). */
def isStarLakeTable(table: CatalogTable): Boolean = StarLakeSourceUtils.isStarLakeTable(table.provider)
/**
 * Check whether the provided table name is a star table based on information from the Catalog.
 * Temporary tables are never star tables; the table (and its database, if given) must exist.
 */
def isStarLakeTable(spark: SparkSession, tableName: TableIdentifier): Boolean = {
  val catalog = spark.sessionState.catalog
  val isPersistent = !catalog.isTemporaryTable(tableName)
  val databaseOk = tableName.database.forall(catalog.databaseExists)
  val exists = databaseOk && catalog.tableExists(tableName)
  isPersistent && exists && isStarLakeTable(catalog.getTableMetadata(tableName))
}
/** Check if the provided path is the root or the children of a star table. */
def isStarLakeTable(spark: SparkSession, path: Path): Boolean = {
  // A hit anywhere on the ancestor chain means `path` lives inside a star table.
  findTableRootPath(spark, path).isDefined
}
/** String-path overload; requires an active SparkSession and normalizes the path first. */
def isStarLakeTable(tablePath: String): Boolean = {
  val session = SparkSession.getActiveSession.getOrElse(
    throw new IllegalArgumentException("Could not find active SparkSession"))
  isStarLakeTable(session, new Path(MetaUtils.modifyTableString(tablePath)))
}
/**
 * Walk up the ancestor chain of `path` (starting from `path` itself) and return
 * the first directory registered as a star table root, if any.
 */
def findTableRootPath(spark: SparkSession, path: Path): Option[Path] = {
  Iterator
    .iterate(path)(_.getParent)
    .takeWhile(_ != null)
    .find(candidate => StarLakeSourceUtils.isStarLakeTableExists(candidate.toString))
}
/**
 * Partition the given condition into two sequence of conjunctive predicates:
 * - predicates that can be evaluated using metadata only.
 * - other predicates.
 */
def splitMetadataAndDataPredicates(condition: Expression,
                                   partitionColumns: Seq[String],
                                   spark: SparkSession): (Seq[Expression], Seq[Expression]) = {
  val conjuncts = splitConjunctivePredicates(condition)
  conjuncts.partition(p => isPredicateMetadataOnly(p, partitionColumns, spark))
}
  /**
   * Check if condition can be evaluated using only metadata. In StarTable, this means the condition
   * only references partition columns and involves no subquery.
   */
  def isPredicateMetadataOnly(condition: Expression,
                              partitionColumns: Seq[String],
                              spark: SparkSession): Boolean = {
    isPredicatePartitionColumnsOnly(condition, partitionColumns, spark) &&
      !containsSubquery(condition)
  }
def isPredicatePartitionColumnsOnly(condition: Expression,
partitionColumns: Seq[String],
spark: SparkSession): Boolean = {
val nameEquality = spark.sessionState.analyzer.resolver
condition.references.forall { r =>
partitionColumns.exists(nameEquality(r.name, _))
}
}
  /** True if the expression contains any subquery (delegates to Catalyst). */
  def containsSubquery(condition: Expression): Boolean = {
    SubqueryExpression.hasSubquery(condition)
  }
  /**
   * Replace the file index in a logical plan and return the updated plan.
   * It's a common pattern that, in StarTable commands, we use data skipping to determine a subset of
   * files that can be affected by the command, so we replace the whole-table file index in the
   * original logical plan with a new index of potentially affected files, while everything else in
   * the original plan, e.g., resolved references, remain unchanged.
   *
   * @param target the logical plan in which we replace the file index
   * @param files  the subset of files the rewritten relation should expose
   */
  def replaceFileIndex(target: LogicalPlan,
                       files: Seq[DataFileInfo]): LogicalPlan = {
    target transform {
      // Only star base relations are rewritten; all other nodes pass through.
      case l@LogicalRelation(egbr: StarLakeBaseRelation, _, _, _) =>
        l.copy(relation = egbr.copy(files = files)(egbr.sparkSession))
    }
  }
  /**
   * V2 counterpart of [[replaceFileIndex]]: replace the file index of a DSv2 star
   * table relation with a batch index over the given files.
   * Throws when the plan (after stripping subquery aliases) is not a star relation.
   */
  def replaceFileIndexV2(target: LogicalPlan,
                         files: Seq[DataFileInfo]): LogicalPlan = {
    EliminateSubqueryAliases(target) match {
      case sr@DataSourceV2Relation(tbl: StarLakeTableV2, _, _, _, _) =>
        sr.copy(table = tbl.copy(userDefinedFileIndex = Option(BatchDataFileIndexV2(tbl.spark, tbl.snapshotManagement, files))))
      case _ => throw StarLakeErrors.starRelationIllegalException()
    }
  }
/** Whether a path should be hidden for star-related file operations, such as cleanup. */
def isHiddenDirectory(partitionColumnNames: Seq[String], pathName: String): Boolean = {
// Names of the form partitionCol=[value] are partition directories, and should be
// GCed even if they'd normally be hidden. The _db_index directory contains (bloom filter)
// indexes and these must be GCed when the data they are tied to is GCed.
(pathName.startsWith(".") || pathName.startsWith("_")) &&
!partitionColumnNames.exists(c => pathName.startsWith(c ++ "="))
}
}
/**
 * Extractor Object for pulling out the table scan of a StarTable. It could be a full scan
 * or a partial scan.
 */
object StarLakeTable {
  /** Match a LogicalRelation whose underlying relation is a star base relation. */
  def unapply(a: LogicalRelation): Option[StarLakeBaseRelation] = a match {
    case LogicalRelation(relation: StarLakeBaseRelation, _, _, _) => Some(relation)
    case _ => None
  }
}
/**
 * Extractor Object for pulling out the full table scan of a Star table.
 * Matching a *partial* scan (any pushed-down filter) is an error, not a None.
 */
object StarLakeFullTable {
  def unapply(a: LogicalPlan): Option[StarLakeBaseRelation] = a match {
    case PhysicalOperation(_, filters, lr@StarLakeTable(epbr: StarLakeBaseRelation)) =>
      // A table that has never been committed to does not count as a star scan.
      if (epbr.snapshotManagement.snapshot.isFirstCommit) return None
      if (filters.isEmpty) {
        Some(epbr)
      } else {
        // Filters present means only part of the table is read — reject loudly.
        throw new AnalysisException(
          s"Expect a full scan of Star sources, but found a partial scan. " +
            s"path:${epbr.snapshotManagement.table_name}")
      }
    // Convert V2 relations to V1 and perform the check
    case StarLakeRelation(lr) => unapply(lr)
    case _ => None
  }
}
/**
 * Extractor for pulling a star table (V2) out of either a DSv2 relation or a
 * DSv2 scan relation wrapping one.
 */
object StarLakeTableRelationV2 {
  def unapply(plan: LogicalPlan): Option[StarLakeTableV2] = plan match {
    case DataSourceV2Relation(tbl: StarLakeTableV2, _, _, _, _) =>
      Some(tbl)
    case DataSourceV2ScanRelation(DataSourceV2Relation(tbl: StarLakeTableV2, _, _, _, _), _, _) =>
      Some(tbl)
    case _ =>
      None
  }
}
/**
 * Extractor that converts a DSv2 relation over a star table (V2) into a scan
 * relation, building the scan from the relation's own options and output.
 */
object StarLakeTableV2ScanRelation {
  def unapply(plan: LogicalPlan): Option[DataSourceV2ScanRelation] = plan match {
    case dsv2@DataSourceV2Relation(t: StarLakeTableV2, _, _, _, _) => Some(createScanRelation(t, dsv2))
    case _ => None
  }

  /** Wrap `v2Relation` in a scan relation using a freshly built scan of `table`. */
  def createScanRelation(table: StarLakeTableV2, v2Relation: DataSourceV2Relation): DataSourceV2ScanRelation = {
    DataSourceV2ScanRelation(
      v2Relation,
      table.newScanBuilder(v2Relation.options).build(),
      v2Relation.output)
  }
}
/** Merge operator that combines Long values by summing them. */
class MergeOpLong extends MergeOperator[Long] {
  override def mergeData(input: Seq[Long]): Long = input.foldLeft(0L)(_ + _)
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/commands/DropTableSuite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import com.engineplus.star.meta.{MetaUtils, MetaVersion}
import com.engineplus.star.tables.{StarTableTestUtils, StarTable}
import org.apache.spark.sql.star.test.StarLakeTestUtils
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.scalatest.BeforeAndAfterEach
/**
 * Verifies that dropping a star table / partition removes the corresponding
 * rows from every Cassandra meta table.
 */
class DropTableSuite extends QueryTest
  with SharedSparkSession with BeforeAndAfterEach
  with StarLakeTestUtils {

  import testImplicits._

  // Renamed from the misspelled `cassandraConnectot`; only referenced in this suite.
  lazy val cassandraConnector = MetaUtils.cassandraConnector
  lazy val dataBase = MetaUtils.DATA_BASE

  /** True when table_info no longer maps `tablePath` to `tableId`. */
  def tableNotExists(tablePath: String, tableId: String): Boolean = {
    cassandraConnector.withSessionDo(session => {
      val row = session.execute(
        s"""
           |select table_id from $dataBase.table_info
           |where table_name='$tablePath'
      """.stripMargin).one()
      // `one()` returns null when no row matched; the previous version expressed
      // this via a silently swallowed exception (empty catch block).
      row == null || !row.getString("table_id").equals(tableId)
    })
  }

  /** True when the given meta table holds no rows for `tableId`. */
  def metaNotExists(metaTable: String, tableId: String): Boolean = {
    StarTableTestUtils.getNumByTableId(metaTable, tableId) == 0
  }

  /** True when partition_info no longer maps (`tableId`, `rangeValue`) to `rangeId`. */
  def partitionNotExists(tableId: String, rangeValue: String, rangeId: String): Boolean = {
    cassandraConnector.withSessionDo(session => {
      val row = session.execute(
        s"""
           |select range_id from $dataBase.partition_info
           |where table_id='$tableId' and range_value='$rangeValue' allow filtering
      """.stripMargin).one()
      // A fully removed partition row (null) also counts as "not exists"; the
      // previous version would have thrown a NullPointerException here.
      row == null || !row.getString("range_id").equals(rangeId)
    })
  }

  /** True when data_info holds no rows for (`tableId`, `rangeId`). */
  def dataNotExists(tableId: String, rangeId: String): Boolean = {
    StarTableTestUtils.getNumByTableIdAndRangeId("data_info", tableId, rangeId) == 0
  }

  test("drop table") {
    withTempDir(f => {
      val tmpPath = f.getCanonicalPath
      Seq((1, 2), (2, 3), (3, 4)).toDF("key", "value")
        .write
        .format("star")
        .save(tmpPath)
      val tableId = MetaVersion.getTableInfo(MetaUtils.modifyTableString(tmpPath)).table_id
      StarTable.forPath(tmpPath).dropTable()
      // Dropping the table must clear every meta table that referenced it.
      assert(tableNotExists(tmpPath, tableId))
      assert(metaNotExists("partition_info", tableId))
      assert(metaNotExists("data_info", tableId))
      assert(metaNotExists("fragment_value", tableId))
    })
  }

  test("drop partition") {
    withTempDir(f => {
      val tmpPath = f.getCanonicalPath
      Seq((1, 2), (2, 3), (3, 4)).toDF("key", "value")
        .write
        .partitionBy("key")
        .format("star")
        .save(tmpPath)
      val tableInfo = MetaVersion.getTableInfo(MetaUtils.modifyTableString(tmpPath))
      val partitionInfo = MetaVersion.getAllPartitionInfo(tableInfo.table_id)

      // Only one partition may be dropped per call.
      val e1 = intercept[AnalysisException] {
        StarTable.forPath(tmpPath).dropPartition("key=1 or key=2")
      }
      assert(e1.getMessage().contains("You can only drop one partition once time"))

      // Dropping a non-existent partition is rejected.
      val e2 = intercept[AnalysisException] {
        StarTable.forPath(tmpPath).dropPartition("key=4")
      }
      assert(e2.getMessage().contains("Partition not found by condition"))

      StarTable.forPath(tmpPath).dropPartition("key=1")
      checkAnswer(
        spark.read.format("star").load(tmpPath).select("key", "value"),
        Row(2, 3) :: Row(3, 4) :: Nil)

      // Re-appending into the dropped partition must succeed and be visible.
      Seq((1, 22)).toDF("key", "value")
        .write
        .mode("append")
        .format("star")
        .save(tmpPath)
      checkAnswer(
        spark.read.format("star").load(tmpPath).select("key", "value"),
        Row(1, 22) :: Row(2, 3) :: Row(3, 4) :: Nil)

      val rangeId = partitionInfo.find(_.range_value.equals("key=1")).get.range_id
      assert(partitionNotExists(tableInfo.table_id, "key=1", rangeId))
      assert(dataNotExists(tableInfo.table_id, rangeId))
    })
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/BucketParquetScan.scala
|
<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.parquet
import java.util.Locale
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetInputFormat
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.connector.read.PartitionReaderFactory
import org.apache.spark.sql.execution.PartitionedFileUtil
import org.apache.spark.sql.execution.datasources.parquet.{ParquetReadSupport, ParquetWriteSupport}
import org.apache.spark.sql.execution.datasources.v2.FileScan
import org.apache.spark.sql.execution.datasources.{BucketingUtils, FilePartition, PartitionedFile, PartitioningAwareFileIndex}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.star.StarLakeUtils
import org.apache.spark.sql.star.utils.TableInfo
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.util.SerializableConfiguration
/**
 * A [[FileScan]] over a bucketed star table's parquet files. Produces exactly one
 * Spark partition per bucket id in [0, tableInfo.bucket_num); files are never
 * split so each row stays in its bucket.
 */
case class BucketParquetScan(sparkSession: SparkSession,
                             hadoopConf: Configuration,
                             fileIndex: PartitioningAwareFileIndex,
                             dataSchema: StructType,
                             readDataSchema: StructType,
                             readPartitionSchema: StructType,
                             pushedFilters: Array[Filter],
                             options: CaseInsensitiveStringMap,
                             tableInfo: TableInfo,
                             partitionFilters: Seq[Expression] = Seq.empty,
                             dataFilters: Seq[Expression] = Seq.empty) extends FileScan {

  // Bucketed files must stay whole so rows keep their bucket assignment.
  override def isSplitable(path: Path): Boolean = false

  /**
   * Configure the Hadoop conf for parquet reading, then build either the async
   * reader factory (loaded reflectively, if enabled and present) or the default
   * synchronous [[ParquetPartitionReaderFactory]].
   */
  override def createReaderFactory(): PartitionReaderFactory = {
    val readDataSchemaAsJson = readDataSchema.json
    hadoopConf.set(ParquetInputFormat.READ_SUPPORT_CLASS, classOf[ParquetReadSupport].getName)
    hadoopConf.set(
      ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA,
      readDataSchemaAsJson)
    hadoopConf.set(
      ParquetWriteSupport.SPARK_ROW_SCHEMA,
      readDataSchemaAsJson)
    hadoopConf.set(
      SQLConf.SESSION_LOCAL_TIMEZONE.key,
      sparkSession.sessionState.conf.sessionLocalTimeZone)
    hadoopConf.setBoolean(
      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key,
      sparkSession.sessionState.conf.nestedSchemaPruningEnabled)
    hadoopConf.setBoolean(
      SQLConf.CASE_SENSITIVE.key,
      sparkSession.sessionState.conf.caseSensitiveAnalysis)
    ParquetWriteSupport.setSchema(readDataSchema, hadoopConf)
    // Sets flags for `ParquetToSparkSchemaConverter`
    hadoopConf.setBoolean(
      SQLConf.PARQUET_BINARY_AS_STRING.key,
      sparkSession.sessionState.conf.isParquetBinaryAsString)
    hadoopConf.setBoolean(
      SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
      sparkSession.sessionState.conf.isParquetINT96AsTimestamp)
    val broadcastedConf = sparkSession.sparkContext.broadcast(
      new SerializableConfiguration(hadoopConf))
    // The async reader factory is resolved reflectively so this module does not
    // hard-depend on it; fall back to the synchronous factory when absent.
    val enableAsyncIO = StarLakeUtils.enableAsyncIO(tableInfo.table_name, sparkSession.sessionState.conf)
    val asyncFactoryName = "org.apache.spark.sql.execution.datasources.v2.parquet.ParquetPartitionAsyncReaderFactory"
    val (hasAsyncClass, cls) = StarLakeUtils.getAsyncClass(asyncFactoryName)
    if (enableAsyncIO && hasAsyncClass) {
      logInfo("================ async bucket scan ==============================")
      cls.getConstructors()(0)
        .newInstance(sparkSession.sessionState.conf, broadcastedConf,
          dataSchema, readDataSchema, readPartitionSchema, pushedFilters)
        .asInstanceOf[PartitionReaderFactory]
    } else {
      logInfo("================ bucket scan no async ==============================")
      ParquetPartitionReaderFactory(sparkSession.sessionState.conf, broadcastedConf,
        dataSchema, readDataSchema, readPartitionSchema, pushedFilters)
    }
  }

  override def equals(obj: Any): Boolean = obj match {
    case p: BucketParquetScan =>
      super.equals(p) && dataSchema == p.dataSchema && options == p.options &&
        equivalentFilters(pushedFilters, p.pushedFilters)
    case _ => false
  }

  override def hashCode(): Int = getClass.hashCode()

  override def description(): String = {
    super.description() + ", PushedFilters: " + seqToString(pushedFilters)
  }

  override def withFilters(partitionFilters: Seq[Expression],
                           dataFilters: Seq[Expression]): FileScan =
    this.copy(partitionFilters = partitionFilters, dataFilters = dataFilters)

  /** List the selected files, prune partition values if needed, then bucketize. */
  override def partitions: Seq[FilePartition] = {
    val selectedPartitions = fileIndex.listFiles(partitionFilters, dataFilters)
    val partitionAttributes = fileIndex.partitionSchema.toAttributes
    val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
    val readPartitionAttributes = readPartitionSchema.map { readField =>
      attributeMap.getOrElse(normalizeName(readField.name),
        throw new AnalysisException(s"Can't find required partition column ${readField.name} " +
          s"in partition schema ${fileIndex.partitionSchema}")
      )
    }
    lazy val partitionValueProject =
      GenerateUnsafeProjection.generate(readPartitionAttributes, partitionAttributes)
    val splitFiles = selectedPartitions.flatMap { partition =>
      // Prune partition values if part of the partition columns are not required.
      val partitionValues = if (readPartitionAttributes != partitionAttributes) {
        partitionValueProject(partition.values).copy()
      } else {
        partition.values
      }
      partition.files.flatMap { file =>
        val filePath = file.getPath
        PartitionedFileUtil.splitFiles(
          sparkSession = sparkSession,
          file = file,
          filePath = filePath,
          isSplitable = false,
          maxSplitBytes = 0L,
          partitionValues = partitionValues
        )
      }.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)
    }
    getFilePartitions(splitFiles)
  }

  /**
   * Build one [[FilePartition]] per bucket id in [0, bucket_num); a bucket with
   * no file becomes an empty partition so partition ids stay aligned with buckets.
   */
  private def getFilePartitions(partitionedFiles: Seq[PartitionedFile]): Seq[FilePartition] = {
    // Group by bucket id BEFORE asserting uniqueness. The previous implementation
    // built a Map with `.toMap` first, which silently dropped duplicate files for
    // a bucket and made the subsequent uniqueness assert vacuous.
    val filesByBucketId = partitionedFiles.groupBy { file =>
      BucketingUtils
        .getBucketId(new Path(file.filePath).getName)
        .getOrElse(sys.error(s"Invalid bucket file ${file.filePath}"))
    }
    // Each bucket is expected to contain exactly one file.
    assert(filesByBucketId.forall(_._2.size == 1))
    Seq.tabulate(tableInfo.bucket_num) { bucketId =>
      FilePartition(bucketId, filesByBucketId.getOrElse(bucketId, Seq.empty).toArray)
    }
  }

  private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis

  /** Lower-case `name` unless the session is case sensitive. */
  private def normalizeName(name: String): String = {
    if (isCaseSensitive) {
      name
    } else {
      name.toLowerCase(Locale.ROOT)
    }
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/schema/SchemaEnforcementSuite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.schema
import java.io.File
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.test.{StarLakeSQLCommandTest, StarLakeTestUtils}
import org.apache.spark.sql.star.{SnapshotManagement, StarLakeOptions}
import org.apache.spark.sql.streaming.StreamingQueryException
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
/** Strategy for persisting a DataFrame: either by path or by table name. */
sealed trait SaveOperation {
  /** Execute the save on an already-configured writer. */
  def apply(dfw: DataFrameWriter[_]): Unit
}
/** Saves via `DataFrameWriter.save`, passing `path` only when one was supplied. */
case class SaveWithPath(path: String = null) extends SaveOperation {
  override def apply(dfw: DataFrameWriter[_]): Unit = {
    Option(path) match {
      case Some(p) => dfw.save(p)
      case None => dfw.save()
    }
  }
}
/** Saves via `DataFrameWriter.saveAsTable` under the given table name. */
case class SaveAsTable(tableName: String) extends SaveOperation {
  override def apply(dfw: DataFrameWriter[_]): Unit = dfw.saveAsTable(tableName)
}
/**
 * Shared helpers for schema-enforcement suites: run a block with the
 * SCHEMA_AUTO_MIGRATE SQL conf explicitly enabled or disabled.
 */
sealed trait SchemaEnforcementSuiteBase
  extends QueryTest with SharedSparkSession with StarLakeTestUtils {

  /** Run `f` with SCHEMA_AUTO_MIGRATE set to "true". */
  protected def enableAutoMigration(f: => Unit): Unit = {
    withSQLConf(StarLakeSQLConf.SCHEMA_AUTO_MIGRATE.key -> "true") {
      f
    }
  }

  /** Run `f` with SCHEMA_AUTO_MIGRATE set to "false". */
  protected def disableAutoMigration(f: => Unit): Unit = {
    withSQLConf(StarLakeSQLConf.SCHEMA_AUTO_MIGRATE.key -> "false") {
      f
    }
  }
}
/**
 * Batch-writer harness: each test runs through the configured [[SaveOperation]],
 * with `append`/`overwrite` helpers that write in the "star" format.
 */
sealed trait BatchWriterTest extends SchemaEnforcementSuiteBase with SharedSparkSession {

  /** How this suite persists data (by path or by table name). */
  def saveOperation: SaveOperation

  implicit class RichDataFrameWriter(dfw: DataFrameWriter[_]) {
    /** Append the writer's data to the star table rooted at `path`. */
    def append(path: File): Unit = {
      saveOperation(dfw.format("star").mode("append").option("path", path.getAbsolutePath))
    }

    /** Overwrite the star table rooted at `path` with the writer's data. */
    def overwrite(path: File): Unit = {
      saveOperation(dfw.format("star").mode("overwrite").option("path", path.getAbsolutePath))
    }
  }

  /** Register a "batch:" test; when saving as a table, clean the table up after. */
  def equivalenceTest(testName: String)(f: => Unit): Unit = {
    test(s"batch: $testName") {
      saveOperation match {
        case _: SaveWithPath => f
        case SaveAsTable(tbl) => withTable(tbl) {
          f
        }
      }
    }
  }
}
/**
 * Schema-enforcement tests for batch appends: schema changes must be rejected
 * unless enabled via the autoMigrate SQL conf and/or the mergeSchema writer
 * option, and NullType columns are rejected or dropped per the rules below.
 */
trait AppendSaveModeTests extends BatchWriterTest {

  import testImplicits._

  equivalenceTest("reject schema changes by default") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.append(dir)
        val e = intercept[AnalysisException] {
          spark.range(10).withColumn("part", 'id + 1).write.append(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("allow schema changes when autoMigrate is enabled") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.append(dir)
        spark.range(10).withColumn("part", 'id + 1).write.append(dir)
        assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
      }
    }
  }

  equivalenceTest("disallow schema changes when autoMigrate enabled but writer config disabled") {
    // The per-writer option takes precedence over the session-level conf.
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.append(dir)
        val e = intercept[AnalysisException] {
          spark.range(10).withColumn("part", 'id + 1).write
            .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "false").append(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("allow schema change with option") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.append(dir)
        spark.range(10).withColumn("part", 'id + 1).write
          .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true").append(dir)
        assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
      }
    }
  }

  equivalenceTest("JSON ETL workflow, schema merging NullTypes") {
    // NullType columns from sparse JSON must merge with typed columns over writes.
    enableAutoMigration {
      val row1 = """{"key":"abc","id":null,"extra":1}"""
      val row2 = """{"key":"def","id":2,"extra":null}"""
      val row3 = """{"key":"ghi","id":null,"extra":3}"""
      withTempDir { dir =>
        val schema1 = new StructType()
          .add("key", StringType).add("id", NullType).add("extra", IntegerType)
        val schema2 = new StructType()
          .add("key", StringType).add("id", IntegerType).add("extra", NullType)
        spark.read.schema(schema1).json(Seq(row1).toDS()).write.append(dir)
        spark.read.schema(schema2).json(Seq(row2).toDS()).write.append(dir)
        spark.read.schema(schema1).json(Seq(row3).toDS()).write.append(dir)
        checkAnswer(
          spark.read.format("star").load(dir.getAbsolutePath),
          Row("abc", null, 1) :: Row("def", 2, null) :: Row("ghi", null, 3) :: Nil
        )
      }
    }
  }

  equivalenceTest("JSON ETL workflow, schema merging NullTypes - nested struct") {
    enableAutoMigration {
      val row1 = """{"key":"abc","top":{"id":null,"extra":1}}"""
      val row2 = """{"key":"def","top":{"id":2,"extra":null}}"""
      val row3 = """{"key":"ghi","top":{"id":null,"extra":3}}"""
      withTempDir { dir =>
        val schema1 = new StructType().add("key", StringType)
          .add("top", new StructType().add("id", NullType).add("extra", IntegerType))
        val schema2 = new StructType().add("key", StringType)
          .add("top", new StructType().add("id", IntegerType).add("extra", NullType))
        val mergedSchema = new StructType().add("key", StringType)
          .add("top", new StructType().add("id", IntegerType).add("extra", IntegerType))
        spark.read.schema(schema1).json(Seq(row1).toDS()).write.append(dir)
        spark.read.schema(schema2).json(Seq(row2).toDS()).write.append(dir)
        assert(spark.read.format("star").load(dir.getAbsolutePath).schema === mergedSchema)
        spark.read.schema(schema1).json(Seq(row3).toDS()).write.append(dir)
        assert(spark.read.format("star").load(dir.getAbsolutePath).schema === mergedSchema)
        checkAnswer(
          spark.read.format("star").load(dir.getAbsolutePath),
          Row("abc", Row(null, 1)) :: Row("def", Row(2, null)) :: Row("ghi", Row(null, 3)) :: Nil
        )
      }
    }
  }

  equivalenceTest("JSON ETL workflow, schema merging NullTypes - throw error on complex types") {
    // NullType inside arrays/structs-of-arrays cannot be merged and must fail.
    enableAutoMigration {
      val row1 = """{"key":"abc","top":[]}"""
      val row2 = """{"key":"abc","top":[{"id":null}]}"""
      withTempDir { dir =>
        val schema1 = new StructType().add("key", StringType).add("top", ArrayType(NullType))
        val schema2 = new StructType().add("key", StringType)
          .add("top", ArrayType(new StructType().add("id", NullType)))
        val e1 = intercept[AnalysisException] {
          spark.read.schema(schema1).json(Seq(row1).toDS()).write.append(dir)
        }
        assert(e1.getMessage.contains("NullType"))
        val e2 = intercept[AnalysisException] {
          spark.read.schema(schema2).json(Seq(row2).toDS()).write.append(dir)
        }
        assert(e2.getMessage.contains("NullType"))
      }
    }
  }

  equivalenceTest("JSON ETL workflow, NullType being only data column") {
    // Dropping the NullType column would leave no data columns at all.
    enableAutoMigration {
      val row1 = """{"key":"abc","id":null}"""
      withTempDir { dir =>
        val schema1 = new StructType().add("key", StringType).add("id", NullType)
        val e1 = intercept[AnalysisException] {
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("rangePartitions", "key").append(dir)
        }
        assert(e1.getMessage.contains("NullType have been dropped"))
        val e2 = intercept[AnalysisException] {
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("hashPartitions", "key")
            .option("hashBucketNum", "2")
            .append(dir)
        }
        assert(e2.getMessage.contains("NullType have been dropped"))
      }
    }
  }

  equivalenceTest("JSON ETL workflow, NullType partition column should fail") {
    enableAutoMigration {
      val row1 = """{"key":"abc","id":null}"""
      withTempDir { dir =>
        val schema1 = new StructType().add("key", StringType).add("id", NullType)
        intercept[AnalysisException] {
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("rangePartitions", "id")
            .append(dir)
        }
        intercept[AnalysisException] {
          // check case sensitivity with regards to column dropping
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("rangePartitions", "iD")
            .append(dir)
        }
        intercept[AnalysisException] {
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("hashPartitions", "id")
            .option("hashBucketNum", "2")
            .append(dir)
        }
        intercept[AnalysisException] {
          // check case sensitivity with regards to column dropping
          spark.read.schema(schema1).json(Seq(row1).toDS()).write
            .option("hashPartitions", "iD")
            .option("hashBucketNum", "2")
            .append(dir)
        }
      }
    }
  }

  equivalenceTest("reject columns that only differ by case - append") {
    withTempDir { dir =>
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        intercept[AnalysisException] {
          spark.range(10).withColumn("ID", 'id + 1).write.append(dir)
        }
        intercept[AnalysisException] {
          spark.range(10).withColumn("ID", 'id + 1).write
            .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true").append(dir)
        }
        intercept[AnalysisException] {
          spark.range(10).withColumn("a", 'id + 1).write
            .option("rangePartitions", "a,A")
            .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true").append(dir)
        }
        intercept[AnalysisException] {
          spark.range(10).withColumn("a", 'id + 1).write
            .option("hashPartitions", "a,A")
            .option("hashBucketNum", "2")
            .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true").append(dir)
        }
      }
    }
  }

  equivalenceTest("ensure schema mismatch error message contains table ID") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.append(dir)
        val e = intercept[AnalysisException] {
          spark.range(10).withColumn("part", 'id + 1).write.append(dir)
        }
        assert(e.getMessage.contains("schema mismatch detected"))
        // assert(e.getMessage.contains(s"Table ID: ${StarLakeTable.forPath(spark, dir.getAbsolutePath).tableId}"))
      }
    }
  }
}
/**
 * Streaming (append output mode) counterparts of the schema-enforcement tests:
 * schema changes and NullType columns surface as a StreamingQueryException whose
 * cause is the same AnalysisException the batch path throws.
 */
trait AppendOutputModeTests extends SchemaEnforcementSuiteBase with SharedSparkSession
  with StarLakeSQLCommandTest {

  import testImplicits._

  test("reject schema changes by default - streaming") {
    withTempDir { dir =>
      spark.range(10).write.format("star").save(dir.getAbsolutePath)
      val memStream = MemoryStream[Long]
      val stream = memStream.toDS().toDF("value1234") // different column name
        .writeStream
        .option("checkpointLocation", new File(dir, "_checkpoint").getAbsolutePath)
        .format("star")
        .start(dir.getAbsolutePath)
      try {
        disableAutoMigration {
          val e = intercept[StreamingQueryException] {
            memStream.addData(1L)
            stream.processAllAvailable()
          }
          // The streaming failure wraps the batch-path AnalysisException.
          assert(e.cause.isInstanceOf[AnalysisException])
          assert(e.cause.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
        }
      } finally {
        stream.stop()
      }
    }
  }

  test("reject schema changes when autoMigrate enabled but writer config disabled") {
    withTempDir { dir =>
      spark.range(10).write.format("star").save(dir.getAbsolutePath)
      val memStream = MemoryStream[Long]
      val stream = memStream.toDS().toDF("value1234") // different column name
        .writeStream
        .option("checkpointLocation", new File(dir, "_checkpoint").getAbsolutePath)
        .format("star")
        .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "false")
        .start(dir.getAbsolutePath)
      try {
        enableAutoMigration {
          val e = intercept[StreamingQueryException] {
            memStream.addData(1L)
            stream.processAllAvailable()
          }
          assert(e.cause.isInstanceOf[AnalysisException])
          assert(e.cause.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
        }
      } finally {
        stream.stop()
      }
    }
  }

  test("allow schema changes when autoMigrate is enabled - streaming") {
    withTempDir { dir =>
      spark.range(10).write.format("star").save(dir.getAbsolutePath)
      enableAutoMigration {
        val memStream = MemoryStream[Long]
        val stream = memStream.toDS().toDF("value1234") // different column name
          .writeStream
          .option("checkpointLocation", new File(dir, "_checkpoint").getAbsolutePath)
          .format("star")
          .start(dir.getAbsolutePath)
        try {
          memStream.addData(1L)
          stream.processAllAvailable()
          assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
        } finally {
          stream.stop()
        }
      }
    }
  }

  test("allow schema change with option - streaming") {
    withTempDir { dir =>
      spark.range(10).write.format("star").save(dir.getAbsolutePath)
      val memStream = MemoryStream[Long]
      val stream = memStream.toDS().toDF("value1234") // different column name
        .writeStream
        .option("checkpointLocation", new File(dir, "_checkpoint").getAbsolutePath)
        .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true")
        .format("star")
        .start(dir.getAbsolutePath)
      try {
        disableAutoMigration {
          memStream.addData(1L)
          stream.processAllAvailable()
          assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
        }
      } finally {
        stream.stop()
      }
    }
  }

  test("JSON ETL workflow, reject NullTypes") {
    enableAutoMigration {
      val row1 = """{"key":"abc","id":null}"""
      withTempDir(checkpointDir => {
        withTempDir { dir =>
          val schema = new StructType().add("key", StringType).add("id", NullType)
          val memStream = MemoryStream[String]
          val stream = memStream.toDS().select(from_json('value, schema).as("value"))
            .select($"value.*")
            .writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            val e = intercept[StreamingQueryException] {
              memStream.addData(row1)
              stream.processAllAvailable()
            }
            assert(e.cause.isInstanceOf[AnalysisException])
            assert(e.cause.getMessage.contains("NullType"))
          } finally {
            stream.stop()
          }
        }
      })
    }
  }

  test("JSON ETL workflow, reject NullTypes on nested column") {
    enableAutoMigration {
      val row1 = """{"key":"abc","id":{"a":null}}"""
      withTempDir(checkpointDir => {
        withTempDir { dir =>
          val schema = new StructType().add("key", StringType)
            .add("id", new StructType().add("a", NullType))
          val memStream = MemoryStream[String]
          val stream = memStream.toDS().select(from_json('value, schema).as("value"))
            .select($"value.*")
            .writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            val e = intercept[StreamingQueryException] {
              memStream.addData(row1)
              stream.processAllAvailable()
            }
            assert(e.cause.isInstanceOf[AnalysisException])
            assert(e.cause.getMessage.contains("NullType"))
          } finally {
            stream.stop()
          }
        }
      })
    }
  }
}
/**
 * Schema-enforcement tests for batch writes in SaveMode.Overwrite: verifies
 * how the mergeSchema / overwriteSchema writer options and the autoMerge SQL
 * conf control schema changes on overwrite, how replaceWhere interacts with
 * schema merging, and that partition-column changes are rejected.
 */
trait OverwriteSaveModeTests extends BatchWriterTest {

  import testImplicits._

  equivalenceTest("reject schema overwrites by default") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(10).write.overwrite(dir)
        // Overwriting with an extra column must fail without overwriteSchema.
        val e = intercept[AnalysisException] {
          spark.range(10).withColumn("part", 'id + 1).write.overwrite(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.OVERWRITE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("can overwrite schema when using overwrite mode - option") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").write.overwrite(dir)
        // overwriteSchema=true allows replacing the schema wholesale.
        spark.range(5).toDF("value").write.option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
          .overwrite(dir)
        val df = spark.read.format("star").load(dir.getAbsolutePath)
        assert(df.schema.fieldNames === Array("value"))
      }
    }
  }

  equivalenceTest("when autoMerge sqlConf is enabled, we merge schemas") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").write.overwrite(dir)
        spark.range(5).toDF("value").write.overwrite(dir)
        // With autoMerge enabled, the overwrite keeps old columns and adds new ones.
        val df = spark.read.format("star").load(dir.getAbsolutePath)
        assert(df.schema.fieldNames === Array("id", "value"))
      }
    }
  }

  equivalenceTest("reject migration when autoMerge sqlConf is enabled and writer config disabled") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").write.overwrite(dir)
        // An explicit mergeSchema=false writer option overrides the autoMerge conf.
        intercept[AnalysisException] {
          spark.range(5).toDF("value").write.option(StarLakeOptions.MERGE_SCHEMA_OPTION, "false")
            .overwrite(dir)
        }
        // The failed write must not have changed the schema.
        val df = spark.read.format("star").load(dir.getAbsolutePath)
        assert(df.schema.fieldNames === Array("id"))
      }
    }
  }

  equivalenceTest("schema merging with replaceWhere - sqlConf") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // replaceWhere overwrite of one partition still merges the new column.
        Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
          .option(StarLakeOptions.REPLACE_WHERE_OPTION, "part = 0")
          .overwrite(dir)
        val df = spark.read.format("star").load(dir.getAbsolutePath).select("id", "part", "value")
        assert(df.schema.fieldNames === Array("id", "part", "value"))
      }
    }
  }

  equivalenceTest("schema merging with replaceWhere - option") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // mergeSchema=true as a writer option enables merging for replaceWhere.
        Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
          .option(StarLakeOptions.REPLACE_WHERE_OPTION, "part = 0")
          .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "true")
          .overwrite(dir)
        val df = spark.read.format("star").load(dir.getAbsolutePath).select("id", "part", "value")
        assert(df.schema.fieldNames === Array("id", "part", "value"))
      }
    }
  }

  equivalenceTest("schema merging with replaceWhere - option case insensitive") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // Writer option names are matched case-insensitively.
        Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
          .option("RePlAcEwHeRe", "part = 0")
          .option("mErGeScHeMa", "true")
          .overwrite(dir)
        val df = spark.read.format("star").load(dir.getAbsolutePath).select("id", "part", "value")
        assert(df.schema.fieldNames === Array("id", "part", "value"))
      }
    }
  }

  equivalenceTest("reject schema merging with replaceWhere - overwrite option") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // replaceWhere requires mergeSchema; overwriteSchema is not accepted here.
        val e = intercept[AnalysisException] {
          Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
            .option(StarLakeOptions.REPLACE_WHERE_OPTION, "part = 0")
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .overwrite(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("reject schema merging with replaceWhere - no option") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // Without mergeSchema (and autoMerge off) a replaceWhere schema change fails.
        val e = intercept[AnalysisException] {
          Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
            .option("rangePartitions", "part")
            .option(StarLakeOptions.REPLACE_WHERE_OPTION, "part = 0")
            .overwrite(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("reject schema merging with replaceWhere - option set to false, config true") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").withColumn("part", 'id % 2).write
          .option("rangePartitions", "part")
          .overwrite(dir)
        // mergeSchema=false wins over the enabled autoMerge conf.
        val e = intercept[AnalysisException] {
          Seq((1L, 0L), (2L, 0L)).toDF("value", "part").write
            .option("rangePartitions", "part")
            .option(StarLakeOptions.REPLACE_WHERE_OPTION, "part = 0")
            .option(StarLakeOptions.MERGE_SCHEMA_OPTION, "false")
            .overwrite(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.MERGE_SCHEMA_OPTION))
      }
    }
  }

  equivalenceTest("reject change partition columns with overwrite - sqlConf or option") {
    enableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id").write
          .overwrite(dir)
        // Changing range partition columns on overwrite is not allowed.
        val e1 = intercept[AnalysisException] {
          spark.range(5).toDF("id").withColumn("part", 'id % 2).write
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .option("rangePartitions", "part")
            .overwrite(dir)
        }
        assert(e1.getMessage.contains("partition columns"))
        // Neither is introducing hash partition columns.
        val e2 = intercept[AnalysisException] {
          spark.range(5).toDF("id")
            .withColumn("part", 'id % 2)
            .write
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .option("hashPartitions", "part")
            .option("hashBucketNum", "2")
            .overwrite(dir)
        }
        assert(e2.getMessage.contains("partition columns"))
        // Table metadata must be unchanged after the failed writes.
        val snapshotManagement = SnapshotManagement(dir.getAbsolutePath)
        assert(snapshotManagement.snapshot.getTableInfo.range_partition_columns === Nil)
        assert(snapshotManagement.snapshot.getTableInfo.schema.fieldNames === Array("id"))
      }
    }
  }

  equivalenceTest("reject set hash partitioning without bucket num") {
    disableAutoMigration {
      withTempDir { dir =>
        // hashPartitions requires hashBucketNum to be supplied as well.
        val e = intercept[AnalysisException] {
          spark.range(5).toDF("id")
            .withColumn("hash", 'id % 3)
            .write
            .option("hashPartitions", "hash")
            .overwrite(dir)
        }
        assert(e.getMessage.contains(StarLakeOptions.HASH_BUCKET_NUM))
      }
    }
  }

  equivalenceTest("can drop data columns with overwriteSchema") {
    disableAutoMigration {
      withTempDir { dir =>
        spark.range(5).toDF("id")
          .withColumn("part", 'id % 2)
          .write
          .overwrite(dir)
        // overwriteSchema=true may remove data columns.
        spark.range(5).toDF("id").write
          .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
          .overwrite(dir)
        val snapshotManagement = SnapshotManagement(dir.getAbsolutePath)
        assert(snapshotManagement.snapshot.getTableInfo.range_partition_columns === Nil)
        assert(snapshotManagement.snapshot.getTableInfo.schema.fieldNames === Array("id"))
      }
    }
  }

  equivalenceTest("can change column data type with overwriteSchema") {
    disableAutoMigration {
      withTempDir { dir =>
        val snapshotManagement = SnapshotManagement(dir.getAbsolutePath)
        spark.range(5).toDF("id").write
          .overwrite(dir)
        assert(snapshotManagement.updateSnapshot()
          .getTableInfo.schema.head === StructField("id", LongType))
        // overwriteSchema=true may change a column's data type (long -> string).
        spark.range(5).toDF("id").selectExpr("cast(id as string) as id").write
          .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
          .overwrite(dir)
        assert(snapshotManagement.updateSnapshot()
          .getTableInfo.schema.head === StructField("id", StringType))
      }
    }
  }

  equivalenceTest("reject columns that only differ by case - overwrite") {
    withTempDir { dir =>
      // With case-sensitive analysis, column names differing only by case are
      // rejected regardless of the overwriteSchema option.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        intercept[AnalysisException] {
          spark.range(10).withColumn("ID", 'id + 1).write.overwrite(dir)
        }
        intercept[AnalysisException] {
          spark.range(10).withColumn("ID", 'id + 1).write
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .overwrite(dir)
        }
        // Also rejected when partition columns collide only by case.
        intercept[AnalysisException] {
          spark.range(10).withColumn("a", 'id + 1).write
            .option("rangePartitions", "a,A")
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .overwrite(dir)
        }
      }
    }
  }
}
/**
 * Schema-enforcement tests for streaming writes in "complete" output mode:
 * every trigger rewrites the whole table, so a schema change between query
 * restarts requires the overwriteSchema option (or the autoMigrate conf).
 */
trait CompleteOutputModeTests extends SchemaEnforcementSuiteBase with SharedSparkSession
  with StarLakeSQLCommandTest {

  import testImplicits._

  test("reject complete mode with new schema by default") {
    disableAutoMigration {
      withTempDir(checkpointDir => {
        withTempDir { dir =>
          val memStream = MemoryStream[Long]
          // Aggregation query producing (part, count) rows.
          val query = memStream.toDS().toDF("id")
            .withColumn("part", 'id % 3)
            .groupBy("part")
            .count()
          val stream1 = query.writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            memStream.addData(1L)
            stream1.processAllAvailable()
          } finally {
            stream1.stop()
          }
          assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
          // Restarting with an extra column must fail without overwriteSchema.
          val stream2 = query.withColumn("test", lit("abc")).writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            val e = intercept[StreamingQueryException] {
              memStream.addData(2L)
              stream2.processAllAvailable()
            }
            assert(e.cause.isInstanceOf[AnalysisException])
            assert(e.cause.getMessage.contains(StarLakeOptions.OVERWRITE_SCHEMA_OPTION))
          } finally {
            stream2.stop()
          }
        }
      })
    }
  }

  test("complete mode can overwrite schema with option") {
    disableAutoMigration {
      withTempDir(checkpointDir => {
        withTempDir { dir =>
          val memStream = MemoryStream[Long]
          val query = memStream.toDS().toDF("id")
            .withColumn("part", 'id % 3)
            .groupBy("part")
            .count()
          val stream1 = query.writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            memStream.addData(1L)
            stream1.processAllAvailable()
          } finally {
            stream1.stop()
          }
          assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
          // With overwriteSchema=true, the restarted query may add a column.
          val stream2 = query.withColumn("test", lit("abc")).writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .option(StarLakeOptions.OVERWRITE_SCHEMA_OPTION, "true")
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            memStream.addData(2L)
            stream2.processAllAvailable()
            memStream.addData(3L)
            stream2.processAllAvailable()
          } finally {
            stream2.stop()
          }
          // Complete mode rewrote the table with the latest aggregate state.
          val df = spark.read.format("star").load(dir.getAbsolutePath)
          assert(df.schema.length == 3)
          checkAnswer(
            df,
            Row(0L, 1L, "abc") :: Row(1L, 1L, "abc") :: Row(2L, 1L, "abc") :: Nil)
        }
      })
    }
  }

  test("complete mode behavior with autoMigrate enabled is to migrate schema") {
    enableAutoMigration {
      withTempDir(checkpointDir => {
        withTempDir { dir =>
          val memStream = MemoryStream[Long]
          val query = memStream.toDS().toDF("id")
            .withColumn("part", 'id % 3)
            .groupBy("part")
            .count()
          val stream1 = query.writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            memStream.addData(1L)
            stream1.processAllAvailable()
          } finally {
            stream1.stop()
          }
          assert(spark.read.format("star").load(dir.getAbsolutePath).schema.length == 2)
          // With the autoMigrate conf enabled, no writer option is needed.
          val stream2 = query.withColumn("test", lit("abc")).writeStream
            .option("checkpointLocation", new File(checkpointDir, "_checkpoint").getAbsolutePath)
            .outputMode("complete")
            .format("star")
            .start(dir.getAbsolutePath)
          try {
            memStream.addData(2L)
            stream2.processAllAvailable()
            memStream.addData(3L)
            stream2.processAllAvailable()
          } finally {
            stream2.stop()
          }
          val df = spark.read.format("star").load(dir.getAbsolutePath)
          assert(df.schema.length == 3)
          checkAnswer(
            df,
            Row(0L, 1L, "abc") :: Row(1L, 1L, "abc") :: Row(2L, 1L, "abc") :: Nil)
        }
      })
    }
  }
}
// Runs the append- and overwrite-mode schema-enforcement suites against the
// path-based save API.
class SchemaEnforcementWithPathSuite extends AppendSaveModeTests with OverwriteSaveModeTests {
  override val saveOperation = SaveWithPath()
}
// Runs the streaming (append and complete output mode) schema-enforcement suites.
class SchemaEnforcementStreamingSuite
  extends AppendOutputModeTests
    with CompleteOutputModeTests {
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/MergeLogic.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.InternalRow.getAccessor
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.v2.merge.{KeyIndex, MergePartitionedFile}
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarBatch
import scala.collection.BufferedIterator
trait MergeLogic {

  /**
   * Extracts the value at `fieldIndex` from `row` according to `dataType`,
   * boxed as `Any`, or `null` when the slot is null.
   *
   * UTF8String values are cloned because the backing columnar-batch memory
   * may be reused by the reader before the value is consumed.
   *
   * @param row        row to read from
   * @param fieldIndex ordinal of the field within `row`
   * @param dataType   Spark SQL type of the field
   */
  def getValueByType(row: InternalRow, fieldIndex: Int, dataType: DataType): Any = {
    dataType match {
      case StringType => if (row.isNullAt(fieldIndex)) null else row.getUTF8String(fieldIndex).clone()
      case IntegerType | DateType => if (row.isNullAt(fieldIndex)) null else row.getInt(fieldIndex)
      case BooleanType => if (row.isNullAt(fieldIndex)) null else row.getBoolean(fieldIndex)
      case ByteType => if (row.isNullAt(fieldIndex)) null else row.getByte(fieldIndex)
      case ShortType => if (row.isNullAt(fieldIndex)) null else row.getShort(fieldIndex)
      case LongType | TimestampType => if (row.isNullAt(fieldIndex)) null else row.getLong(fieldIndex)
      case FloatType => if (row.isNullAt(fieldIndex)) null else row.getFloat(fieldIndex)
      case DoubleType => if (row.isNullAt(fieldIndex)) null else row.getDouble(fieldIndex)
      case BinaryType => if (row.isNullAt(fieldIndex)) null else row.getBinary(fieldIndex)
      case CalendarIntervalType => if (row.isNullAt(fieldIndex)) null else row.getInterval(fieldIndex)
      case t: DecimalType => if (row.isNullAt(fieldIndex)) null else row.getDecimal(fieldIndex, t.precision, t.scale)
      case t: StructType => if (row.isNullAt(fieldIndex)) null else row.getStruct(fieldIndex, t.size)
      case _: ArrayType => if (row.isNullAt(fieldIndex)) null else row.getArray(fieldIndex)
      case _: MapType => if (row.isNullAt(fieldIndex)) null else row.getMap(fieldIndex)
      // Bug fix: getAccessor returns a (SpecializedGetters, Int) => Any
      // accessor function; it must be APPLIED to (row, fieldIndex) instead of
      // being returned itself. With nullable = true the accessor performs the
      // null check internally.
      case u: UserDefinedType[_] => getAccessor(u.sqlType, true)(row, fieldIndex)
      case _ => if (row.isNullAt(fieldIndex)) null else row.get(fieldIndex, dataType)
    }
  }

  /** The file readers that have been fully read should be closed at once. */
  def closeReadFileReader(): Unit
}
import scala.collection.JavaConverters._
/**
 * Merge logic for a single file group read batch by batch: walks the row
 * batches, collapses consecutive rows that share the same combined key so
 * the LAST row of each key group wins, and handles key groups that span a
 * batch boundary by materializing the candidate row before the batch's
 * memory is recycled.
 */
class MergeSingletonFile(filesInfo: Seq[(MergePartitionedFile, PartitionReader[ColumnarBatch])]) extends MergeLogic {

  // Key column indexes/types used to build the de-duplication key.
  val keyInfoArray: Array[KeyIndex] = filesInfo.head._1.keyInfo.toArray
  // Field types in file-schema order.
  val typeArray: Array[DataType] = filesInfo.head._1.fileInfo.map(_.fieldType).toArray

  // Materialized copy (result-schema order) of the candidate row when a key
  // group crosses a batch boundary.
  var temporaryRow: Array[Any] = new Array[Any](filesInfo.head._1.resultSchema.length)

  // Current batch (single file, so at most one entry).
  var fileSeq: Seq[(MergePartitionedFile, ColumnarBatch)] = MergeUtils.getNextBatch(filesInfo)

  val fileSchema: Seq[String] = filesInfo.head._1.fileInfo.map(_.fieldName)

  // For each result column: its index in the file schema, or -1 if absent.
  val resIndex: Array[Int] = filesInfo.head._1.resultSchema.map(_.fieldName).map(schema => {
    fileSchema.indexOf(schema)
  }).toArray

  val emptyBatch: Boolean = fileSeq.isEmpty
  // True while temporaryRow holds a pending row carried over from the
  // previous batch.
  var temporaryStoreLastRow = false

  var singletonBatch: SingletonFileColumnarBatch = _
  var bufferedIt: BufferedIterator[(InternalRow, Int)] = _
  if (fileSeq.nonEmpty) {
    singletonBatch = initMergeBatch(fileSeq.head, resIndex)
    bufferedIt = fileSeq.head._2.rowIterator().asScala.zipWithIndex.buffered
  }

  // Row index (within the current batch) of the last row seen for the
  // current key group; -1 means no row selected yet.
  var rowId: Int = -1

  /**
   * Advances to the next distinct key, consuming duplicate-key rows so that
   * rowId / temporaryRow point at the LAST row of that key group.
   *
   * @return true if a row is available via getRow(), false when input is exhausted
   */
  def deDuplication(): Boolean = {
    if (emptyBatch) {
      return false
    }
    var lastKey: String = null
    rowId = -1

    while (true) {
      if (bufferedIt.hasNext) {
        val currentRow = bufferedIt.head._1
        if (StringUtils.isEmpty(lastKey)) {
          // First row of the group: remember its key and position.
          lastKey = combineKey(currentRow)
          rowId = bufferedIt.head._2
        } else {
          if (combineKey(currentRow).equals(lastKey)) {
            // Same key: the later row wins; a pending cross-batch row for
            // this key is superseded and dropped.
            if (temporaryStoreLastRow) temporaryStoreLastRow = false
            rowId = bufferedIt.head._2
          } else {
            // Key changed: current group is complete.
            return true
          }
        }
        bufferedIt.next()
      } else {
        // Current batch exhausted.
        if (rowId == -1) return false
        // Materialize the candidate row (via getValueByType) before the
        // batch's columnar memory is reused by the next read.
        val tempRow = getRow()
        resIndex.indices.foreach(i => {
          if (resIndex(i) == -1) {
            temporaryRow(i) = null
          } else {
            temporaryRow(i) = getValueByType(tempRow, i, typeArray(resIndex(i)))
          }
        })
        temporaryStoreLastRow = true
        fileSeq = MergeUtils.getNextBatch(filesInfo)
        if (fileSeq.nonEmpty) {
          bufferedIt = fileSeq.head._2.rowIterator().asScala.zipWithIndex.buffered
          singletonBatch = initMergeBatch(fileSeq.head, resIndex)
        } else {
          // No more batches: the stored row is the last of its group.
          return true
        }
      }
    }
    false
  }

  /** Concatenates the string form of all key columns of `row` into one comparison key. */
  def combineKey(row: InternalRow): String = {
    keyInfoArray.map(keyIndex => {
      row.get(keyIndex.index, keyIndex.keyType).toString
    })
      .reduce(_.concat(_))
  }

  /**
   * Returns the current merged row: the materialized cross-batch row if one
   * is pending (clearing it), otherwise the batch row at rowId.
   */
  def getRow(): InternalRow = {
    if (temporaryStoreLastRow) {
      val batchLastRow = new GenericInternalRow(temporaryRow.clone())
      temporaryRow.indices.foreach(temporaryRow(_) = null)
      temporaryStoreLastRow = false
      batchLastRow
    } else {
      singletonBatch.getRow(rowId)
    }
  }

  /**
   * Builds a column view over the batch reordered to the result schema;
   * result columns missing from the file map to null columns.
   */
  def initMergeBatch(file: (MergePartitionedFile, ColumnarBatch), resIndex: Array[Int]): SingletonFileColumnarBatch = {
    val columnArr =
      resIndex.map(res => {
        if (res == -1) {
          null
        } else {
          file._2.column(res)
        }
      })
    new SingletonFileColumnarBatch(columnArr)
  }

  /** Closes every underlying partition reader. */
  override def closeReadFileReader(): Unit = {
    filesInfo.foreach(f => f._2.close())
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/commands/CleanupSuite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import java.io.File
import com.engineplus.star.tables.{StarTable, StarTableTestUtils}
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.sources.StarLakeSQLConf
import org.apache.spark.sql.star.test.StarLakeTestUtils
import org.apache.spark.sql.test.SharedSparkSession
import org.scalatest.BeforeAndAfterEach
/**
 * Tests for CleanupCommand: detecting files that do not belong to the table
 * (untracked directories) and expiring old-version data files once the
 * retention time has elapsed.
 */
class CleanupSuite extends QueryTest
  with SharedSparkSession with BeforeAndAfterEach
  with StarLakeTestUtils {

  import testImplicits._

  /** Overwrites the table at `tablePath` with a fresh range(10) dataset. */
  def writeData(tablePath: String): Unit = {
    spark.range(10).write.mode("overwrite").format("star").save(tablePath)
  }

  test("cleanup basic test") {
    withSQLConf(StarLakeSQLConf.OLD_VERSION_RETENTION_TIME.key -> "1") {
      withTempDir(tempDir => {
        val tablePath = tempDir.getCanonicalPath
        writeData(tablePath)
        val snapshotManagement = SnapshotManagement(tablePath)

        // Add an unrelated directory manually; cleanup should report it.
        val deleteDir = new File(tempDir.getAbsolutePath, "reservoir")
        assert(deleteDir.mkdirs())

        val fs = new Path(tablePath).getFileSystem(spark.sessionState.newHadoopConf())
        var result = CleanupCommand.runCleanup(spark, snapshotManagement)
        val deleteFile = fs.makeQualified(new Path(deleteDir.toString)).toString
        checkDatasetUnorderly(result.as[String], deleteFile)

        // Repeated overwrite cycles: each run reports ALL previously expired
        // files plus the manual directory, showing the default invocation
        // lists candidates without removing them.
        val dataInfo = snapshotManagement.updateSnapshot().allDataInfo
        var oldFiles = dataInfo.map(_.file_path) ++ Seq(deleteFile)
        val fileNum = dataInfo.length
        writeData(tablePath)
        result = CleanupCommand.runCleanup(spark, snapshotManagement)
        checkDatasetUnorderly(result.as[String], oldFiles: _*)

        oldFiles = snapshotManagement.updateSnapshot().allDataInfo.map(_.file_path) ++ oldFiles
        writeData(tablePath)
        result = CleanupCommand.runCleanup(spark, snapshotManagement)
        checkDatasetUnorderly(result.as[String], oldFiles: _*)

        oldFiles = snapshotManagement.updateSnapshot().allDataInfo.map(_.file_path) ++ oldFiles
        writeData(tablePath)
        result = CleanupCommand.runCleanup(spark, snapshotManagement)
        checkDatasetUnorderly(result.as[String], oldFiles: _*)

        oldFiles = snapshotManagement.updateSnapshot().allDataInfo.map(_.file_path) ++ oldFiles
        writeData(tablePath)
        result = CleanupCommand.runCleanup(spark, snapshotManagement)
        checkDatasetUnorderly(result.as[String], oldFiles: _*)

        // NOTE(review): the third argument presumably disables dry-run so the
        // cleanup actually executes — confirm against CleanupCommand. After it,
        // only the live version's files remain registered in meta.
        CleanupCommand.runCleanup(spark, snapshotManagement, false)
        assert(
          StarTableTestUtils
            .getNumByTableId("data_info", snapshotManagement.snapshot.getTableInfo.table_id)
            == fileNum
        )
      })
    }
  }

  test("simple cleanup") {
    withSQLConf(StarLakeSQLConf.OLD_VERSION_RETENTION_TIME.key -> "1") {
      withTempDir(dir => {
        val tablePath = dir.getCanonicalPath
        Seq((1, 1, 1), (1, 2, 2), (2, 3, 3)).toDF("range", "hash", "value")
          .write.mode("overwrite")
          .format("star")
          .option("rangePartitions", "range")
          .option("hashPartitions", "hash")
          .option("hashBucketNum", "1")
          .save(tablePath)
        val snapshotManagement = SnapshotManagement(tablePath)
        val oldFile = new Path(snapshotManagement.snapshot.allDataInfo.head.file_path).toUri

        // Overwrite again: the first version's file becomes an old version.
        Seq((1, 1, 1), (1, 2, 2), (2, 3, 3)).toDF("range", "hash", "value")
          .write.mode("overwrite")
          .format("star")
          .option("rangePartitions", "range")
          .option("hashPartitions", "hash")
          .option("hashBucketNum", "1")
          .save(tablePath)
        val newFile = new Path(snapshotManagement.updateSnapshot().allDataInfo.head.file_path).toUri

        // Wait past the 1ms-scale retention window, then cleanup deletes the
        // old file but keeps the current version's file.
        Thread.sleep(1000)
        StarTable.forPath(tablePath).cleanup()
        assert(!new File(oldFile).exists())
        assert(new File(newFile).exists())
      })
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/catalog/StarLakeTableV2.scala
|
<filename>src/main/scala/org/apache/spark/sql/star/catalog/StarLakeTableV2.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.catalog
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogUtils}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.connector.write._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.sources.{BaseRelation, Filter, InsertableRelation}
import org.apache.spark.sql.star._
import org.apache.spark.sql.star.commands.WriteIntoTable
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.sources.{StarLakeDataSource, StarLakeSourceUtils}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.{AnalysisException, DataFrame, SaveMode, SparkSession}
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
 * DataSourceV2 [[Table]] implementation for StarLake tables, supporting batch
 * reads and V1-fallback batch writes.
 *
 * @param spark                current SparkSession
 * @param path                 table path (may embed a partition identifier when not catalog-backed)
 * @param catalogTable         catalog entry when the table comes from a catalog
 * @param tableIdentifier      optional textual identifier used by name()
 * @param userDefinedFileIndex caller-supplied file index overriding the default one
 * @param mergeOperatorInfo    merge-operator column mapping pushed into the scan options
 */
case class StarLakeTableV2(spark: SparkSession,
                           path: Path,
                           catalogTable: Option[CatalogTable] = None,
                           tableIdentifier: Option[String] = None,
                           userDefinedFileIndex: Option[StarLakeFileIndexV2] = None,
                           var mergeOperatorInfo: Option[Map[String, String]] = None)
  extends Table with SupportsWrite with SupportsRead {

  // Catalog tables use their location directly; otherwise the path string may
  // carry partition filters that must be parsed out.
  private lazy val (rootPath, partitionFilters) =
    if (catalogTable.isDefined) {
      // Fast path for reducing path munging overhead
      (new Path(catalogTable.get.location), Nil)
    } else {
      StarLakeDataSource.parsePathIdentifier(spark, path.toString)
    }

  // The loading of the SnapshotManagement is lazy in order to reduce the amount of FileSystem calls,
  // in cases where we will fallback to the V1 behavior.
  lazy val snapshotManagement: SnapshotManagement = SnapshotManagement(rootPath)

  //  def getTableIdentifierIfExists: Option[TableIdentifier] = tableIdentifier.map(
  //    spark.sessionState.sqlParser.parseTableIdentifier)

  // Prefer the catalog identifier, then the explicit identifier, then a
  // path-based "star.`...`" name.
  override def name(): String = catalogTable.map(_.identifier.unquotedString)
    .orElse(tableIdentifier)
    .getOrElse(s"star.`${snapshotManagement.table_name}`")

  private lazy val snapshot: Snapshot = snapshotManagement.snapshot

  // Full table schema: data columns followed by range partition columns.
  override def schema(): StructType =
    StructType(snapshot.getTableInfo.data_schema ++ snapshot.getTableInfo.range_partition_schema)

  private lazy val dataSchema: StructType = snapshot.getTableInfo.data_schema

  private lazy val fileIndex: StarLakeFileIndexV2 = {
    if (userDefinedFileIndex.isDefined) {
      userDefinedFileIndex.get
    } else {
      DataFileIndexV2(spark, snapshotManagement)
    }
  }

  // Range partition columns are exposed as identity transforms.
  override def partitioning(): Array[Transform] = {
    snapshot.getTableInfo.range_partition_columns.map { col =>
      new IdentityTransform(new FieldReference(Seq(col)))
    }.toArray
  }

  override def properties(): java.util.Map[String, String] = {
    val base = new java.util.HashMap[String, String]()
    // "path" is surfaced separately via PROP_LOCATION below.
    snapshot.getTableInfo.configuration.foreach { case (k, v) =>
      if (k != "path") {
        base.put(k, v)
      }
    }
    base.put(TableCatalog.PROP_PROVIDER, "star")
    base.put(TableCatalog.PROP_LOCATION, CatalogUtils.URIToString(path.toUri))
    //    Option(snapshot.getTableInfo.description).foreach(base.put(TableCatalog.PROP_COMMENT, _))
    base
  }

  override def capabilities(): java.util.Set[TableCapability] = Set(
    ACCEPT_ANY_SCHEMA, BATCH_READ, //BATCH_WRITE, OVERWRITE_DYNAMIC,
    V1_BATCH_WRITE, OVERWRITE_BY_FILTER, TRUNCATE
  ).asJava

  override def newScanBuilder(options: CaseInsensitiveStringMap): StarLakeScanBuilder = {
    if (mergeOperatorInfo.getOrElse(Map.empty[String, String]).nonEmpty) {
      // Merge operators require a hash-partitioned table and every referenced
      // field must exist in the table schema.
      assert(
        snapshot.getTableInfo.hash_partition_columns.nonEmpty,
        "Merge operator should be used with hash partitioned table")
      val fields = schema().fieldNames
      mergeOperatorInfo.get.map(_._1.replaceFirst(StarLakeUtils.MERGE_OP_COL, ""))
        .foreach(info => {
          if (!fields.contains(info)) {
            throw StarLakeErrors.useMergeOperatorForNonStarTableField(info)
          }
        })
    }
    // Merge-operator entries ride along as extra scan options.
    val newOptions = options.asCaseSensitiveMap().asScala ++
      mergeOperatorInfo.getOrElse(Map.empty[String, String])
    StarLakeScanBuilder(spark, fileIndex, schema(), dataSchema,
      new CaseInsensitiveStringMap(newOptions.asJava), snapshot.getTableInfo)
  }

  override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
    new WriteIntoTableBuilder(snapshotManagement, info.options)
  }

  /**
   * Creates a V1 BaseRelation from this Table to allow read APIs to go through V1 DataSource code
   * paths.
   */
  def toBaseRelation: BaseRelation = {
    val partitionPredicates = StarLakeDataSource.verifyAndCreatePartitionFilters(
      path.toString, snapshotManagement.snapshot, partitionFilters)
    snapshotManagement.createRelation(partitionPredicates)
  }
}
/**
 * V1 write builder that funnels DataSourceV2 writes into [[WriteIntoTable]].
 * truncate()/overwrite() switch the save mode to Overwrite; a filter-based
 * overwrite is translated into a `replaceWhere` option.
 */
private class WriteIntoTableBuilder(snapshotManagement: SnapshotManagement,
                                    writeOptions: CaseInsensitiveStringMap)
  extends WriteBuilder with V1WriteBuilder with SupportsOverwrite with SupportsTruncate {

  // Becomes true once truncate() or overwrite() has been requested.
  private var overwriteRequested = false

  // Mutable copy of the caller-supplied options; may gain a replaceWhere entry.
  private val collectedOptions = {
    val seed = writeOptions.asCaseSensitiveMap().asScala.toSeq
    mutable.HashMap[String, String](seed: _*)
  }

  override def truncate(): WriteIntoTableBuilder = {
    overwriteRequested = true
    this
  }

  override def overwrite(filters: Array[Filter]): WriteBuilder = {
    // An explicit replaceWhere option and a filter-based overwrite are
    // mutually exclusive.
    if (writeOptions.containsKey("replaceWhere")) {
      throw new AnalysisException(
        "You can't use replaceWhere in conjunction with an overwrite by filter")
    }
    collectedOptions.put("replaceWhere", StarLakeSourceUtils.translateFilters(filters).sql)
    overwriteRequested = true
    this
  }

  override def buildForV1Write(): InsertableRelation = {
    new InsertableRelation {
      override def insert(data: DataFrame, overwrite: Boolean): Unit = {
        val session = data.sparkSession
        val mode = if (overwriteRequested) SaveMode.Overwrite else SaveMode.Append
        WriteIntoTable(
          snapshotManagement,
          mode,
          new StarLakeOptions(collectedOptions.toMap, session.sessionState.conf),
          snapshotManagement.snapshot.getTableInfo.configuration,
          data).run(session)
        // TODO: Push this to Apache Spark
        // Re-cache all cached plans (including this relation itself, if cached)
        // that refer to this data source relation, matching InsertInto behavior.
        session.sharedState.cacheManager.recacheByPlan(
          session, LogicalRelation(snapshotManagement.createRelation()))
      }
    }
  }
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/commands/DeleteScalaSuite.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import com.engineplus.star.tables
import com.engineplus.star.tables.{StarTableTestUtils, StarTable}
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.{Row, functions}
/**
 * Scala-API variant of the delete test suite: exercises the StarTable.delete
 * overloads (no condition, string condition, Column condition) on top of the
 * scenarios inherited from DeleteSuiteBase.
 */
class DeleteScalaSuite extends DeleteSuiteBase with StarLakeSQLCommandTest {

  import testImplicits._

  test("delete cached table by path") {
    Seq((2, 2), (1, 4)).toDF("key", "value")
      .write.mode("overwrite").format("star").save(tempPath)
    // Cache and materialize the table first; the delete must invalidate it.
    spark.read.format("star").load(tempPath).cache()
    spark.read.format("star").load(tempPath).collect()
    executeDelete(s"star.`$tempPath`", where = "key = 2")
    checkAnswer(spark.read.format("star").load(tempPath), Row(1, 4) :: Nil)
  }

  test("delete usage test - without condition") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = StarTable.forPath(tempPath)
    // delete() with no condition removes every row.
    table.delete()
    checkAnswer(readStarLakeTable(tempPath), Nil)
  }

  test("delete usage test - with condition") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.delete("key = 1 or key = 2")
    checkAnswer(readStarLakeTable(tempPath), Row(3, 30) :: Row(4, 40) :: Nil)
  }

  test("delete usage test - with Column condition") {
    append(Seq((1, 10), (2, 20), (3, 30), (4, 40)).toDF("key", "value"))
    val table = tables.StarTable.forPath(tempPath)
    table.delete(functions.expr("key = 1 or key = 2"))
    checkAnswer(readStarLakeTable(tempPath), Row(3, 30) :: Row(4, 40) :: Nil)
  }

  /**
   * Resolves `target` (either "star.`path`" or "tableName [alias]") to a
   * StarTable and runs delete with the optional `where` condition.
   */
  override protected def executeDelete(target: String, where: String = null): Unit = {

    /**
     * Splits "name" or "name alias" into a table name and an optional alias.
     * A second token containing non-alphanumeric characters is treated as part
     * of the table name (e.g. a backquoted path containing a space), not as an
     * alias.
     */
    def parse(tableNameWithAlias: String): (String, Option[String]) = {
      tableNameWithAlias.split(" ").toList match {
        case tableName :: Nil => tableName -> None // just table name
        case tableName :: alias :: Nil => // tableName SPACE alias, OR a name that itself contains a space
          val ordinary = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')).toSet
          if (!alias.forall(ordinary.contains(_))) {
            (tableName + " " + alias) -> None
          } else {
            tableName -> Some(alias)
          }
        case _ =>
          fail(s"Could not build parse '$tableNameWithAlias' for table and optional alias")
      }
    }

    val starTable: StarTable = {
      val (tableNameOrPath, optionalAlias) = parse(target)
      val isPath: Boolean = tableNameOrPath.startsWith("star.")
      val table = if (isPath) {
        val path = tableNameOrPath.stripPrefix("star.`").stripSuffix("`")
        tables.StarTable.forPath(spark, path)
      } else {
        StarTableTestUtils.createTable(spark.table(tableNameOrPath),
          SnapshotManagement(tableNameOrPath))
      }
      optionalAlias.map(table.as(_)).getOrElse(table)
    }

    if (where != null) {
      starTable.delete(where)
    } else {
      starTable.delete()
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/exception/MetaRerunErrors.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.exception
import org.apache.spark.sql.star.utils.PartitionInfo
/**
 * Factories for [[MetaRerunException]]s raised when a commit fails a metadata
 * conflict check and the caller should refresh its snapshot and retry.
 * Every exception carries `commit_id` so the retry logic can identify the
 * failed commit.
 */
object MetaRerunErrors {
// Another job ADDED a file we did not see at read time:
// our read_version (from `info`) is older than the conflicting write_version.
def fileChangedException(info: PartitionInfo,
file_path: String,
write_version: Long,
commit_id: String): MetaRerunException = {
new MetaRerunException(
s"""
|Error: Another job added file "$file_path" in partition: "${info.range_value}" during write_version=$write_version, but your read_version is ${info.read_version}.
|Commit id=$commit_id failed to update meta because of data info conflict. Please update and retry.
|Error table: ${info.table_name}.
""".stripMargin,
commit_id)
}
// A file we read was DELETED by another job before our commit landed.
def fileDeletedException(info: PartitionInfo,
file_path: String,
write_version: Long,
commit_id: String): MetaRerunException = {
new MetaRerunException(
s"""
|Error: File "$file_path" in partition: "${info.range_value}" deleted by another job during write_version=$write_version, but your read_version is ${info.read_version}.
|Commit id=$commit_id failed to update meta because of data info conflict. Please retry.
|Error table: ${info.table_name}.
""".stripMargin,
commit_id)
}
// The whole partition was dropped and re-created concurrently.
def partitionChangedException(range_value: String, commit_id: String): MetaRerunException = {
new MetaRerunException(
s"""
|Error: Partition `$range_value` has been changed, it may have another job drop and create a newer one.
""".stripMargin,
commit_id)
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/MergeParquetPartitionReaderFactory.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet
import java.net.URI
import java.time.ZoneId
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate}
import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS
import org.apache.parquet.hadoop.{ParquetFileReader, ParquetInputFormat, ParquetInputSplit}
import org.apache.spark.TaskContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader}
import org.apache.spark.sql.execution.datasources.parquet._
import org.apache.spark.sql.execution.datasources.v2.merge.MergePartitionedFile
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator.MergeOperator
import org.apache.spark.sql.execution.datasources.{DataSourceUtils, RecordReaderIterator}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.{AtomicType, StructType}
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.SerializableConfiguration
import scala.collection.mutable
/**
* A factory used to create Parquet readers.
*
* @param sqlConf SQL configuration.
* @param broadcastedConf Broadcast serializable Hadoop Configuration.
* @param dataSchema Schema of Parquet files.
* @param readDataSchema Required schema of Parquet files.
* @param partitionSchema Schema of partitions.
* // * @param filterMap Filters to be pushed down in the batch scan.
*/
case class MergeParquetPartitionReaderFactory(sqlConf: SQLConf,
broadcastedConf: Broadcast[SerializableConfiguration],
dataSchema: StructType,
readDataSchema: StructType,
partitionSchema: StructType,
filters: Array[Filter],
mergeOperatorInfo: Map[String, MergeOperator[Any]])
extends MergeFilePartitionReaderFactory(mergeOperatorInfo) with Logging {
// SQLConf-derived settings are captured eagerly because this factory is shipped
// to executors; readers are then built per partition from these snapshots.
private val isCaseSensitive = sqlConf.caseSensitiveAnalysis
private val resultSchema = StructType(partitionSchema.fields ++ readDataSchema.fields)
private val enableOffHeapColumnVector = sqlConf.offHeapColumnVectorEnabled
private val enableVectorizedReader: Boolean = sqlConf.parquetVectorizedReaderEnabled &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
private val enableRecordFilter: Boolean = sqlConf.parquetRecordFilterEnabled
private val timestampConversion: Boolean = sqlConf.isParquetINT96TimestampConversion
private val capacity = sqlConf.parquetVectorizedReaderBatchSize
private val enableParquetFilterPushDown: Boolean = sqlConf.parquetFilterPushDown
private val pushDownDate = sqlConf.parquetFilterPushDownDate
private val pushDownTimestamp = sqlConf.parquetFilterPushDownTimestamp
private val pushDownDecimal = sqlConf.parquetFilterPushDownDecimal
private val pushDownStringStartWith = sqlConf.parquetFilterPushDownStringStartWith
private val pushDownInFilterThreshold = sqlConf.parquetFilterPushDownInFilterThreshold
// schema mapping encoded as: path->schema|path->schema|path->schema
// Decodes the per-version requested-schema mapping that the scan stored in the
// broadcast Hadoop conf; looked up by file.rangeVersion in buildReaderBase.
private val requestSchemaMap: mutable.Map[String, String] = broadcastedConf.value.value
.get(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA).split("\\|")
.map(str => mutable.Map(str.split("->")(0) -> str.split("->")(1)))
.fold(mutable.Map[String, String]())(_ ++ _)
// Reports false (the merge scan drives batches itself through buildColumnarReader)
// but still asserts the preconditions the vectorized reader relies on.
override def supportColumnarReads(partition: InputPartition): Boolean = {
//don't support columnar reads, but retain this assert
assert(sqlConf.parquetVectorizedReaderEnabled && sqlConf.wholeStageEnabled &&
resultSchema.length <= sqlConf.wholeStageMaxNumFields &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType]))
false
}
// Row-based reads are intentionally unsupported for the merge scan.
override def buildReader(file: MergePartitionedFile): PartitionReader[InternalRow] = {
throw new Exception("Star Lake Merge scan shouldn't use this method, only buildColumnarReader will be used.")
}
// Wraps an initialized vectorized Parquet reader in a PartitionReader that
// yields ColumnarBatches.
override def buildColumnarReader(file: MergePartitionedFile): PartitionReader[ColumnarBatch] = {
val vectorizedReader = createVectorizedReader(file)
vectorizedReader.enableReturningBatches()
new PartitionReader[ColumnarBatch] {
override def next(): Boolean = vectorizedReader.nextKeyValue()
override def get(): ColumnarBatch =
vectorizedReader.getCurrentValue.asInstanceOf[ColumnarBatch]
override def close(): Unit = vectorizedReader.close()
}
}
// Builds and initializes the reader for one file, then attaches the partition
// columns so they appear in every returned batch.
private def createVectorizedReader(file: MergePartitionedFile): VectorizedParquetRecordReader = {
val vectorizedReader = buildReaderBase(file, createVectorizedReader)
.asInstanceOf[VectorizedParquetRecordReader]
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
vectorizedReader
}
// Factory callback used by buildReaderBase: constructs the reader (initialize()
// is done by the caller) and registers a task-completion hook that closes it.
private def createVectorizedReader(split: ParquetInputSplit,
partitionValues: InternalRow,
hadoopAttemptContext: TaskAttemptContextImpl,
pushed: Option[FilterPredicate],
convertTz: Option[ZoneId],
datetimeRebaseMode: LegacyBehaviorPolicy.Value,
int96RebaseMode: LegacyBehaviorPolicy.Value): VectorizedParquetRecordReader = {
val taskContext = Option(TaskContext.get())
val vectorizedReader = new VectorizedParquetRecordReader(
convertTz.orNull,
datetimeRebaseMode.toString,
int96RebaseMode.toString,
enableOffHeapColumnVector && taskContext.isDefined,
capacity)
val iter = new RecordReaderIterator(vectorizedReader)
// SPARK-23457 Register a task completion listener before `initialization`.
taskContext.foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
logDebug(s"Appending $partitionSchema $partitionValues")
vectorizedReader
}
// Shared construction path: builds the Parquet split, resolves pushed-down
// filters, timezone conversion and rebase modes from the file footer, sets the
// per-file schemas on the conf, then delegates to buildReaderFunc and
// initializes the resulting reader.
private def buildReaderBase[T](file: MergePartitionedFile,
buildReaderFunc: (
ParquetInputSplit, InternalRow, TaskAttemptContextImpl,
Option[FilterPredicate], Option[ZoneId],
LegacyBehaviorPolicy.Value,
LegacyBehaviorPolicy.Value) => RecordReader[Void, T]): RecordReader[Void, T] = {
val conf = broadcastedConf.value.value
val filePath = new Path(new URI(file.filePath))
val split =
new org.apache.parquet.hadoop.ParquetInputSplit(
filePath,
file.start,
file.start + file.length,
file.length,
Array.empty,
null)
// Footer is only read when actually needed (push-down / conversion / rebase).
lazy val footerFileMetaData =
ParquetFileReader.readFooter(conf, filePath, SKIP_ROW_GROUPS).getFileMetaData
// Try to push down filters when filter push-down is enabled.
val pushed = if (enableParquetFilterPushDown) {
val parquetSchema = footerFileMetaData.getSchema
val parquetFilters = new ParquetFilters(parquetSchema, pushDownDate, pushDownTimestamp,
pushDownDecimal, pushDownStringStartWith, pushDownInFilterThreshold, isCaseSensitive)
filters
// Collects all converted Parquet filter predicates. Notice that not all predicates can be
// converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
// is used here.
.flatMap(parquetFilters.createFilter)
.reduceOption(FilterApi.and)
} else {
None
}
// PARQUET_INT96_TIMESTAMP_CONVERSION says to apply timezone conversions to int96 timestamps'
// *only* if the file was created by something other than "parquet-mr", so check the actual
// writer here for this file. We have to do this per-file, as each file in the table may
// have different writers.
// Define isCreatedByParquetMr as function to avoid unnecessary parquet footer reads.
def isCreatedByParquetMr: Boolean =
footerFileMetaData.getCreatedBy().startsWith("parquet-mr")
val convertTz =
if (timestampConversion && !isCreatedByParquetMr) {
Some(DateTimeUtils.getZoneId(conf.get(SQLConf.SESSION_LOCAL_TIMEZONE.key)))
} else {
None
}
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
// Point read/write schemas at this file's range version BEFORE creating the
// attempt context, so the context sees the final configuration.
conf.set(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA, requestSchemaMap(file.rangeVersion))
conf.set(ParquetWriteSupport.SPARK_ROW_SCHEMA, requestSchemaMap(file.rangeVersion))
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
// Try to push down filters when filter push-down is enabled.
// Notice: This push-down is RowGroups level, not individual records.
if (pushed.isDefined) {
ParquetInputFormat.setFilterPredicate(hadoopAttemptContext.getConfiguration, pushed.get)
}
val datetimeRebaseMode = DataSourceUtils.datetimeRebaseMode(
footerFileMetaData.getKeyValueMetaData.get,
SQLConf.get.getConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ))
val int96RebaseMode = DataSourceUtils.int96RebaseMode(
footerFileMetaData.getKeyValueMetaData.get,
SQLConf.get.getConf(SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_READ))
val reader = buildReaderFunc(
split, file.partitionValues, hadoopAttemptContext, pushed, convertTz, datetimeRebaseMode, int96RebaseMode)
reader.initialize(split, hadoopAttemptContext)
reader
}
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/material_view/AggregateInfo.scala
|
<reponame>engine-plus/StarLake
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.material_view
import com.engineplus.star.meta.MetaUtils
import org.apache.spark.sql.star.material_view.ConstructQueryInfo._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
 * Mutable collector for the aggregate-related parts of a query being matched
 * against a material view: the tables and columns the aggregation touches plus
 * its equality and other filter conditions.
 */
class AggregateInfo {
  private var aggTables: Set[String] = Set.empty[String]
  private var aggColumns: Set[String] = Set.empty[String]
  // Stored with the lexicographically smaller operand first so that
  // `a = b` and `b = a` become the same entry.
  private val aggEqualConditions: mutable.Set[(String, String)] = mutable.Set[(String, String)]()
  private val aggOtherConditions: ArrayBuffer[String] = new ArrayBuffer[String]()

  /** Records an equality condition with its operands in canonical order. */
  def setAggEqualCondition(left: String, right: String): Unit = {
    // FIX: pass explicit tuples — `add(left, right)` relied on adapted argument
    // auto-tupling, which is deprecated and fails under -Xfatal-warnings.
    if (left.compareTo(right) <= 0) {
      aggEqualConditions.add((left, right))
    } else {
      aggEqualConditions.add((right, left))
    }
  }

  /** Records a non-equality filter condition verbatim. */
  def setAggOtherCondition(cond: String): Unit = {
    aggOtherConditions += cond
  }

  /** Replaces the sets of tables and columns referenced by the aggregation. */
  def setAggInfo(tables: Set[String], cols: Set[String]): Unit = {
    aggTables = tables
    aggColumns = cols
  }

  /**
   * Materializes the collected state into an [[AggregateDetail]], rewriting each
   * string through `getFinalStringByReplace` with the table and alias mappings.
   */
  def buildAggregateDetail(tables: Map[String, String], asInfo: Map[String, String]): AggregateDetail = {
    AggregateDetail(
      aggTables.map(getFinalStringByReplace(_, tables, asInfo)),
      aggColumns.map(getFinalStringByReplace(_, tables, asInfo)),
      aggEqualConditions.map(m =>
        (getFinalStringByReplace(m._1, tables, asInfo),
          getFinalStringByReplace(m._2, tables, asInfo)))
        .toSet,
      aggOtherConditions.map(getFinalStringByReplace(_, tables, asInfo)).toSet)
  }
}
object AggregateInfo {
  /**
   * Parses a string produced by [[AggregateDetail.toString]] back into an
   * [[AggregateDetail]]. Layout: tables, columns, equal conditions and other
   * conditions joined by SEP_03; elements by SEP_02; tuple halves by SEP_01.
   */
  def buildDetail(str: String): AggregateDetail = {
    val sections = str.split(MetaUtils.STAR_LAKE_SEP_03, -1)
    assert(sections.length == 4)

    def toSet(section: String): Set[String] = section.split(MetaUtils.STAR_LAKE_SEP_02).toSet

    val equalConditions =
      if (sections(2).isEmpty) {
        Set.empty[(String, String)]
      } else {
        sections(2).split(MetaUtils.STAR_LAKE_SEP_02, -1).map { pair =>
          val halves = pair.split(MetaUtils.STAR_LAKE_SEP_01)
          assert(halves.length == 2)
          (halves(0), halves(1))
        }.toSet
      }

    val otherConditions =
      if (sections(3).isEmpty) Set.empty[String] else toSet(sections(3))

    AggregateDetail(toSet(sections(0)), toSet(sections(1)), equalConditions, otherConditions)
  }
}
/**
 * Serializable description of an aggregation: referenced tables and columns plus
 * its equality and other filter conditions. `toString` emits the wire format
 * parsed back by [[AggregateInfo.buildDetail]].
 */
case class AggregateDetail(tables: Set[String],
                           columns: Set[String],
                           equalCondition: Set[(String, String)],
                           otherCondition: Set[String]) {
  override def toString: String = {
    def joined(items: Iterable[String]): String = items.mkString(MetaUtils.STAR_LAKE_SEP_02)

    val equalPart = joined(equalCondition.map {
      case (left, right) => left + MetaUtils.STAR_LAKE_SEP_01 + right
    })
    Seq(joined(tables), joined(columns), equalPart, joined(otherCondition))
      .mkString(MetaUtils.STAR_LAKE_SEP_03)
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/MergeOperatorColumnarBatchRow.scala
|
<filename>src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/MergeOperatorColumnarBatchRow.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator.{DefaultMergeOp, FieldIndex, MergeColumnIndex, MergeOperator}
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.{ColumnVector, ColumnarBatch}
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
/**
*
* @param columns ordered column vectors of all file
* @param mergeOps merge operators
* @param indexTypeArray result schema index and type
*/
class MergeOperatorColumnarBatchRow(columns: Array[ColumnVector],
mergeOps: Seq[MergeOperator[Any]],
indexTypeArray: Seq[FieldIndex]) extends MergeBatchRow {
val size: Int = indexTypeArray.length
var idMix: Seq[Seq[MergeColumnIndex]] = _
var value: Array[Any] = new Array[Any](size)
private def getIndex(ordinal: Int): Seq[MergeColumnIndex] = {
idMix(ordinal)
}
//merge data from idMix
def mergeValues(): Unit = {
idMix.zipWithIndex.foreach(m => {
if (m._1.nonEmpty) {
val dataType = indexTypeArray(m._2).filedType
dataType match {
case StringType => mergeUTF8String(m._1, m._2)
case IntegerType | DateType => mergeInt(m._1, m._2)
case BooleanType => mergeBoolean(m._1, m._2)
case ByteType => mergeBoolean(m._1, m._2)
case ShortType => mergeShort(m._1, m._2)
case LongType | TimestampType => mergeLong(m._1, m._2)
case FloatType => mergeFloat(m._1, m._2)
case DoubleType => mergeDouble(m._1, m._2)
case BinaryType => mergeBinary(m._1, m._2)
case CalendarIntervalType => mergeInterval(m._1, m._2)
case t: DecimalType => mergeDecimal(m._1, m._2, t.precision, t.scale)
case t: StructType => mergeStruct(m._1, m._2, t.size)
case _: ArrayType => mergeArray(m._1, m._2)
case _: MapType => mergeMap(m._1, m._2)
case o => throw new UnsupportedOperationException(s"StarLake MergeOperator don't support type ${o.typeName}")
}
}
})
}
private def getMergeOp(ordinal: Int): MergeOperator[Any] = {
mergeOps(ordinal)
}
override def numFields(): Int = {
idMix.length
}
override def copy(): InternalRow = {
val row: GenericInternalRow = new GenericInternalRow(idMix.length)
(0 to numFields()).foreach(i => {
if (isNullAt(i)) {
row.setNullAt(i)
} else {
val colIdAndRowId: Seq[MergeColumnIndex] = getIndex(i)
val dt = columns(colIdAndRowId.head.columnVectorIndex).dataType()
setRowData(i, dt, row)
}
})
row
}
override def anyNull: Boolean = {
throw new UnsupportedOperationException()
}
override def isNullAt(ordinal: Int): Boolean = {
getIndex(ordinal).isEmpty || value(ordinal) == null
}
override def getBoolean(ordinal: Int): Boolean = {
value(ordinal).asInstanceOf[Boolean]
}
override def getByte(ordinal: Int): Byte = {
value(ordinal).asInstanceOf[Byte]
}
override def getShort(ordinal: Int): Short = {
value(ordinal).asInstanceOf[Short]
}
override def getInt(ordinal: Int): Int = {
value(ordinal).asInstanceOf[Int]
}
override def getLong(ordinal: Int): Long = {
value(ordinal).asInstanceOf[Long]
}
override def getFloat(ordinal: Int): Float = {
value(ordinal).asInstanceOf[Float]
}
override def getDouble(ordinal: Int): Double = {
value(ordinal).asInstanceOf[Double]
}
override def getDecimal(ordinal: Int, precision: Int, scale: Int): Decimal = {
value(ordinal).asInstanceOf[Decimal]
}
override def getUTF8String(ordinal: Int): UTF8String = {
value(ordinal).asInstanceOf[UTF8String]
}
override def getBinary(ordinal: Int): Array[Byte] = {
value(ordinal).asInstanceOf[Array[Byte]]
}
override def getInterval(ordinal: Int): CalendarInterval = {
value(ordinal).asInstanceOf[CalendarInterval]
}
override def getStruct(ordinal: Int, numFields: Int): InternalRow = {
value(ordinal).asInstanceOf[InternalRow]
}
override def getArray(ordinal: Int): ArrayData = {
value(ordinal).asInstanceOf[ArrayData]
}
override def getMap(ordinal: Int): MapData = {
value(ordinal).asInstanceOf[MapData]
}
override def update(i: Int, value: Any): Unit = {
throw new UnsupportedOperationException()
}
override def setNullAt(i: Int): Unit = {
throw new UnsupportedOperationException()
}
/** merge values */
def mergeBoolean(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getBoolean(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getBoolean(m.rowIndex)
}
}).asInstanceOf[Seq[Boolean]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Boolean]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeByte(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getByte(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getByte(m.rowIndex)
}
}).asInstanceOf[Seq[Byte]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Byte]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeShort(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getShort(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getShort(m.rowIndex)
}
}).asInstanceOf[Seq[Short]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Short]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeInt(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getInt(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getInt(m.rowIndex)
}
}).asInstanceOf[Seq[Int]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Int]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeLong(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getLong(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getLong(m.rowIndex)
}
}).asInstanceOf[Seq[Int]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Int]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeFloat(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getFloat(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getFloat(m.rowIndex)
}
}).asInstanceOf[Seq[Float]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Float]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeDouble(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getDouble(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getDouble(m.rowIndex)
}
}).asInstanceOf[Seq[Double]]
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Double]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeDecimal(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int, precision: Int, scale: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getDecimal(colIdAndRowId.last.rowIndex, precision, scale)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getDecimal(colIdAndRowId.last.rowIndex, precision, scale)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Decimal]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeUTF8String(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getUTF8String(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getUTF8String(m.rowIndex).toString
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[String]]
value(ordinal) = UTF8String.fromString(mergeOp.mergeData(data))
}
}
def mergeBinary(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getBinary(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getBinary(m.rowIndex)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[Array[Byte]]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeInterval(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getInterval(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getInterval(m.rowIndex)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[CalendarInterval]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeStruct(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int, numFields: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getStruct(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getStruct(m.rowIndex)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[InternalRow]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeArray(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getArray(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getArray(m.rowIndex)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[ArrayData]]
value(ordinal) = mergeOp.mergeData(data)
}
}
def mergeMap(colIdAndRowId: Seq[MergeColumnIndex], ordinal: Int): Unit = {
if (getMergeOp(ordinal).isInstanceOf[DefaultMergeOp[Any]]) {
if (columns(colIdAndRowId.last.columnVectorIndex).isNullAt(colIdAndRowId.last.rowIndex)) {
value(ordinal) = null
} else {
value(ordinal) = columns(colIdAndRowId.last.columnVectorIndex).getMap(colIdAndRowId.last.rowIndex)
}
} else {
val data = colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getMap(m.rowIndex)
}
})
val mergeOp = getMergeOp(ordinal).asInstanceOf[MergeOperator[MapData]]
value(ordinal) = mergeOp.mergeData(data)
}
}
///////////////////////////////////////////////////////////////////////////////////
/** get values need to be merged */
def getMergeBoolean(ordinal: Int): Seq[Boolean] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getBoolean(m.rowIndex)
}
}).asInstanceOf[Seq[Boolean]]
}
def getMergeByte(ordinal: Int): Seq[Byte] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getByte(m.rowIndex)
}
}).asInstanceOf[Seq[Byte]]
}
def getMergeShort(ordinal: Int): Seq[Short] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getShort(m.rowIndex)
}
}).asInstanceOf[Seq[Short]]
}
def getMergeInt(ordinal: Int): Seq[Int] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getInt(m.rowIndex)
}
}).asInstanceOf[Seq[Int]]
}
def getMergeLong(ordinal: Int): Seq[Long] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getLong(m.rowIndex)
}
}).asInstanceOf[Seq[Long]]
}
def getMergeFloat(ordinal: Int): Seq[Float] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getFloat(m.rowIndex)
}
}).asInstanceOf[Seq[Float]]
}
def getMergeDouble(ordinal: Int): Seq[Double] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getDouble(m.rowIndex)
}
}).asInstanceOf[Seq[Double]]
}
def getMergeDecimal(ordinal: Int, precision: Int, scale: Int): Seq[Decimal] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getDecimal(m.rowIndex, precision, scale)
}
})
}
def getMergeUTF8String(ordinal: Int): Seq[UTF8String] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getUTF8String(m.rowIndex).clone()
}
})
}
def getMergeBinary(ordinal: Int): Seq[Array[Byte]] = {
val colIdAndRowId = getIndex(ordinal)
columns(colIdAndRowId.head.columnVectorIndex).getBinary(colIdAndRowId.head.rowIndex)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getBinary(m.rowIndex)
}
})
}
def getMergeInterval(ordinal: Int): Seq[CalendarInterval] = {
val colIdAndRowId = getIndex(ordinal)
columns(colIdAndRowId.head.columnVectorIndex).getInterval(colIdAndRowId.head.rowIndex)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getInterval(m.rowIndex)
}
})
}
def getMergeStruct(ordinal: Int, numFields: Int): Seq[InternalRow] = {
val colIdAndRowId = getIndex(ordinal)
colIdAndRowId.map(m => {
if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
null
} else {
columns(m.columnVectorIndex).getStruct(m.rowIndex)
}
})
}
/** Collects the array value at `ordinal` from every merged row slot;
  * null cells yield `null`.
  *
  * Fix: removed a stray, discarded `getArray` call on the head of the index
  * list — it did nothing with its result, skipped the null check, and would
  * throw on an empty index list. */
def getMergeArray(ordinal: Int): Seq[ArrayData] = {
  val colIdAndRowId = getIndex(ordinal)
  colIdAndRowId.map(m => {
    if (columns(m.columnVectorIndex).isNullAt(m.rowIndex)) {
      null
    } else {
      columns(m.columnVectorIndex).getArray(m.rowIndex)
    }
  })
}
/** Collects the map value at `ordinal` from every merged row slot;
  * null cells yield `null`. */
def getMergeMap(ordinal: Int): Seq[MapData] = {
  getIndex(ordinal).map { idx =>
    val col = columns(idx.columnVectorIndex)
    if (col.isNullAt(idx.rowIndex)) null else col.getMap(idx.rowIndex)
  }
}
}
|
engine-plus/StarLake
|
src/test/scala/org/apache/spark/sql/star/rules/RewriteQueryByMaterialViewBase.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.rules
import com.engineplus.star.tables.StarTable
import org.apache.spark.sql.star.test.StarLakeSQLCommandTest
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.{AnalysisException, QueryTest}
import org.apache.spark.util.Utils
import org.scalatest.BeforeAndAfterAll
/** Shared fixture for the material-view rewrite test suites.
  *
  * Creates three star tables before the tests run; concrete suites add their
  * material views via [[prepareMaterialViews]] and remove them via
  * [[cleanMaterialViews]]. All tables/views are dropped in `afterAll`.
  *
  * Fixes over the original:
  *  - replaced deprecated procedure syntax on `beforeAll`/`afterAll`;
  *  - removed the redundant `case e => throw e` in `dropTable` (unmatched
  *    exceptions propagate from a catch block anyway);
  *  - `afterAll` now uses the tolerant [[dropTable]] helper and a
  *    try/finally so a single failed drop no longer skips the remaining
  *    cleanup or `super.afterAll()`. */
abstract class RewriteQueryByMaterialViewBase extends QueryTest
  with SharedSparkSession with StarLakeSQLCommandTest with BeforeAndAfterAll {
  import testImplicits._

  val tableName1: String = "tmp_table1"
  val tablePath1: String = Utils.createTempDir().getCanonicalPath
  val tablePath2: String = Utils.createTempDir().getCanonicalPath
  val tablePath3: String = Utils.createTempDir().getCanonicalPath

  /** Range-partitioned + hash-bucketed table, also registered under a short name. */
  def prepareTable1(): Unit = {
    Seq((1, "a", 11), (2, "b", 11), (3, "c", 11), (4, "d", 11)).toDF("key", "value", "range")
      .write
      .mode("overwrite")
      .format("star")
      .option("rangePartitions", "range")
      .option("hashPartitions", "key")
      .option("hashBucketNum", "2")
      .option("shortTableName", tableName1)
      .save(tablePath1)
  }

  /** Plain (unpartitioned) star table. */
  def prepareTable2(): Unit = {
    Seq((1, "aa", 22), (3, "cc", 22), (5, "ee", 22), (6, "ff", 22)).toDF("key", "value", "range")
      .write
      .mode("overwrite")
      .format("star")
      .save(tablePath2)
  }

  /** Range-partitioned table with a two-column key (k1, k2). */
  def prepareTable3(): Unit = {
    Seq(
      (1, "a", "a2", 22), (3, "c", "c2", 22), (5, "e", "e2", 22), (6, "f", "f2", 22),
      (1, "a", "a3", 33), (4, "d", "d3", 33), (5, "e", "e3", 33), (6, "f", "f3", 33)
    ).toDF("k1", "k2", "value", "range")
      .write
      .mode("overwrite")
      .option("rangePartitions", "range")
      .format("star")
      .save(tablePath3)
  }

  /** Hook: concrete suites create their material views here. */
  def prepareMaterialViews(): Unit

  /** Hook: concrete suites drop their material views here. */
  def cleanMaterialViews(): Unit

  /** Drops the table at `tablePath`, tolerating "table doesn't exist" errors;
    * any other exception propagates. */
  def dropTable(tablePath: String): Unit = {
    try {
      StarTable.forPath(tablePath).dropTable()
    } catch {
      case e: AnalysisException
        if e.getMessage().contains("Table") && e.getMessage().contains("doesn't exist") =>
      // already gone — nothing to do
    }
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    prepareTable1()
    prepareTable2()
    prepareTable3()
    prepareMaterialViews()
  }

  override def afterAll(): Unit = {
    try {
      dropTable(tablePath1)
      dropTable(tablePath2)
      dropTable(tablePath3)
      cleanMaterialViews()
    } finally {
      super.afterAll()
    }
  }
}
/** Rewrite tests against a material view built over a single star table:
  * `material_view1` = projection of `tablePath1` filtered to range in [5, 30).
  * Each test checks whether the optimized plan was (or was not) rewritten to
  * scan the view path instead of the base table. */
class SimpleRewriteWithSingleTable extends RewriteQueryByMaterialViewBase {
  import testImplicits._

  val viewName1: String = "material_view1"
  val viewPath1: String = Utils.createTempDir().getCanonicalPath

  override def prepareMaterialViews(): Unit = {
    // View covers range >= 5 and range < 30 over tablePath1.
    val sqlText1 =
      s"""
         |select a.key,value,range,length(range) as lr,concat_ws(',',key,a.value,'something') as cw
         |from star.`$tablePath1` a
         |where range>=5 and range<30
      """.stripMargin
    StarTable.createMaterialView(
      viewName1,
      viewPath1,
      sqlText1
    )
  }

  override def cleanMaterialViews(): Unit = {
    dropTable(viewPath1)
  }

  test("same query will be rewritten by material view") {
    // Textually identical query: plan must reference the view path.
    val sqlText =
      s"""
         |select a.key,value,range,length(range) as lr,concat_ws(',',key,a.value,'something') as cw
         |from star.`$tablePath1` a
         |where range>=5 and range<30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath1))
    checkAnswer(query.select("key", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", 11, 2, "1,a,something"),
        (2, "b", 11, 2, "2,b,something"),
        (3, "c", 11, 2, "3,c,something"),
        (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw"))
  }

  test("query with short table name will be rewritten by material view") {
    // Same query addressed through the registered short table name.
    val sqlText =
      s"""
         |select a.key,value,range,length(range) as lr,concat_ws(',',key,a.value,'something') as cw
         |from star.`$tableName1` a
         |where range>=5 and range<30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath1))
  }

  test("same query with range condition both sides exchanged will be rewritten by material view") {
    // `5<=range` / `30>range` is semantically the view's predicate.
    val sqlText =
      s"""
         |select a.key,value,range,length(range) as lr,concat_ws(',',key,a.value,'something') as cw
         |from star.`$tablePath1` a
         |where 5<=range and 30>range
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath1))
    checkAnswer(query.select("key", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", 11, 2, "1,a,something"),
        (2, "b", 11, 2, "2,b,something"),
        (3, "c", 11, 2, "3,c,something"),
        (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw"))
  }

  test("same query with different column name will be rewritten by material view") {
    // Output aliases (lr/cw) are swapped relative to the view definition.
    val sqlText =
      s"""
         |select a.key,value,range,length(range) as cw,concat_ws(',',key,a.value,'something') as lr
         |from star.`$tablePath1` a
         |where range>=5 and range<30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath1))
    checkAnswer(query.select("key", "value", "range", "cw", "lr"),
      Seq(
        (1, "a", 11, 2, "1,a,something"),
        (2, "b", 11, 2, "2,b,something"),
        (3, "c", 11, 2, "3,c,something"),
        (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "cw", "lr"))
  }

  test("query with large range interval shouldn't rewrite") {
    // Query ranges are supersets of the view's [5, 30): no rewrite allowed.
    val sqlText1 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>=5
      """.stripMargin
    val query1 = spark.sql(sqlText1)
    val plan1 = query1.queryExecution.optimizedPlan.toString()
    assert(!plan1.contains(viewPath1))
    val sqlText2 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>=5 and range<=30
      """.stripMargin
    val query2 = spark.sql(sqlText2)
    val plan2 = query2.queryExecution.optimizedPlan.toString()
    assert(!plan2.contains(viewPath1))
  }

  test("query with subset range interval should rewrite") {
    // Query ranges fall strictly inside the view's [5, 30): rewrite allowed.
    val sqlText1 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>5 and range<30
      """.stripMargin
    val query1 = spark.sql(sqlText1)
    val plan1 = query1.queryExecution.optimizedPlan.toString()
    assert(plan1.contains(viewPath1))
    checkAnswer(query1.select("key"),
      Seq(
        (1, "a", 11, 2, "1,a,something"), (2, "b", 11, 2, "2,b,something"),
        (3, "c", 11, 2, "3,c,something"), (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw")
        .select("key"))
    val sqlText2 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>7 and range<=20
      """.stripMargin
    val query2 = spark.sql(sqlText2)
    val plan2 = query2.queryExecution.optimizedPlan.toString()
    assert(plan2.contains(viewPath1))
    checkAnswer(query2.select("key"),
      Seq(
        (1, "a", 11, 2, "1,a,something"), (2, "b", 11, 2, "2,b,something"),
        (3, "c", 11, 2, "3,c,something"), (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw")
        .select("key"))
  }

  test("query with external condition should rewrite") {
    // Extra predicates on view output columns can be applied on top of the view.
    val sqlText1 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>5 and range<30 and key > 2
      """.stripMargin
    val query1 = spark.sql(sqlText1)
    val plan1 = query1.queryExecution.optimizedPlan.toString()
    assert(plan1.contains(viewPath1))
    checkAnswer(query1.select("key"),
      Seq(
        (3, "c", 11, 2, "3,c,something"), (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw")
        .select("key"))
    val sqlText2 =
      s"""
         |select key
         |from star.`$tablePath1` a
         |where range>7 and range<=20 and (value='b' or value='d')
      """.stripMargin
    val query2 = spark.sql(sqlText2)
    val plan2 = query2.queryExecution.optimizedPlan.toString()
    assert(plan2.contains(viewPath1))
    checkAnswer(query2.select("key"),
      Seq(
        (2, "b", 11, 2, "2,b,something"),
        (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw")
        .select("key"))
  }

  test("query with external or condition will be rewritten by material view") {
    // Additional disjunctive predicate on a view column keeps rewrite valid.
    val sqlText =
      s"""
         |select a.key,value,range,length(range) as lr,concat_ws(',',key,a.value,'something') as cw
         |from star.`$tablePath1` a
         |where range>=5 and range<30 and (key=1 or key>=3)
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath1))
    checkAnswer(query.select("key", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", 11, 2, "1,a,something"),
        (3, "c", 11, 2, "3,c,something"),
        (4, "d", 11, 2, "4,d,something"))
        .toDF("key", "value", "range", "lr", "cw"))
  }
}
/** Rewrite tests for material views defined over joins:
  *  - `material_view2`: two-table inner join of tablePath1/tablePath2;
  *  - `material_view5`: three-table nested inner join including tablePath3.
  * Tests check that matching (or stricter) queries are rewritten to the view
  * path and that queries with fewer predicates than the view are not. */
class RewriteWithJoinCondition extends RewriteQueryByMaterialViewBase {
  import testImplicits._

  val viewName2: String = "material_view2"
  val viewPath2: String = Utils.createTempDir().getCanonicalPath
  val viewName5: String = "material_view5"
  val viewPath5: String = Utils.createTempDir().getCanonicalPath

  override def prepareMaterialViews(): Unit = {
    // view2: a JOIN b on key with range/value filters
    val sqlText2 =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,b.key b_key,b.value b_value,b.range b_range,
         |length(b.range) as lr,
         |concat_ws(',',a.key,a.value,'something') as cw
         |from star.`$tablePath1` a join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2 and b.range<30 and b.value='cc'
      """.stripMargin
    StarTable.createMaterialView(
      viewName2,
      viewPath2,
      sqlText2
    )
    // view5: a JOIN (t1 JOIN filtered t3) — three-table nested join
    val sqlText5 =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,
         |b.t1_value b_t1_value,b.t2_value,b.range b_range
         |from star.`$tablePath1` a join
         |(select t1.key,t2.k2,t1.value t1_value,t2.value t2_value,t2.range range
         | from star.`$tablePath2` t1 join
         | (select * from star.`$tablePath3` where range>10) t2
         | on t1.key=t2.k1) b
         |on a.key=b.key
         |where a.range>=5
      """.stripMargin
    StarTable.createMaterialView(
      viewName5,
      viewPath5,
      sqlText5
    )
  }

  override def cleanMaterialViews(): Unit = {
    dropTable(viewPath2)
    dropTable(viewPath5)
  }

  test("same query should rewrite - join") {
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,b.key b_key,b.value b_value,b.range b_range,
         |length(b.range) as lr,
         |concat_ws(',',a.key,a.value,'something') as cw
         |from star.`$tablePath1` a join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2 and b.range<30 and b.value='cc'
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath2))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_key", "b_value", "b_range", "lr", "cw"),
      Seq(
        (3, "c", 11, 3, "cc", 22, 2, "3,c,something"))
        .toDF("a_key", "a_value", "a_range", "b_key", "b_value", "b_range", "lr", "cw"))
  }

  test("query with external condition should rewrite - join") {
    // Stricter ranges plus an extra filter on a view output column.
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,b.key b_key,b.value b_value,b.range b_range,
         |length(b.range) as lr,
         |concat_ws(',',a.key,a.value,'something') as cw
         |from star.`$tablePath1` a join star.`$tablePath2` b on a.key=b.key
         |where a.range>2 and b.range<25 and b.value='cc' and a.value='c'
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath2))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_key", "b_value", "b_range", "lr", "cw"),
      Seq(
        (3, "c", 11, 3, "cc", 22, 2, "3,c,something"))
        .toDF("a_key", "a_value", "a_range", "b_key", "b_value", "b_range", "lr", "cw"))
  }

  test("query with less condition should not rewrite - join") {
    // Missing the view's `b.value='cc'` filter: the view is a subset, no rewrite.
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,b.key b_key,b.value b_value,b.range b_range,
         |length(b.range) as lr,
         |concat_ws(',',a.key,a.value,'something') as cw
         |from star.`$tablePath1` a join star.`$tablePath2` b on a.key=b.key
         |where a.range>2 and b.range<25
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath2))
  }

  test("same query should rewrite - multi table inner join") {
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,
         |b.t1_value b_t1_value,b.t2_value,b.range b_range
         |from star.`$tablePath1` a join
         |(select t1.key,t2.k2,t1.value t1_value,t2.value t2_value,t2.range range
         | from star.`$tablePath2` t1 join
         | (select * from star.`$tablePath3` where range>10) t2
         | on t1.key=t2.k1) b
         |on a.key=b.key
         |where a.range>=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath5))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_t1_value", "t2_value", "b_range"),
      Seq(
        (1, "a", 11, "aa", "a2", 22),
        (1, "a", 11, "aa", "a3", 33),
        (3, "c", 11, "cc", "c2", 22))
        .toDF("a_key", "a_value", "a_range", "b_t1_value", "t2_value", "b_range"))
  }

  test("query with external condition in `on` should rewrite - multi table inner join") {
    // Extra predicates inside the join `on` clauses still allow the rewrite.
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,
         |b.t1_value b_t1_value,b.t2_value,b.range b_range
         |from star.`$tablePath1` a join
         |(select t1.key,t2.k2,t1.value t1_value,t2.value t2_value,t2.range range
         | from star.`$tablePath2` t1 join
         | (select * from star.`$tablePath3` where range>10) t2
         | on t1.key=t2.k1 and key<10) b
         |on a.key=b.key and b.t2_value!='a2'
         |where a.range>=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath5))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_t1_value", "t2_value", "b_range"),
      Seq(
        (1, "a", 11, "aa", "a3", 33),
        (3, "c", 11, "cc", "c2", 22))
        .toDF("a_key", "a_value", "a_range", "b_t1_value", "t2_value", "b_range"))
  }

  test("query with less condition shouldn't rewrite - multi table inner join") {
    // Missing the view's `a.range>=5` filter: no rewrite.
    val sqlText =
      s"""
         |select a.key as a_key,a.value a_value,a.range a_range,
         |b.t1_value b_t1_value,b.t2_value,b.range b_range
         |from star.`$tablePath1` a join
         |(select t1.key,t2.k2,t1.value t1_value,t2.value t2_value,t2.range range
         | from star.`$tablePath2` t1 join
         | (select * from star.`$tablePath3` where range>10) t2
         | on t1.key=t2.k1) b
         |on a.key=b.key
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath5))
  }
}
/** Rewrite tests for material views containing aggregates:
  *  - `material_view3`: single-table group-by with having;
  *  - `material_view4`: left-join + group-by with having;
  *  - `material_view6`: three-table join + group-by.
  * Extra HAVING predicates on view outputs permit rewrite; extra predicates
  * *below* the aggregate (in WHERE or join `on`) must prevent it. */
class RewriteWithAggregateCondition extends RewriteQueryByMaterialViewBase {
  import testImplicits._

  val viewName3: String = "material_view3"
  val viewPath3: String = Utils.createTempDir().getCanonicalPath
  val viewName4: String = "material_view4"
  val viewPath4: String = Utils.createTempDir().getCanonicalPath
  val viewName6: String = "material_view6"
  val viewPath6: String = Utils.createTempDir().getCanonicalPath

  override def prepareMaterialViews(): Unit = {
    val sqlText3 =
      s"""
         |select a.k1 a_k1,a.k2 a_k2,collect_list(a.value) a_value,max(a.range) a_range
         |from star.`$tablePath3` a
         |where a.range>=2
         |group by a.k1,a.k2 having a.k1 > 1
      """.stripMargin
    StarTable.createMaterialView(
      viewName3,
      viewPath3,
      sqlText3
    )
    val sqlText4 =
      s"""
         |select a.key as a_key,max(a.value) a_value,last(a.range) a_range,min(b.value) b_value
         |from star.`$tablePath1` a left join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2
         |group by a.key having a.key > 1
      """.stripMargin
    StarTable.createMaterialView(
      viewName4,
      viewPath4,
      sqlText4
    )
    val sqlText6 =
      s"""
         |select max(a.key) as a_key,min(a.value) a_value,first(a.range) a_range,
         |last(b.value) b_value,last(b.range) b_range,
         |c.k1,c.k2,collect_list(c.value) c_value,collect_list(c.range) c_range
         |from star.`$tablePath1` a,star.`$tablePath2` b,star.`$tablePath3` c
         |where a.key=b.key and b.key=c.k1 and a.range>=5 and c.range>10
         |group by c.k1,c.k2
      """.stripMargin
    StarTable.createMaterialView(
      viewName6,
      viewPath6,
      sqlText6
    )
  }

  override def cleanMaterialViews(): Unit = {
    dropTable(viewPath3)
    dropTable(viewPath4)
    dropTable(viewPath6)
  }

  test("same query should rewrite - aggregate") {
    val sqlText =
      s"""
         |select a.k1 a_k1,a.k2 a_k2,collect_list(a.value) a_value,max(a.range) a_range
         |from star.`$tablePath3` a
         |where a.range>=2
         |group by a.k1,a.k2 having a.k1 > 1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath3))
    checkAnswer(query.select("a_k1", "a_k2", "a_value", "a_range"),
      Seq(
        (3, "c", List("c2"), 22),
        (4, "d", List("d3"), 33),
        (5, "e", List("e2", "e3"), 33),
        (6, "f", List("f2", "f3"), 33))
        .toDF("a_k1", "a_k2", "a_value", "a_range"))
  }

  test("query with external having condition should rewrite - aggregate") {
    // Extra HAVING predicate on a grouping column: applied on top of the view.
    val sqlText =
      s"""
         |select a.k1 a_k1,a.k2 a_k2,collect_list(a.value) a_value,max(a.range) a_range
         |from star.`$tablePath3` a
         |where a.range>=2
         |group by a.k1,a.k2 having a.k1 > 1 and a.k2='d'
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath3))
    checkAnswer(query.select("a_k1", "a_k2", "a_value", "a_range"),
      Seq(
        (4, "d", List("d3"), 33))
        .toDF("a_k1", "a_k2", "a_value", "a_range"))
  }

  test("query with external condition under aggregate shouldn't rewrite - aggregate") {
    // Extra WHERE predicate changes the aggregate's input: no rewrite.
    val sqlText =
      s"""
         |select a.k1 a_k1,a.k2 a_k2,collect_list(a.value) a_value,max(a.range) a_range
         |from star.`$tablePath3` a
         |where a.range>=2 and a.k2='d'
         |group by a.k1,a.k2 having a.k1 > 1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath3))
  }

  test("same query should rewrite - aggregate & left join") {
    val sqlText =
      s"""
         |select a.key as a_key,max(a.value) a_value,last(a.range) a_range,min(b.value) b_value
         |from star.`$tablePath1` a left join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2
         |group by a.key having a.key > 1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath4))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_value"),
      Seq(
        (2, "b", 11, null),
        (3, "c", 11, "cc"),
        (4, "d", 11, null))
        .toDF("a_key", "a_value", "a_range", "b_value"))
  }

  test("query with external having condition should rewrite - aggregate & left join") {
    // Extra HAVING predicate on an aggregated view output: still rewritable.
    val sqlText =
      s"""
         |select a.key as a_key,max(a.value) a_value,last(a.range) a_range,min(b.value) b_value
         |from star.`$tablePath1` a left join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2
         |group by a.key having a.key>1 and min(b.value)='cc'
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath4))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_value"),
      Seq(
        (3, "c", 11, "cc"))
        .toDF("a_key", "a_value", "a_range", "b_value"))
  }

  test("query with external condition under aggregate shouldn't rewrite - aggregate & left join") {
    // Extra WHERE predicate below the aggregate: no rewrite.
    val sqlText =
      s"""
         |select a.key as a_key,max(a.value) a_value,last(a.range) a_range,min(b.value) b_value
         |from star.`$tablePath1` a left join star.`$tablePath2` b on a.key=b.key
         |where a.range>=2 and b.value='c'
         |group by a.key having a.key>1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath4))
  }

  test("query with external join condition under aggregate shouldn't rewrite - aggregate & left join") {
    // Extra predicate in the join `on` clause below the aggregate: no rewrite.
    val sqlText =
      s"""
         |select a.key as a_key,max(a.value) a_value,last(a.range) a_range,min(b.value) b_value
         |from star.`$tablePath1` a left join star.`$tablePath2` b on a.key=b.key and b.value='c'
         |where a.range>=2
         |group by a.key having a.key>1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath4))
  }

  test("same query should rewrite - aggregate & multi join") {
    val sqlText =
      s"""
         |select max(a.key) as a_key,min(a.value) a_value,first(a.range) a_range,
         |last(b.value) b_value,last(b.range) b_range,
         |c.k1,c.k2,collect_list(c.value) c_value,collect_list(c.range) c_range
         |from star.`$tablePath1` a,star.`$tablePath2` b,star.`$tablePath3` c
         |where a.key=b.key and b.key=c.k1 and a.range>=5 and c.range>10
         |group by c.k1,c.k2
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath6))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_value", "b_range", "k1", "k2", "c_value", "c_range"),
      Seq(
        (1, "a", 11, "aa", 22, 1, "a", List("a3", "a2"), List(33, 22)),
        (3, "c", 11, "cc", 22, 3, "c", List("c2"), List(22)))
        .toDF("a_key", "a_value", "a_range", "b_value", "b_range", "k1", "k2", "c_value", "c_range"))
  }

  test("query with having condition should rewrite - aggregate & multi join") {
    // Query adds a HAVING the view doesn't have: applied after the view scan.
    val sqlText =
      s"""
         |select max(a.key) as a_key,min(a.value) a_value,first(a.range) a_range,
         |last(b.value) b_value,last(b.range) b_range,
         |c.k1,c.k2,collect_list(c.value) c_value,collect_list(c.range) c_range
         |from star.`$tablePath1` a,star.`$tablePath2` b,star.`$tablePath3` c
         |where a.key=b.key and b.key=c.k1 and a.range>=5 and c.range>10
         |group by c.k1,c.k2
         |having min(a.value)='c'
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath6))
    checkAnswer(query.select("a_key", "a_value", "a_range", "b_value", "b_range", "k1", "k2", "c_value", "c_range"),
      Seq(
        (3, "c", 11, "cc", 22, 3, "c", List("c2"), List(22)))
        .toDF("a_key", "a_value", "a_range", "b_value", "b_range", "k1", "k2", "c_value", "c_range"))
  }

  test("query with external condition under aggregate shouldn't rewrite - aggregate & multi join") {
    // Extra WHERE predicate below the aggregate: no rewrite.
    val sqlText =
      s"""
         |select max(a.key) as a_key,min(a.value) a_value,first(a.range) a_range,
         |last(b.value) b_value,last(b.range) b_range,
         |c.k1,c.k2,collect_list(c.value) c_value,collect_list(c.range) c_range
         |from star.`$tablePath1` a,star.`$tablePath2` b,star.`$tablePath3` c
         |where a.key=b.key and b.key=c.k1 and a.range>=5 and c.range>10 and a.value='c'
         |group by c.k1,c.k2
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath6))
  }
}
/** Rewrite tests for material views whose predicates contain OR:
  *  - `material_view7`: pure disjunction of three conjuncts;
  *  - `material_view8`: disjunction ANDed with an outer conjunct.
  * A query rewrites only when its predicate region is contained in the
  * view's region.
  *
  * Fix over the original: removed two leftover debug
  * `println(query.queryExecution.toString())` statements that spammed test
  * output. */
class RewriteWithOrCondition extends RewriteQueryByMaterialViewBase {
  import testImplicits._

  val viewName7: String = "material_view7"
  val viewPath7: String = Utils.createTempDir().getCanonicalPath
  val viewName8: String = "material_view8"
  val viewPath8: String = Utils.createTempDir().getCanonicalPath

  override def prepareMaterialViews(): Unit = {
    val sqlText7 =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>1 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=5
      """.stripMargin
    StarTable.createMaterialView(
      viewName7,
      viewPath7,
      sqlText7
    )
    val sqlText8 =
      s"""
         |select a.k1,k2,value,range,length(range) as lr
         |from star.`$tablePath3` a
         |where ((k1>1 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=1) and k1>=0
      """.stripMargin
    StarTable.createMaterialView(
      viewName8,
      viewPath8,
      sqlText8
    )
  }

  override def cleanMaterialViews(): Unit = {
    dropTable(viewPath7)
    dropTable(viewPath8)
  }

  test("same query should rewrite - or") {
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>1 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath7))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (3, "c", "c2", 22, 2, "3,c,c2"),
        (4, "d", "d3", 33, 2, "4,d,d3"),
        (5, "e", "e2", 22, 2, "5,e,e2"),
        (5, "e", "e3", 33, 2, "5,e,e3"),
        (6, "f", "f3", 33, 2, "6,f,f3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query with less or condition should rewrite - or") {
    // Subset of the view's disjuncts: still contained in the view.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>1 and range>=30) or k1=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath7))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (4, "d", "d3", 33, 2, "4,d,d3"),
        (5, "e", "e2", 22, 2, "5,e,e2"),
        (5, "e", "e3", 33, 2, "5,e,e3"),
        (6, "f", "f3", 33, 2, "6,f,f3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query with or condition inbounds should rewrite - or") {
    // Each disjunct is strictly tighter than a view disjunct.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>2 and range>30 and value='e3') or (k1<3 and range<24 and value!='a2')
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath7))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (5, "e", "e3", 33, 2, "5,e,e3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query without or condition inbounds should rewrite - or") {
    // A single conjunct matching one view disjunct exactly.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1>1 and range>=30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath7))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (4, "d", "d3", 33, 2, "4,d,d3"),
        (5, "e", "e3", 33, 2, "5,e,e3"),
        (6, "f", "f3", 33, 2, "6,f,f3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query without or condition inbounds should rewrite (equal replace range) - or") {
    // Equality predicate `k1=4` is contained in the view's `k1>1` range.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1=4 and range>=30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath7))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (4, "d", "d3", 33, 2, "4,d,d3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query with more or condition shouldn't rewrite - or") {
    // Extra disjunct (`k1=4`) widens the region beyond the view.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>1 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=5 or k1=4
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath7))
  }

  test("query with or condition not inbounds shouldn't rewrite - or") {
    // `k1>0` is wider than the view's `k1>1`.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where (k1>0 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath7))
  }

  test("query without or condition not inbounds shouldn't rewrite - or") {
    // `k1=0` falls outside every view disjunct.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1=0 and range>=30
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath7))
  }

  test("query without condition shouldn't rewrite - or") {
    // Unfiltered scan can't be served by a filtered view.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath7))
  }

  test("same query should rewrite - or & and") {
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where ((k1>1 and range>=30) or (k1<=3 and range<25 and value!='a2') or k1=1) and k1>=0
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath8))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", "a2", 22, 2, "1,a,a2"),
        (1, "a", "a3", 33, 2, "1,a,a3"),
        (3, "c", "c2", 22, 2, "3,c,c2"),
        (4, "d", "d3", 33, 2, "4,d,d3"),
        (5, "e", "e3", 33, 2, "5,e,e3"),
        (6, "f", "f3", 33, 2, "6,f,f3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query with external condition should rewrite - or & and") {
    // Extra `k2='c'` tightens a disjunct while staying inside the view.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where ((k1<=3 and range<25 and value!='a2' and k2='c') or k1=1) and k1>=0
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath8))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", "a2", 22, 2, "1,a,a2"),
        (1, "a", "a3", 33, 2, "1,a,a3"),
        (3, "c", "c2", 22, 2, "3,c,c2"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query without or condition inbounds should rewrite - or & and") {
    // `k1=1` matches a view disjunct and satisfies the outer `k1>=0`.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1=1
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(plan.contains(viewPath8))
    checkAnswer(query.select("k1", "k2", "value", "range", "lr", "cw"),
      Seq(
        (1, "a", "a2", 22, 2, "1,a,a2"),
        (1, "a", "a3", 33, 2, "1,a,a3"))
        .toDF("k1", "k2", "value", "range", "lr", "cw"))
  }

  test("query with or condition not inbounds shouldn't rewrite - or & and") {
    // `k1<=4` is wider than the view's `k1<=3`.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where ((k1>1 and range>=30) or (k1<=4 and range<25 and value!='a2') or k1=1) and k1>=0
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath8))
  }

  test("query without or condition not inbounds shouldn't rewrite - or & and") {
    // `k1=5` is not covered by any of view8's disjuncts.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1=5
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath8))
  }

  test("query without condition shouldn't rewrite - or & and") {
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath8))
  }

  test("query without or condition (just and condition) shouldn't rewrite - or & and") {
    // The outer `k1>=0` alone is wider than the view's full predicate.
    val sqlText =
      s"""
         |select a.k1,k2,value,range,length(range) as lr,concat_ws(',',k1,k2,a.value) as cw
         |from star.`$tablePath3` a
         |where k1>=0
      """.stripMargin
    val query = spark.sql(sqlText)
    val plan = query.queryExecution.optimizedPlan.toString()
    assert(!plan.contains(viewPath8))
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/rules/StarLakePostHocAnalysis.scala
|
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.rules
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.expressions.{And, EqualTo}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.{LeftAnti, LeftSemi}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.star.SnapshotManagement
import org.apache.spark.sql.star.catalog.StarLakeTableV2
case class StarLakePostHocAnalysis(spark: SparkSession) extends Rule[LogicalPlan] {

  /**
   * Hash columns in StarTable are primary keys, they have no duplicate data and shouldn't be null,
   * so [[Intersect]]/[[Except]] operator can be replaced with a left-semi/left-anti [[Join]]:
   * {{{
   *   SELECT a1, a2 FROM Tab1 INTERSECT/EXCEPT (ALL) SELECT b1, b2 FROM Tab2
   *   ==> SELECT a1, a2 FROM Tab1 LEFT SEMI/ANTI JOIN Tab2 ON a1=b1 AND a2=b2
   * }}}
   *
   * Note: this rule only applies when both sides are StarLake relations whose
   * referenced columns are exactly the hash columns.
   */
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsDown {
    case intersect @ Intersect(left, right, _) =>
      rewriteSetOpAsJoin(intersect, left, right, LeftSemi)
    case except @ Except(left, right, _) =>
      rewriteSetOpAsJoin(except, left, right, LeftAnti)
  }

  /**
   * Rewrites an INTERSECT as a LEFT SEMI join (or an EXCEPT as a LEFT ANTI join)
   * when both children read StarLake tables whose referenced columns are exactly
   * the hash (primary-key) columns and both tables share the same bucket count.
   * Otherwise returns the matched operator unchanged.
   *
   * BUG FIX: the previous implementation returned the enclosing `plan` from
   * inside the rule's partial function when the rewrite did not apply, which
   * would graft the entire plan into the matched subtree; the correct fallback
   * is the matched node itself (`original`).
   */
  private def rewriteSetOpAsJoin(
      original: LogicalPlan,
      left: LogicalPlan,
      right: LogicalPlan,
      joinType: org.apache.spark.sql.catalyst.plans.JoinType): LogicalPlan = {
    assert(left.output.size == right.output.size)
    val strippedLeft = EliminateSubqueryAliases(left)
    val strippedRight = EliminateSubqueryAliases(right)
    // Make sure the output attributes are hash columns on both sides, and that
    // both sides are bucketed identically (same bucket_num).
    val (leftOk, leftBuckets) =
      findStarLakeRelation(strippedLeft, strippedLeft.references.map(_.name).toSet)
    val (rightOk, rightBuckets) =
      findStarLakeRelation(strippedRight, strippedRight.references.map(_.name).toSet)
    if (leftOk && rightOk && leftBuckets == rightBuckets) {
      // Pairwise equality over the aligned outputs forms the join condition.
      val joinCond = left.output.zip(right.output).map { case (l, r) => EqualTo(l, r) }
      Join(left, right, joinType, joinCond.reduceLeftOption(And), JoinHint.NONE)
    } else {
      original
    }
  }

  /**
   * Walks down a single-child chain looking for a StarLake relation whose hash
   * partition columns equal `outCols` and which has a valid bucket count.
   *
   * @return (true, bucketNum) when such a relation is found, (false, -1) otherwise.
   */
  def findStarLakeRelation(plan: LogicalPlan, outCols: Set[String]): (Boolean, Int) = {
    plan match {
      case DataSourceV2Relation(StarLakeTableV2(_, path, _, _, _, _), _, _, _, _) =>
        val tableInfo = SnapshotManagement(path).getTableInfoOnly
        val hashCols = tableInfo.hash_partition_columns.toSet
        if (hashCols.equals(outCols) && tableInfo.bucket_num != -1) {
          (true, tableInfo.bucket_num)
        } else {
          (false, -1)
        }
      case lp: LogicalPlan =>
        // Only follow unary nodes; branching plans cannot be matched safely.
        if (lp.children.size == 1) {
          findStarLakeRelation(lp.children.head, outCols)
        } else {
          (false, -1)
        }
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/merge_operator/MergeColumnarBatchNew.scala
|
<filename>src/main/scala/org/apache/spark/sql/execution/datasources/v2/merge/parquet/batch/merge_operator/MergeColumnarBatchNew.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.merge_operator
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.v2.merge.parquet.batch.MergeOperatorColumnarBatchRow
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.vectorized.ColumnVector
/**
 * Construct a column batch for merged result.
 *
 * @param columns        ordered column vectors of all files
 * @param mergeOps       merge operators
 * @param indexTypeArray result schema index and type
 */
class MergeColumnarBatchNew(columns: Array[ColumnVector],
                            mergeOps: Seq[MergeOperator[Any]],
                            indexTypeArray: Seq[FieldIndex]) extends AutoCloseable {

  // Single reusable row object: every getRow/getMergeRow call mutates and
  // returns this same instance, so callers must consume (or copy) it before
  // requesting the next row.
  val row = new MergeOperatorColumnarBatchRow(columns, mergeOps, indexTypeArray)

  /**
   * Merges the values addressed by `resultIndex` into the shared row and
   * returns it as an [[InternalRow]].
   * (Previously a verbatim copy of getMergeRow; now delegates to it.)
   */
  def getRow(resultIndex: Seq[Seq[MergeColumnIndex]]): InternalRow = getMergeRow(resultIndex)

  /** Same as [[getRow]], but typed as the concrete batch-row implementation. */
  def getMergeRow(resultIndex: Seq[Seq[MergeColumnIndex]]): MergeOperatorColumnarBatchRow = {
    row.idMix = resultIndex
    row.mergeValues()
    row
  }

  /** Releases every underlying column vector. */
  override def close(): Unit = {
    for (c <- columns) {
      c.close()
    }
  }
}
|
engine-plus/StarLake
|
src/main/scala/org/apache/spark/sql/star/commands/alterTableCommands.scala
|
<reponame>engine-plus/StarLake<gh_stars>10-100
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.star.commands
import org.apache.spark.sql.catalyst.analysis.{Resolver, UnresolvedAttribute}
import org.apache.spark.sql.catalyst.plans.logical.{IgnoreCachedData, QualifiedColType}
import org.apache.spark.sql.connector.catalog.TableChange.{After, ColumnPosition, First}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter
import org.apache.spark.sql.star.catalog.StarLakeTableV2
import org.apache.spark.sql.star.exception.StarLakeErrors
import org.apache.spark.sql.star.schema.SchemaUtils
import org.apache.spark.sql.star.utils.DataFileInfo
import org.apache.spark.sql.star.{StarLakeConfig, TransactionCommit}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import scala.util.control.NonFatal
/**
 * A super trait for alter table commands that modify Star tables.
 */
trait AlterTableCommand extends Command {

  /** The StarLake table being altered. */
  def table: StarLakeTableV2

  /**
   * Opens a transaction on the table, failing fast when the target location
   * holds no StarLake table yet (a first commit means nothing exists to alter).
   */
  protected def startTransaction(): TransactionCommit = {
    val transaction = table.snapshotManagement.startTransaction()
    if (transaction.isFirstCommit) {
      throw StarLakeErrors.notAnStarLakeSourceException(table.name())
    }
    transaction
  }
}
/**
 * Sets configuration properties on a Star table.
 *
 * SQL syntax:
 * {{{
 *   ALTER TABLE table1 SET TBLPROPERTIES ('key1' = 'val1', 'key2' = 'val2', ...);
 * }}}
 */
case class AlterTableSetPropertiesCommand(
    table: StarLakeTableV2,
    configuration: Map[String, String])
  extends RunnableCommand with AlterTableCommand with IgnoreCachedData {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val transaction = startTransaction()
    val currentInfo = transaction.tableInfo
    // Overlay the requested properties onto the existing ones; on key
    // collision the newly supplied value wins.
    val updatedInfo = currentInfo.copy(
      configuration = currentInfo.configuration ++ configuration)
    transaction.commit(Seq.empty[DataFileInfo], Seq.empty[DataFileInfo], updatedInfo)
    Seq.empty[Row]
  }
}
/**
 * Unsets configuration properties on a Star table.
 * When `ifExists` is false, each key is checked individually and the command
 * fails on the first missing one; when true, missing keys are silently ignored.
 *
 * SQL syntax:
 * {{{
 *   ALTER TABLE table1 UNSET TBLPROPERTIES [IF EXISTS] ('key1', 'key2', ...);
 * }}}
 */
case class AlterTableUnsetPropertiesCommand(
    table: StarLakeTableV2,
    propKeys: Seq[String],
    ifExists: Boolean)
  extends RunnableCommand with AlterTableCommand with IgnoreCachedData {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val transaction = startTransaction()
    val currentInfo = transaction.tableInfo
    val normalizedKeys = StarLakeConfig.normalizeConfigKeys(propKeys)
    if (!ifExists) {
      // Fail on the first absent key, mirroring the original one-by-one check.
      normalizedKeys.find(key => !currentInfo.configuration.contains(key)).foreach { k =>
        throw new AnalysisException(
          s"Attempted to unset non-existent property '$k' in table ${table.name()}")
      }
    }
    // Drop every requested key from the configuration map.
    val remaining = currentInfo.configuration -- normalizedKeys
    transaction.commit(Seq.empty[DataFileInfo], Seq.empty[DataFileInfo],
      currentInfo.copy(configuration = remaining))
    Seq.empty[Row]
  }
}
/**
 * A command that add columns to a star table.
 * The syntax of using this command in SQL is:
 * {{{
 *   ALTER TABLE table_identifier
 *   ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
 * }}}
 */
case class AlterTableAddColumnsCommand(
    table: StarLakeTableV2,
    colsToAddWithPosition: Seq[QualifiedColType])
  extends RunnableCommand with AlterTableCommand with IgnoreCachedData {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val tc = startTransaction()

    // NOT NULL columns cannot be backfilled for existing rows, so reject them
    // up front (checked recursively, including nested struct fields).
    if (SchemaUtils.filterRecursively(
      StructType(colsToAddWithPosition.map {
        case QualifiedColTypeWithPosition(_, column, _) => column
      }), true)(!_.nullable).nonEmpty) {
      throw StarLakeErrors.operationNotSupportedException("NOT NULL in ALTER TABLE ADD COLUMNS")
    }

    // TODO: remove this after auto cache refresh is merged.
    table.tableIdentifier.foreach { identifier =>
      try sparkSession.catalog.uncacheTable(identifier) catch {
        case NonFatal(e) =>
          log.warn(s"Exception when attempting to uncache table $identifier", e)
      }
    }

    val tableInfo = tc.tableInfo
    val oldSchema = tableInfo.schema
    val resolver = sparkSession.sessionState.conf.resolver

    // Fold each requested column into the schema, honouring FIRST/AFTER positions.
    val newSchema = colsToAddWithPosition.foldLeft(oldSchema) {
      // No position given: append at the end of the (possibly nested) parent struct.
      case (schema, QualifiedColTypeWithPosition(columnPath, column, None)) =>
        val (parentPosition, lastSize) =
          SchemaUtils.findColumnPosition(columnPath, schema, resolver)
        SchemaUtils.addColumn(schema, column, parentPosition :+ lastSize)
      // FIRST: insert at index 0 of the parent struct.
      case (schema, QualifiedColTypeWithPosition(columnPath, column, Some(_: First))) =>
        val (parentPosition, _) = SchemaUtils.findColumnPosition(columnPath, schema, resolver)
        SchemaUtils.addColumn(schema, column, parentPosition :+ 0)
      // AFTER x: insert immediately after the referenced sibling column.
      case (schema,
      QualifiedColTypeWithPosition(columnPath, column, Some(after: After))) =>
        val (prevPosition, _) =
          SchemaUtils.findColumnPosition(columnPath :+ after.column, schema, resolver)
        val position = prevPosition.init :+ (prevPosition.last + 1)
        SchemaUtils.addColumn(schema, column, position)
    }

    // Validate the result before committing: unique names, Parquet-legal names.
    SchemaUtils.checkColumnNameDuplication(newSchema, "in adding columns")
    ParquetSchemaConverter.checkFieldNames(SchemaUtils.explodeNestedFieldNames(newSchema))
    val newTableInfo = tableInfo.copy(table_schema = newSchema.json)
    tc.commit(Seq.empty[DataFileInfo], Seq.empty[DataFileInfo], newTableInfo)
    Seq.empty[Row]
  }

  // Extractor turning a QualifiedColType into (parent path, field, position),
  // propagating the column comment into the field's metadata.
  object QualifiedColTypeWithPosition {
    def unapply(
        col: QualifiedColType): Option[(Seq[String], StructField, Option[ColumnPosition])] = {
      val builder = new MetadataBuilder
      col.comment.foreach(builder.putString("comment", _))
      val field = StructField(col.name.last, col.dataType, col.nullable, builder.build())
      Some((col.name.init, field, col.position))
    }
  }
}
/**
 * A command to change the column for a Star table, support changing the comment of a column and
 * reordering columns.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *   ALTER TABLE table_identifier
 *   CHANGE [COLUMN] column_old_name column_new_name column_dataType [COMMENT column_comment]
 *   [FIRST | AFTER column_name];
 * }}}
 */
case class AlterTableChangeColumnCommand(
    table: StarLakeTableV2,
    columnPath: Seq[String],
    columnName: String,
    newColumn: StructField,
    colPosition: Option[ColumnPosition])
  extends RunnableCommand with AlterTableCommand with IgnoreCachedData {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val tc = startTransaction()
    val tableInfo = tc.tableInfo
    val oldSchema = tableInfo.schema
    val resolver = sparkSession.sessionState.conf.resolver

    // Verify that the columnName provided actually exists in the schema
    SchemaUtils.findColumnPosition(columnPath :+ columnName, oldSchema, resolver)

    // Rewrite only the struct that directly contains the target column; all
    // other structs pass through unchanged (second case below).
    val newSchema = SchemaUtils.transformColumnsStructs(oldSchema, columnName) {
      case (`columnPath`, struct@StructType(fields), _) =>
        val oldColumn = struct(columnName)
        verifyColumnChange(struct(columnName), resolver)

        // Take the comment, nullability and data type from newField
        val newField = newColumn.getComment().map(oldColumn.withComment).getOrElse(oldColumn)
          .copy(
            dataType =
              SchemaUtils.changeDataType(oldColumn.dataType, newColumn.dataType, resolver),
            nullable = newColumn.nullable)

        // Replace existing field with new field
        val newFieldList = fields.map { field =>
          if (field.name == columnName) newField else field
        }

        // Reorder new field to correct position if necessary
        colPosition.map { position =>
          reorderFieldList(struct, newFieldList, newField, position, resolver)
        }.getOrElse(newFieldList.toSeq)
      case (_, _@StructType(fields), _) => fields
    }

    val newTableInfo = tableInfo.copy(table_schema = newSchema.json)
    tc.commit(Seq.empty[DataFileInfo], Seq.empty[DataFileInfo], newTableInfo)
    Seq.empty[Row]
  }

  /**
   * Reorder the given fieldList to place `field` at the given `position` in `fieldList`
   *
   * @param struct    The initial StructType with the original field at its original position
   * @param fieldList List of fields with the changed field in the original position
   * @param field     The field that is to be added
   * @param position  Position where the field is to be placed
   * @return Returns a new list of fields with the changed field in the new position
   */
  private def reorderFieldList(
      struct: StructType,
      fieldList: Array[StructField],
      field: StructField,
      position: ColumnPosition,
      resolver: Resolver): Seq[StructField] = {
    val startIndex = struct.fieldIndex(columnName)
    val filtered = fieldList.filterNot(_.name == columnName)
    val newFieldList = position match {
      case _: First =>
        field +: filtered

      // AFTER itself: keep the column at its original index.
      case after: After if after.column() == columnName =>
        filtered.slice(0, startIndex) ++
          Seq(field) ++
          filtered.slice(startIndex, filtered.length)

      case after: After =>
        val endIndex = filtered.indexWhere(i => resolver(i.name, after.column()))
        if (endIndex < 0) {
          throw StarLakeErrors.columnNotInSchemaException(after.column(), struct)
        }
        filtered.slice(0, endIndex + 1) ++
          Seq(field) ++
          filtered.slice(endIndex + 1, filtered.length)
    }
    newFieldList.toSeq
  }

  /**
   * Given two columns, verify whether replacing the original column with the new column is a valid
   * operation
   *
   * @param originalField The existing column
   */
  private def verifyColumnChange(
      originalField: StructField,
      resolver: Resolver): Unit = {
    // Complex types (struct/map/array) cannot be restructured via CHANGE COLUMN;
    // only atomic-type updates (plus comment/position changes) are allowed.
    originalField.dataType match {
      case same if same == newColumn.dataType =>
      // just changing comment or position so this is fine
      case s: StructType if s != newColumn.dataType =>
        val fieldName = UnresolvedAttribute(columnPath :+ columnName).name
        throw new AnalysisException(
          s"Cannot update ${table.name()} field $fieldName type: " +
            s"update a struct by adding, deleting, or updating its fields")
      case m: MapType if m != newColumn.dataType =>
        val fieldName = UnresolvedAttribute(columnPath :+ columnName).name
        throw new AnalysisException(
          s"Cannot update ${table.name()} field $fieldName type: " +
            s"update a map by updating $fieldName.key or $fieldName.value")
      case a: ArrayType if a != newColumn.dataType =>
        val fieldName = UnresolvedAttribute(columnPath :+ columnName).name
        throw new AnalysisException(
          s"Cannot update ${table.name()} field $fieldName type: " +
            s"update the element by updating $fieldName.element")
      case _: AtomicType =>
      // update is okay
      case o =>
        throw new AnalysisException(s"Cannot update ${table.name()} field of type $o")
    }

    // Renames, non-widening type changes, and nullable->non-nullable are rejected.
    if (columnName != newColumn.name ||
      SchemaUtils.canChangeDataType(originalField.dataType, newColumn.dataType, resolver,
        columnPath :+ originalField.name).nonEmpty ||
      (originalField.nullable && !newColumn.nullable)) {
      throw StarLakeErrors.alterTableChangeColumnException(
        s"'${UnresolvedAttribute(columnPath :+ originalField.name).name}' with type " +
          s"'${originalField.dataType}" +
          s" (nullable = ${originalField.nullable})'",
        s"'${UnresolvedAttribute(Seq(newColumn.name)).name}' with type " +
          s"'${newColumn.dataType}" +
          s" (nullable = ${newColumn.nullable})'")
    }
  }
}
/**
 * Replaces the full column list of a Star table, supporting comment changes,
 * column reordering, and loosening of nullability.
 *
 * SQL syntax:
 * {{{
 *   ALTER TABLE table_identifier REPLACE COLUMNS (col_spec[, col_spec ...]);
 * }}}
 */
case class AlterTableReplaceColumnsCommand(
    table: StarLakeTableV2,
    columns: Seq[StructField])
  extends RunnableCommand with AlterTableCommand with IgnoreCachedData {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val transaction = startTransaction()
    val currentInfo = transaction.tableInfo
    val existingSchema = currentInfo.schema
    val resolver = sparkSession.sessionState.conf.resolver
    val targetSchema = StructType(columns)

    // Reject any replacement that is not a safe change (each disallowed
    // operation reported by canChangeDataType aborts the command).
    SchemaUtils.canChangeDataType(existingSchema, targetSchema, resolver).foreach { operation =>
      throw StarLakeErrors.alterTableReplaceColumnsException(
        existingSchema, targetSchema, operation)
    }

    // Merge the new column definitions into the existing schema, then validate
    // name uniqueness and Parquet field-name legality before committing.
    val mergedSchema = SchemaUtils
      .changeDataType(existingSchema, targetSchema, resolver)
      .asInstanceOf[StructType]
    SchemaUtils.checkColumnNameDuplication(mergedSchema, "in replacing columns")
    ParquetSchemaConverter.checkFieldNames(SchemaUtils.explodeNestedFieldNames(mergedSchema))

    transaction.commit(Seq.empty[DataFileInfo], Seq.empty[DataFileInfo],
      currentInfo.copy(table_schema = mergedSchema.json))
    Seq.empty[Row]
  }
}
|
engine-plus/StarLake
|
src/main/scala/com/engineplus/star/meta/MetaLock.scala
|
<filename>src/main/scala/com/engineplus/star/meta/MetaLock.scala
/*
* Copyright [2021] [EnginePlus Team]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.engineplus.star.meta
// Distributed lock built on Cassandra lightweight transactions (conditional
// INSERT/DELETE). Each lock row pairs a lock_id with the commit_id that holds it.
//
// NOTE(review): lockId/commitId are interpolated directly into CQL strings;
// presumably they are internally generated identifiers that can never contain
// a quote character — confirm, otherwise this is CQL-injectable.
object MetaLock {
  private val cassandraConnector = MetaUtils.cassandraConnector
  private val database = MetaUtils.DATA_BASE

  /**
   * Attempts to acquire the lock `lockId` on behalf of `commitId` via
   * `INSERT ... IF NOT EXISTS`.
   *
   * @return (true, "") when the lock was acquired; (false, holderCommitId)
   *         when another commit already holds it.
   */
  def lock(lockId: String, commitId: String): (Boolean, String) = {
    cassandraConnector.withSessionDo(session => {
      val re = session.execute(
        s"""
           |insert into $database.lock_info(lock_id,commit_id)
           |values('$lockId','$commitId')
           |if not exists
        """.stripMargin)
      if (re.wasApplied()) {
        (true, "")
      } else {
        // LWT failure returns the existing row; report the current holder.
        (false, re.one().getString("commit_id"))
      }
    })
  }

  /**
   * Releases `lockId`, but only if it is still held by `commitId`
   * (conditional DELETE).
   *
   * @return true when the delete was applied, false when the lock was held
   *         by a different commit (or absent).
   */
  def unlock(lockId: String, commitId: String): Boolean = {
    cassandraConnector.withSessionDo(session => {
      session.execute(
        s"""
           |delete from $database.lock_info
           |where lock_id='$lockId'
           |if commit_id='$commitId'
        """.stripMargin).wasApplied()
    })
  }
}
|
chdmwu/CaffeOnSpark
|
caffe-grid/src/main/scala/com/yahoo/ml/caffe/CaffeOnSpark.scala
|
<filename>caffe-grid/src/main/scala/com/yahoo/ml/caffe/CaffeOnSpark.scala
// Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache 2.0 license.
// Please see LICENSE file in the project root for terms.
package com.yahoo.ml.caffe
import java.io.{FileReader, PrintWriter}
import java.net.InetAddress
import caffe.Caffe._
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql
import org.apache.spark.sql.types.{FloatType, StructField, StructType, ArrayType, StringType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.sql.functions.udf
import org.slf4j.{LoggerFactory, Logger}
import scala.collection.mutable
object CaffeOnSpark {
  private val log: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Entry point. Parses a [[Config]] from the command line, then runs the
   * requested phases: training, and/or feature extraction or testing.
   */
  def main(args: Array[String]): Unit = {
    val sc_conf = new SparkConf()
    sc_conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.scheduler.minRegisteredResourcesRatio", "1.0")
    val sc: SparkContext = new SparkContext(sc_conf)

    // Caffe-on-Spark configuration (was a needless `var`).
    val conf = new Config(sc, args)
    val caffeSpark = new CaffeOnSpark(sc)

    // Training phase, if specified.
    if (conf.isTraining) {
      val source = DataSource.getSource(conf, true)
      caffeSpark.train(source)
    }

    // Feature extraction or test phase, if specified.
    if (conf.isFeature || conf.isTest) {
      val source = DataSource.getSource(conf, false)
      if (conf.isFeature) {
        // Extract features and save them in the configured output format.
        // (save returns Unit; the old code captured it in an unused `val rdf`.)
        caffeSpark.features(source).write
          .format(source.conf.outputFormat)
          .save(source.conf.outputPath)
      } else {
        // Run the test phase and write results to a local file first.
        val result = caffeSpark.test(source)
        val outputPath = source.conf.outputPath
        val localFilePath: String =
          if (outputPath.startsWith(FSUtils.localfsPrefix))
            outputPath.substring(FSUtils.localfsPrefix.length)
          else
            System.getProperty("user.dir") + "/test_result.tmp"
        val out: PrintWriter = new PrintWriter(localFilePath)
        try {
          // foreach, not map: only the printing side effect is wanted.
          result.foreach {
            case (name, r) => out.println(name + ": " + r.mkString(","))
          }
        } finally {
          // Close the writer even if printing fails, so the file is flushed.
          out.close()
        }
        // Upload the result file to HDFS when the output path is not local.
        if (!outputPath.startsWith(FSUtils.localfsPrefix))
          FSUtils.CopyFileToHDFS(localFilePath, outputPath)
      }
    }
  }
}
/**
 * CaffeOnSpark is the main class for distributed deep learning.
 * It will launch multiple Caffe cores within Spark executors, and conduct coordinated learning from HDFS datasets.
 *
 * @param sc Spark Context
 */
class CaffeOnSpark(@transient val sc: SparkContext) extends Serializable {
  @transient private val log: Logger = LoggerFactory.getLogger(this.getClass)

  // UDF: take the first element of a float-array column as a Double.
  @transient val floatarray2doubleUDF = udf((float_features: Seq[Float]) => {
    float_features(0).toDouble
  })

  // UDF: convert a float-array column into a dense MLlib Double vector.
  @transient val floatarray2doublevectorUDF = udf((float_features: Seq[Float]) => {
    val double_features = new Array[Double](float_features.length)
    for (i <- 0 until float_features.length) double_features(i) = float_features(i)
    Vectors.dense(double_features)
  })

  /**
   * Training with a specific data source
   *
   * @param source input data source
   */
  def train[T1, T2](source: DataSource[T1, T2]): Unit = {
    var trainDataRDD: RDD[T1] = source.makeRDD(sc)
    if (trainDataRDD == null) {
      log.info("No training data is given")
      return
    }

    //Phase 1: Gather RDMA addresses from executors
    val conf = source.conf
    if (!conf.snapshotStateFile.isEmpty && conf.snapshotModelFile.isEmpty) {
      log.error("to resume training, please provide input model file")
      return
    }
    // One CaffeProcessor per cluster rank; each reports (rank, address, host).
    var rank_2_addresses_n_host = sc.parallelize(0 until conf.clusterSize, conf.clusterSize).map {
      case rank: Int => {
        val processor = CaffeProcessor.instance[T1, T2](source, rank)
        //announce local RDMA address
        if (conf.clusterSize > 1) {
          (rank, processor.getLocalAddress(), InetAddress.getLocalHost.getHostName)
        } else {
          (rank, new Array[String](1), InetAddress.getLocalHost.getHostName)
        }
      }
    }.collect()
    for (i <- rank_2_addresses_n_host)
      log.info("rank = " + i._1 + ", address = " + i._2.mkString(",") + ", hostname = " + i._3)

    // Sanity check: cluster must supply exactly clusterSize executors
    // (plus the driver when not running locally).
    var numExecutors: Int = sc.getExecutorMemoryStatus.size
    val numDriver: Int = if (sc.isLocal) 0 else 1
    if (conf.clusterSize + numDriver != numExecutors) {
      log.error("Requested # of executors: " + conf.clusterSize + " actual # of executors:" + (numExecutors - numDriver) +
        ". Please try to set --conf spark.scheduler.maxRegisteredResourcesWaitingTime with a large value (default 30s)")
      throw new IllegalStateException("actual number of executors is not as expected")
    }

    //Phase 2: bcast RDMA addresses
    val rank_2_addresses = rank_2_addresses_n_host.map {
      case (rank, rdma_addr, host) => {
        if (rank == 0) log.info("rank 0:" + host)
        (rank, rdma_addr)
      }
    }
    val bcast_addresses = sc.broadcast(rank_2_addresses)

    //Phase 3: set up the processors
    sc.parallelize(0 until conf.clusterSize, conf.clusterSize).map {
      case rank: Int => {
        val processor = CaffeProcessor.instance[T1, T2]()
        //start processor w/ the given addresses
        processor.start(bcast_addresses.value)
      }
    }.collect()

    //Phase 4: repartition RDD if needed
    // Partition count must be a multiple of clusterSize so each executor
    // receives the same number of partitions per round.
    val origin_part_count = trainDataRDD.partitions.size
    val desired_part_count = (origin_part_count / conf.clusterSize) * conf.clusterSize
    if (origin_part_count != desired_part_count) {
      trainDataRDD = trainDataRDD.coalesce(desired_part_count, true)
      log.info("Training dataset partition count: " + origin_part_count + " -> " + desired_part_count)
    }
    if (conf.isRddPersistent) {
      trainDataRDD = trainDataRDD.persist(StorageLevel.DISK_ONLY)
    }

    //Phase 5: find the minimum size of partitions
    var minPartSize = 0
    if (conf.clusterSize > 1) {
      val sizeRDD = trainDataRDD.mapPartitions {
        iter => {
          val partSize = iter.size
          // Spark decides how data partitions are distributed among executors in this step.
          // synchronize among the executors,
          // to achieve same number of partitions.
          val processor = CaffeProcessor.instance[T1, T2]()
          processor.sync()
          Iterator(partSize)
        }
      }.persist()
      minPartSize = sizeRDD.min()
      log.info("Partition size: min=" + minPartSize + " max=" + sizeRDD.max())
    }

    //Phase 6: feed the processor
    // Loop until every solver reports completion (feedQueue returning false).
    var continue: Boolean = true
    while (continue) {
      //conduct training with dataRDD
      continue = trainDataRDD.mapPartitions {
        iter => {
          var res = false
          //feed training data from iterator
          val processor = CaffeProcessor.instance[T1, T2]()
          if (!processor.solversFinished) {
            if (minPartSize > 0) {
              var idx = 0
              //the entire iterator needs to be consumed, otherwise GC won't be triggered
              res = iter.map { sample => {
                idx += 1
                if (idx <= minPartSize) processor.feedQueue(sample) else true
              }}.reduce(_ && _)
            } else {
              res = iter.map { sample => processor.feedQueue(sample) }.reduce(_ && _)
            }
            processor.solversFinished = !res
          }
          Iterator(res)
        }
      }.reduce(_ && _)
    }

    //Phase 7: shutdown processors
    shutdownProcessors(conf)
  }

  /**
   * a utility function for shutting processor thread pool
   */
  private def shutdownProcessors[T1, T2](conf: Config): Unit = {
    sc.parallelize(0 until conf.clusterSize, conf.clusterSize).map {
      _ => {
        val processor = CaffeProcessor.instance[T1, T2]()
        processor.stop()
      }
    }.collect()
  }

  /**
   * Test with a specific data source.
   * Test result will be saved into HDFS file per configuration.
   *
   * @param source input data source
   * @return key/value map for mean values of output layers
   */
  def test[T1, T2](source: DataSource[T1, T2]): Map[String, Seq[Double]] = {
    source.conf.isTest = true
    val testDF = features2(source)

    var result = new mutable.HashMap[String, Seq[Double]]
    // compute the mean of the columns
    testDF.columns.zipWithIndex.map {
      case (name, index) => {
        if (index > 0) {
          // first column is SampleId, ignored.
          // Width of this output blob, taken from the first row.
          val n: Int = testDF.take(1)(0).getSeq[Double](index).size
          val ndf = testDF.agg(new VectorMean(n)(testDF(name)))
          val r: Seq[Double] = ndf.take(1)(0).getSeq[Double](0)
          result(name) = r
        }
      }
    }

    //shutdown processors
    shutdownProcessors(source.conf)

    result.toMap
  }

  /**
   * Extract features from a specific data source.
   * Features will be saved into DataFrame per configuration.
   *
   * @param source input data source
   * @return Feature data frame
   */
  def features[T1, T2](source: DataSource[T1, T2]): DataFrame = {
    source.conf.isTest = false
    var featureDF = features2(source)
    //take action to force featureDF persisted
    featureDF.count()
    //shutdown processors
    shutdownProcessors(source.conf)
    featureDF
  }

  /**
   * Extract features from a data source
   *
   * @param source input data source
   * @return a data frame
   */
  private def features2[T1, T2](source: DataSource[T1, T2]): DataFrame = {
    val srcDataRDD = source.makeRDD(sc)
    val conf = source.conf
    val clusterSize: Int = conf.clusterSize

    //Phase 1: start Caffe processor within each executor
    val size = sc.parallelize(0 until clusterSize, clusterSize).map {
      case rank: Int => {
        // each processor has clusterSize 1 and rank 0
        val processor = CaffeProcessor.instance[T1, T2](source, rank)
      }
    }.count()
    if (size < clusterSize) {
      log.error((clusterSize - size) + "executors have failed. Please check Spark executor logs")
      throw new IllegalStateException("Executor failed at CaffeProcessor startup for test/feature extraction")
    }

    // Sanity check
    val numExecutors: Int = sc.getExecutorMemoryStatus.size
    val numDriver: Int = if (sc.isLocal) 0 else 1
    if ((size + numDriver) != sc.getExecutorMemoryStatus.size) {
      log.error("Requested # of executors: " + clusterSize + " actual # of executors:" + (numExecutors - numDriver) +
        ". Please try to set --conf spark.scheduler.maxRegisteredResourcesWaitingTime with a large value (default 30s)")
      throw new IllegalStateException("actual number of executors is not as expected")
    }

    // Phase 2 get output schema
    // Feature mode uses the configured blob names; test mode asks an executor
    // for the network's test output blob names.
    val blobNames = if (conf.isFeature)
      conf.features
    else // this is test mode
      sc.parallelize(0 until clusterSize, clusterSize).map { _ =>
        val processor = CaffeProcessor.instance[T1, T2]()
        processor.getTestOutputBlobNames
      }.collect()(0)
    val schema = new StructType(Array(StructField("SampleID", StringType, false)) ++ blobNames.map(name => StructField(name, ArrayType(FloatType), false)))
    log.info("Schema:" + schema)

    //Phase 3: feed the processors
    val featureRDD = srcDataRDD.mapPartitions {
      iter => {
        val processor: CaffeProcessor[T1, T2] = CaffeProcessor.instance[T1, T2]()
        val feature_iter: Iterator[Row] =
          if (processor.solversFinished)
            Iterator()
          else {
            processor.synchronized {
              processor.start(null)
              val res = iter.map { sample => processor.feedQueue(sample) }.reduce(_ && _)
              processor.solversFinished = !res
              processor.stopThreads()
              import scala.collection.JavaConversions._
              processor.results.iterator
            }
          }
        feature_iter
      }
    }

    //Phase 4: Create output data frame
    val sqlContext = new sql.SQLContext(sc)
    sqlContext.createDataFrame(featureRDD, schema).persist(StorageLevel.DISK_ONLY)
  }
}
|
chdmwu/CaffeOnSpark
|
caffe-grid/src/main/scala/com/yahoo/ml/caffe/ImageDataFrame.scala
|
<reponame>chdmwu/CaffeOnSpark
// Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache 2.0 license.
// Please see LICENSE file in the project root for terms.
package com.yahoo.ml.caffe
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.storage.StorageLevel
/**
 * ImageDataFrame is a built-in data source class using Spark dataframe format.
 *
 * ImageDataFrame expects dataframe with 2 required columns (lable:String, data:byte[]),
 * and 5 optional columns (id: String, channels :Int, height:Int, width:Int, encoded: Boolean).
 *
 * ImageDataFrame could be configured via the following MemoryDataLayer parameter:
 * (1) dataframe_column_select ... a collection of dataframe SQL selection statements
 * (ex. "sampleId as id", "abs(height) as height")
 * (2) image_encoded ... indicate whether image data are encoded or not. (default: false)
 * (3) dataframe_format ... Dataframe Format. (default: parquet)
 *
 * @param conf    CaffeSpark configuration
 * @param layerId the layer index in the network protocol file
 * @param isTrain
 */
class ImageDataFrame(conf: Config, layerId: Int, isTrain: Boolean)
  extends ImageDataSource(conf, layerId, isTrain) {

  /* construct a sample RDD: (id, label, channels, height, width, encoded, data) */
  def makeRDD(sc: SparkContext): RDD[(String, String, Int, Int, Int, Boolean, Array[Byte])] = {
    val sqlContext = new SQLContext(sc)

    //load DataFrame
    var reader = sqlContext.read
    if (memdatalayer_param.hasDataframeFormat())
      reader = reader.format(memdatalayer_param.getDataframeFormat())
    var df: DataFrame = reader.load(sourceFilePath)

    //select columns if specified
    if (memdatalayer_param.getDataframeColumnSelectCount() > 0) {
      val selects = memdatalayer_param.getDataframeColumnSelectList()
      import scala.collection.JavaConversions._
      df = df.selectExpr(selects.toList:_*)
    }

    //check optional columns; absent ones fall back to defaults below
    val column_names : Array[String] = df.columns
    val has_id : Boolean = column_names.contains("id")
    val has_channels : Boolean = column_names.contains("channels")
    val has_height : Boolean = column_names.contains("height")
    val has_width : Boolean = column_names.contains("width")
    val has_encoded : Boolean = column_names.contains("encoded")

    //mapping each row to RDD tuple
    df.map(row => {
      var id: String = if (!has_id) "" else row.getAs[String]("id")
      var label: String = row.getAs[String]("label")
      val channels : Int = if (!has_channels) 0 else row.getAs[Int]("channels")
      val height : Int = if (!has_height) 0 else row.getAs[Int]("height")
      val width : Int = if (!has_width) 0 else row.getAs[Int]("width")
      val encoded : Boolean = if (!has_encoded) memdatalayer_param.getImageEncoded() else row.getAs[Boolean]("encoded")
      // "data" may arrive as a string or a raw byte array; anything else is an error.
      val data : Array[Byte] = row.getAs[Any]("data") match {
        case str: String => str.getBytes
        case arr: Array[Byte@unchecked] => arr
        case _ => {
          log.error("Unsupport value type")
          null
        }
      }
      (id, label, channels, height, width, encoded, data)
    }).persist(StorageLevel.DISK_ONLY)
  }
}
|
jmategk0/enron_email_parser
|
spark_enron_parser.scala
|
<filename>spark_enron_parser.scala
// Imports and Spark session setup for the Enron mail-parsing script.
import org.apache.spark.sql.SparkSession
// Local session; Hive support is enabled (catalog access via enableHiveSupport).
val ss = SparkSession.
builder().
master("local").
appName("Spark in Motion Example").
config("spark.config.option", "some-value").
enableHiveSupport().
getOrCreate()
import ss.implicits._
import org.apache.spark.sql.functions._
// NOTE(review): java.util.Date appears unused in this script — confirm before removing.
import java.util.Date;
/**
 * Parse the header section of a raw Enron email.
 *
 * Headers are the lines preceding the first "Subject:" occurrence; by the
 * Enron maildir layout these are Message-ID (line 0), From (line 1),
 * Date (line 2), and the To list (lines 3+, possibly wrapped).
 *
 * @param email full raw email text
 * @return (messageId, date, from, toStr, originalEmail)
 */
def parseEmail(email: String): (String, String, String, String, String) = {
  val fields = email.split("Subject:")(0).split('\n')
  val messageId = fields(0).split(":")(1).trim
  val date = fields(2).split(":")(1).trim
  val from = fields(1).split(":")(1).trim
  // Fixed: original assigned to a `val` (compile error) and called `.trim`
  // on a Char via `mkString(":")(1)`. Join the wrapped recipient lines and
  // keep the text after the header key.
  val toStr =
    if (fields.length > 3) {
      val joined = fields.slice(3, fields.length).map(_.trim).mkString(":")
      joined.split(":", 2).lift(1).map(_.trim).getOrElse("")
    } else ""
  (messageId, date, from, toStr, email)
}
// Read every mailbox file; wholeTextFiles yields (path, contents) pairs.
// Fixed: the SparkContext method is `wholeTextFiles` (lowercase w).
val files = sc.wholeTextFiles("file:///data/enron/maildir/*/*/*")
val emails = files.map(x => x._2)
val results = emails.map(x => parseEmail(x))
// Fixed: `spark` was never defined in this script — the session is `ss`.
// NOTE(review): the output path "file:///data.assignment2/output2" looks like a
// typo for "file:///data/assignment2/output2" — confirm before running.
ss.createDataFrame(results).write.csv("file:///data.assignment2/output2")
|
borissmidt/slick-mysql
|
core/src/main/scala/com/foerstertechnologies/slickmysql/money/MySQLMoneyExtension.scala
|
<filename>core/src/main/scala/com/foerstertechnologies/slickmysql/money/MySQLMoneyExtension.scala<gh_stars>1-10
package com.foerstertechnologies.slickmysql.money
import slick.jdbc.{JdbcType, JdbcTypesComponent, MySQLProfile}
import slick.ast.{Library, LiteralNode, TypedType}
import slick.ast.Library.{SqlFunction, SqlOperator}
import slick.lifted.ExtensionMethods
/**
 * Slick extension point wiring money-column support into a MySQL profile.
 * Mix into a MySQLProfile to expose SQL arithmetic operators and the
 * extension-method wrapper for money-typed columns.
 */
trait MySQLMoneyExtension extends JdbcTypesComponent {
  self: MySQLProfile =>
  import self.api._

  /** SQL arithmetic operators usable in money expressions. */
  object MoneyLibrary {
    val + = new SqlOperator("+")
    val - = new SqlOperator("-")
    val * = new SqlOperator("*")
    val / = new SqlOperator("/")
  }

  /**
   * Extension methods for a money-typed column `c`.
   *
   * @tparam MoneyType the Scala money representation (requires a JdbcType instance)
   * @tparam P1        the column's packed type: MoneyType or Option[MoneyType]
   */
  class MoneyColumnExtensionMethods[MoneyType, P1](val c: Rep[P1])(implicit tm: JdbcType[MoneyType]) extends ExtensionMethods[MoneyType, P1] {
    // TypedType evidence required by ExtensionMethods, derived from the implicit JdbcType.
    protected implicit def b1Type: TypedType[MoneyType] = implicitly[TypedType[MoneyType]]
  }
}
|
borissmidt/slick-mysql
|
addons/circe-json/src/main/scala/com/foerstertechnologies/slickmysql/MySQLCirceJsonSupport.scala
|
package com.foerstertechnologies.slickmysql
import slick.jdbc.{JdbcProfile, JdbcType, MySQLProfile, PositionedResult}
import scala.reflect.classTag
import java.sql.{PreparedStatement, ResultSet, SQLData, SQLInput, SQLOutput}
import io.circe.Json
import io.circe.parser._
import slick.jdbc.{GetResult, JdbcType, MySQLProfile, PositionedResult, SetParameter}
import scala.reflect.classTag
/**
 * Circe-based JSON support for a MySQL Slick profile.
 *
 * Mix into a MySQLProfile to map MySQL `json` columns to io.circe.Json,
 * both for the lifted API (CirceJsonImplicits) and for plain SQL
 * (CirceJsonPlainImplicits).
 */
trait MySQLCirceJsonSupport extends json.MySQLJsonExtension with utils.MySQLCommonJdbcTypes {
  self: MySQLProfile =>
  import self.api._
  ///---
  // SQL type name used when binding parameters and registering the mapper.
  val json: String = "json"
  ///---
  trait CirceJsonCodeGenSupport {
    // register types to let `ExModelBuilder` find them
    self match {
      case profile1: ExMySQLProfile =>
        profile1.bindMySQLTypeToScala("json", classTag[Json])
        profile1.bindMySQLTypeToScala("jsonb", classTag[Json])
      case _ =>
    }
  }
  trait CirceJsonImplicits extends CirceJsonCodeGenSupport {
    // Column mapper: text that fails to parse is read as Json.Null; on write,
    // "\u0000" escape sequences are stripped (presumably because MySQL rejects
    // NUL escapes in JSON values — confirm).
    implicit val circeJsonTypeMapper: JdbcType[Json] =
      new GenericJdbcType[Json](
        json,
        v => parse(v).toOption.getOrElse(Json.Null),
        v => v.noSpaces.replace("\\u0000", ""),
        java.sql.Types.LONGVARCHAR,
        hasLiteralForm = false
      )
    // JSON extension methods for non-optional json columns.
    implicit def circeJsonColumnExtensionMethods(
      c: Rep[Json]
    ): JsonColumnExtensionMethods[Json, Json] = {
      new JsonColumnExtensionMethods[Json, Json](c)
    }
    // JSON extension methods for Option[Json] columns.
    implicit def circeJsonOptionColumnExtensionMethods(
      c: Rep[Option[Json]]
    ): JsonColumnExtensionMethods[Json, Option[Json]] = {
      new JsonColumnExtensionMethods[Json, Option[Json]](c)
    }
  }
  trait CirceJsonPlainImplicits extends CirceJsonCodeGenSupport {
    import utils.PlainSQLUtils._
    // PositionedResult helpers for plain-SQL row reading.
    implicit class MySQLJsonPositionedResult(r: PositionedResult) {
      def nextJson(): Json = nextJsonOption().getOrElse(Json.Null)
      def nextJsonOption(): Option[Json] = r.nextStringOption().flatMap(parse(_).toOption)
    }
    ////////////////////////////////////////////////////////////
    implicit val getJson: AnyRef with GetResult[Json] = mkGetResult(_.nextJson())
    implicit val getJsonOption: AnyRef with GetResult[Option[Json]] = mkGetResult(
      _.nextJsonOption()
    )
    implicit val setJson: SetParameter[Json] = mkSetParameter[Json](json, _.noSpaces)
    implicit val setJsonOption: SetParameter[Option[Json]] =
      mkOptionSetParameter[Json](json, _.noSpaces)
  }
}
|
borissmidt/slick-mysql
|
addons/jts/src/main/scala/com/foerstertechnologies/slickmysql/MySQLSpatialSupport.scala
|
package com.foerstertechnologies.slickmysql
import java.sql.{PreparedStatement, ResultSet}
import com.foerstertechnologies.slickmysql.spatial._
import org.locationtech.jts.geom._
import slick.ast.FieldSymbol
import slick.jdbc._
import scala.reflect.{ClassTag, classTag}
/**
 * JTS geometry support for a MySQL Slick profile: JdbcTypes for the JTS
 * geometry hierarchy plus lifted (MySQLSpatialImplicits) and plain-SQL
 * (MySQLSpatialPlainImplicits) helpers.
 */
trait MySQLSpatialSupport extends MySQLSpatialExtension { self: MySQLProfile =>
  import self.api._
  trait MySQLSpatialCodeGenSupport {
    // register types to let `ExMMySQLBuilder` find them
    if (self.isInstanceOf[ExMySQLProfile]) {
      self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("geometry", classTag[Geometry])
    }
  }
  ///
  trait MySQLSpatialAssistants extends BaseMySQLSpatialAssistence[Geometry, Point, LineString, Polygon, GeometryCollection]
  trait MySQLSpatialImplicits extends MySQLSpatialCodeGenSupport {
    // One JdbcType instance per concrete JTS geometry class.
    implicit val geometryTypeMapper: JdbcType[Geometry] = new GeometryJdbcType[Geometry]
    implicit val pointTypeMapper: JdbcType[Point] = new GeometryJdbcType[Point]
    implicit val polygonTypeMapper: JdbcType[Polygon] = new GeometryJdbcType[Polygon]
    implicit val lineStringTypeMapper: JdbcType[LineString] = new GeometryJdbcType[LineString]
    implicit val linearRingTypeMapper: JdbcType[LinearRing] = new GeometryJdbcType[LinearRing]
    implicit val geometryCollectionTypeMapper: JdbcType[GeometryCollection] = new GeometryJdbcType[GeometryCollection]
    implicit val multiPointTypeMapper: JdbcType[MultiPoint] = new GeometryJdbcType[MultiPoint]
    implicit val multiPolygonTypeMapper: JdbcType[MultiPolygon] = new GeometryJdbcType[MultiPolygon]
    implicit val multiLineStringTypeMapper: JdbcType[MultiLineString] = new GeometryJdbcType[MultiLineString]
    ///
    // Spatial operators/extension methods for (optional) geometry columns.
    implicit def geometryColumnExtensionMethods[G1 <: Geometry](c: Rep[G1]) =
      new GeometryColumnExtensionMethods[Geometry, Point, LineString, Polygon, GeometryCollection, G1, G1](c)
    implicit def geometryOptionColumnExtensionMethods[G1 <: Geometry](c: Rep[Option[G1]]) =
      new GeometryColumnExtensionMethods[Geometry, Point, LineString, Polygon, GeometryCollection, G1, Option[G1]](c)
  }
  trait MySQLSpatialPlainImplicits extends MySQLSpatialCodeGenSupport {
    import MySQLSpatialSupportUtils._
    import utils.PlainSQLUtils._
    // PositionedResult helpers: geometries are read back from their literal form.
    implicit class SpatialPositionedResult(r: PositionedResult) {
      def nextGeometry[T <: Geometry](): T = nextGeometryOption().getOrElse(null.asInstanceOf[T])
      def nextGeometryOption[T <: Geometry](): Option[T] = r.nextStringOption().map(fromLiteral[T])
    }
    ////////////////////////////////////////////////////////////////////////////////
    implicit val getGeometry = mkGetResult(_.nextGeometry[Geometry]())
    implicit val getGeometryOption = mkGetResult(_.nextGeometryOption[Geometry]())
    implicit object SetGeometry extends SetParameter[Geometry] {
      def apply(v: Geometry, pp: PositionedParameters) = setGeometry(Option(v), pp)
    }
    implicit object SetGeometryOption extends SetParameter[Option[Geometry]] {
      def apply(v: Option[Geometry], pp: PositionedParameters) = setGeometry(v, pp)
    }
    ///
    // Bind as WKB bytes, or SQL NULL when absent.
    private def setGeometry[T <: Geometry](maybeGeo: Option[T], p: PositionedParameters) = {
      maybeGeo match {
        case Some(v) => p.setBytes(toBytes(v))
        case None => p.setNull(java.sql.Types.OTHER)
      }
    }
  }
  //////////////////////// geometry jdbc type ///////////
  /** JdbcType storing geometries as WKB bytes in a MySQL `geometry` column. */
  class GeometryJdbcType[T <: Geometry](implicit override val classTag: ClassTag[T]) extends DriverJdbcType[T] {
    import MySQLSpatialSupportUtils._
    override def sqlType: Int = java.sql.Types.OTHER
    override def sqlTypeName(sym: Option[FieldSymbol]): String = "geometry"
    override def getValue(r: ResultSet, idx: Int): T = {
      val geoInWkb = r.getBytes(idx)
      // Distinguish SQL NULL from a decodable value.
      if (r.wasNull()) null.asInstanceOf[T]
      else fromBytes(geoInWkb)
    }
    override def setValue(v: T, p: PreparedStatement, idx: Int): Unit = {
      p.setBytes(idx,toBytes(v))
    }
    override def updateValue(v: T, r: ResultSet, idx: Int): Unit = {
      r.updateBytes(idx, toBytes(v))
    }
    // Literal form is only used for debugging SQL; values are always bound.
    override def hasLiteralForm: Boolean = false
    override def valueToSQLLiteral(v: T) = {
      if (v eq null) "NULL" else s"'${toLiteral(v)}'"
    }
  }
}
|
borissmidt/slick-mysql
|
src/main/scala/com/foerstertechnologies/slickmysql/MySQLJsonSupport.scala
|
<gh_stars>1-10
package com.foerstertechnologies.slickmysql
import slick.jdbc.{JdbcProfile, JdbcType, MySQLProfile, PositionedResult}
import scala.reflect.classTag
/** Simple wrapper marking a String as raw JSON text. */
case class JsonString(value: String)
/**
 * Simple JSON support: maps MySQL `json` columns to the JsonString wrapper.
 * Sufficient when all you need is reading/writing JSON text and using the
 * profile's JSON operations/methods — no JSON-library integration.
 */
trait MySQLJsonSupport extends json.MySQLJsonExtension with utils.MySQLCommonJdbcTypes { self: MySQLProfile =>
  import self.api._
  ///---
  // SQL type name used when binding parameters and registering the mapper.
  val json = "json"
  ///---
  trait SimpleJsonCodeGenSupport {
    // register types to let `ExModelBuilder` find them
    if (self.isInstanceOf[ExMySQLProfile]) {
      self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("json", classTag[JsonString])
      self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("jsonb", classTag[JsonString])
    }
  }
  /// alias
  trait JsonImplicits extends SimpleJsonImplicits
  trait SimpleJsonImplicits extends SimpleJsonCodeGenSupport {
    // Column mapper: the DB text is wrapped/unwrapped verbatim, no parsing.
    implicit val simpleJsonTypeMapper: JdbcType[JsonString] =
      new GenericJdbcType[JsonString](
        json,
        (v) => JsonString(v),
        (v) => v.value,
        java.sql.Types.LONGVARCHAR,
        hasLiteralForm = false
      )
    // JSON extension methods for non-optional json columns.
    implicit def simpleJsonColumnExtensionMethods(c: Rep[JsonString]) = {
      new JsonColumnExtensionMethods[JsonString, JsonString](c)
    }
    // JSON extension methods for Option[JsonString] columns.
    implicit def simpleJsonOptionColumnExtensionMethods(c: Rep[Option[JsonString]]) = {
      new JsonColumnExtensionMethods[JsonString, Option[JsonString]](c)
    }
  }
  trait SimpleJsonPlainImplicits extends SimpleJsonCodeGenSupport {
    import utils.PlainSQLUtils._
    // PositionedResult helpers for plain-SQL row reading.
    implicit class MySQLJsonPositionedResult(r: PositionedResult) {
      def nextJson() = nextJsonOption().orNull
      def nextJsonOption() = r.nextStringOption().map(JsonString)
    }
    //////////////////////////////////////////////////////////////
    implicit val getJson = mkGetResult(_.nextJson())
    implicit val getJsonOption = mkGetResult(_.nextJsonOption())
    implicit val setJson = mkSetParameter[JsonString](json, _.value)
    implicit val setJsonOption = mkOptionSetParameter[JsonString](json, _.value)
  }
}
|
borissmidt/slick-mysql
|
addons/circe-json/src/test/scala/com/foerstertechnologies/slickmysql/MySQLCircleJsonSupportSuite.scala
|
package com.foerstertechnologies.slickmysql
import org.scalatest.FunSuite
import slick.jdbc.{GetResult, MySQLProfile, PostgresProfile}
import scala.concurrent.{Await, ExecutionContext}
import java.util.concurrent.Executors
/**
 * Smoke test: the circe-json profile cake compiles and a Database handle
 * can be created from it. No connection is attempted (forURL is lazy).
 */
class MySQLCircleJsonSupportSuite extends FunSuite {
  // Dedicated pool so the suite does not depend on the global execution context.
  implicit val testExecContext: scala.concurrent.ExecutionContextExecutorService =
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))

  trait MyMySQLProfile extends MySQLProfile with MySQLCirceJsonSupport {
    override val api: API = new API {}
    val plainApi = new API with CirceJsonPlainImplicits
    trait API extends super.API with CirceJsonImplicits
  }
  object MyMySQLProfile extends MyMySQLProfile

  import MyMySQLProfile.api._

  test("Profile builds") {
    // Fixed: "localhost" alone is not a valid JDBC URL; use a jdbc:mysql URL.
    // The handle is also asserted on, so the val is no longer unused.
    val db = Database.forURL("jdbc:mysql://localhost:3306/test", driver = "com.mysql.cj.jdbc.Driver")
    assert(db != null)
  }
}
|
borissmidt/slick-mysql
|
addons/joda-money/src/main/scala/com/foerstertechnologies/slickmysql/MySQLJodaMoneySupport.scala
|
package com.foerstertechnologies.slickmysql
import java.math.RoundingMode
import java.sql.{PreparedStatement, ResultSet}
import org.joda.money.{CurrencyUnit, Money}
import slick.ast.NumericTypedType
import slick.jdbc._
import scala.reflect.classTag
/**
 * Joda-Money support for a MySQL Slick profile: Money values are persisted
 * as SQL DECIMAL amounts. Only the amount is stored — the currency is
 * re-attached from `defaultCurrencyUnit` on read.
 */
trait MySQLJodaMoneySupport extends money.MySQLMoneyExtension with utils.MySQLCommonJdbcTypes { self: MySQLProfile =>
  import self.api._

  // Currency assumed for every value read back from the database.
  val defaultCurrencyUnit: CurrencyUnit = CurrencyUnit.EUR

  trait JodaMoneyCodegenSupport {
    // register types to let `ExMyMySQLBuilder` find them
    if (self.isInstanceOf[ExMySQLProfile]) {
      self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("money", classTag[Money])
    }
  }

  /** Maps joda Money <-> SQL DECIMAL; only the numeric amount is persisted. */
  class JodaMoneyJdbcType extends DriverJdbcType[Money] with NumericTypedType {
    def sqlType = java.sql.Types.DECIMAL
    def setValue(v: Money, p: PreparedStatement, idx: Int) = p.setBigDecimal(idx, v.getAmount)
    def getValue(r: ResultSet, idx: Int) = {
      val v = r.getBigDecimal(idx)
      // RoundingMode.UNNECESSARY: the stored scale must already fit the currency.
      if(v eq null) null else Money.of(defaultCurrencyUnit, BigDecimal(v).bigDecimal, RoundingMode.UNNECESSARY)
    }
    def updateValue(v: Money, r: ResultSet, idx: Int) = r.updateBigDecimal(idx, v.getAmount)
  }

  trait JodaMoneyImplicits extends JodaMoneyCodegenSupport {
    implicit val jodaMoneyJdbType = new JodaMoneyJdbcType
    // NOTE(review): these names say "playJson" but they provide money extension
    // methods; kept as-is for source compatibility with existing callers.
    implicit def playJsonColumnExtensionMethods(c: Rep[Money]) = {
      new MoneyColumnExtensionMethods[Money, Money](c)
    }
    implicit def playJsonOptionColumnExtensionMethods(c: Rep[Option[Money]]) = {
      new MoneyColumnExtensionMethods[Money, Option[Money]](c)
    }
  }

  trait JodaMoneyPlainImplicits extends JodaMoneyCodegenSupport {
    // Parse a DECIMAL string into Money with the default currency.
    private def stringToJodaMoney(v: String): Money =
      Money.of(defaultCurrencyUnit, BigDecimal(v).bigDecimal, RoundingMode.UNNECESSARY)
    // Fixed: removed stray debug `println` left in the write path.
    private def jodaMoneyToString(money: Money): String =
      money.getAmount.toString

    import utils.PlainSQLUtils._
    // PositionedResult helpers for plain-SQL row reading.
    implicit class MySQLJodaMoneyPositionedResult(r: PositionedResult) {
      def nextMoney() = nextMoneyOption().orNull
      def nextMoneyOption() = r.nextStringOption().map(stringToJodaMoney)
    }

    // Accessors
    implicit val getMoney: GetResult[Money] = mkGetResult(_.nextMoney())
    implicit val getMoneyOption: GetResult[Option[Money]] = mkGetResult(_.nextMoneyOption())
    implicit val setMoney = mkSetParameter[Money]("money", jodaMoneyToString, java.sql.Types.DECIMAL)
    implicit val setMoneyOption = mkOptionSetParameter[Money]("money", jodaMoneyToString, java.sql.Types.DECIMAL)
  }
}
|
borissmidt/slick-mysql
|
core/src/test/scala/com/foerstertechnologies/slickmysql/ExMySQLProfileSpec.scala
|
package com.foerstertechnologies.slickmysql
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import slick.lifted.{ProvenShape, TableQuery, Tag}
import ExMySQLProfile.api._
/** Verifies the MySQL upsert SQL generated by ExMySQLProfile for TestTable. */
class ExMySQLProfileSpec extends AnyFlatSpec with Matchers {
  "the native upsert builder" should "create a valid my-sql query" in {
    // Compile an insert for TestTable and inspect the generated upsert
    // (INSERT ... ON DUPLICATE KEY UPDATE ... values(...)) statement.
    val sql = ExMySQLProfile.compileInsert(TableQuery[TestTable].toNode).upsert.sql
    sql.contains("(`id`,`start`,`end`,`status`)") shouldBe true
    sql.contains("(?,?,?,?)") shouldBe true
    sql.contains("`id` = values(`id`)") shouldBe true
    sql.contains("`start` = values(`start`)") shouldBe true
    sql.contains("`end` = values(`end`)") shouldBe true
    sql.contains("`status` = values(`status`)") shouldBe true
  }
}
/** Plain value object mapped by the upsert-SQL test table. */
case class Row(id: Int, start: Long, end: Long, status: String)
/** Slick table definition backing ExMySQLProfileSpec's upsert-SQL check. */
class TestTable(tag: Tag) extends Table[Row](tag, "TestTable") {
  val id = column[Int]("id")
  val start = column[Long]("start")
  val end = column[Long]("end")
  val status = column[String]("status")
  // Default projection: map the column tuple to/from Row.
  override def * : ProvenShape[Row] =
    (
      id,
      start,
      end,
      status
    ) <> (
      (Row.apply _).tupled,
      Row.unapply
    )
}
|
borissmidt/slick-mysql
|
core/src/main/scala/com/foerstertechnologies/slickmysql/utils/MySQLTokenHelper.scala
|
<filename>core/src/main/scala/com/foerstertechnologies/slickmysql/utils/MySQLTokenHelper.scala
package com.foerstertechnologies.slickmysql.utils
import scala.util.parsing.combinator.RegexParsers
import scala.util.parsing.input.CharSequenceReader
import scala.collection.mutable
import slick.SlickException
/**
 * Tokenizer/assembler for MySQL composite-value literals: bracketed groups,
 * comma-separated members, and multi-level quote/backslash escaping where one
 * logical character at depth d occupies 2^d source characters.
 *
 * Pipeline: [[Tokenizer.tokenize]] lexes a literal into flat tokens;
 * [[grouping]] nests them into a GroupToken tree; [[getString]] /
 * [[getChildren]] read values back out; [[createString]] renders a tree into
 * literal form, re-applying the correct escape level per depth.
 */
object MySQLTokenHelper {

  /** Base type of all lexical/structural tokens. */
  sealed trait Token {
    def value: String
  }
  /** One nested level: the tokens between a matching open/close or marker pair. */
  case class GroupToken(members: Seq[Token]) extends Token {
    val value = ""
    // Fixed: was built with deprecated `StringBuilder.newBuilder` and a
    // postfix `toString`; mkString yields the identical "GroupToken(a,b)" form.
    override def toString = members.mkString("GroupToken(", ",", ")")
  }
  /** Member separator. */
  case object Comma extends Token {
    val value = ","
    override def toString = "Comma"
  }
  /** Placeholder for an empty member (e.g. between two adjacent commas). */
  case object Null extends Token {
    val value = ""
    override def toString = "Null"
  }
  /** A run of plain (non-structural, unescaped) characters. */
  case class Chunk (value: String) extends Token {
    override def toString = s"Chunk($value)"
  }
  /** A backslash escape sequence. */
  case class Escape(value: String) extends Token {
    override def toString = s"Escape($value)"
  }
  /** A token that opens or closes a nesting level; `marker` is its quote run. */
  trait Border extends Token {
    def marker: String
  }
  case class Open (value: String, marker: String = "") extends Border {
    override def toString = s"Open($marker$value)"
  }
  case class Close(value: String, marker: String = "") extends Border {
    override def toString = s"Close($value$marker)"
  }
  /** A bare quote/backslash marker run without an adjacent bracket. */
  case class Marker(marker: String) extends Border {
    val value = ""
    override def toString = s"Marker($marker)"
  }
  ///////////////////////////////////////////////////////////////////
  // Mutable frame used by `grouping` while folding tokens into a tree.
  private case class WorkingGroup(border: Border, level: Int) {
    val tokens = mutable.ListBuffer[Token]()
  }

  /**
   * Flatten `token` into the unescaped string it denotes; `level` is the
   * nesting depth the token sits at (drives the de-escaping step size).
   */
  def getString(token: Token, level: Int): String = {
    // Keep every 2^level-th character of an escaped run.
    def unescape(value: String, level: Int): String = {
      val step = math.pow(2, level).toInt
      (for(i <- (-1 + step) until (value.length, step)) yield value.charAt(i))
        .mkString("")
    }
    def mergeString(buf: mutable.StringBuilder, token: Token): Unit =
      token match {
        case GroupToken(mList) => mList.foreach(mergeString(buf, _))
        case Escape(v) => buf append unescape(v, level + 1)
        case Marker(m) => buf append unescape(m, level + 1)
        case t => buf append t.value
      }
    ///
    // Fixed: `StringBuilder.newBuilder` is deprecated on 2.13; `new StringBuilder`
    // constructs the same scala.collection.mutable.StringBuilder.
    val buf = new StringBuilder
    mergeString(buf, token)
    buf.toString
  }

  /** Child members of a group, minus structural borders and commas. */
  def getChildren(token: Token): Seq[Token] = token match {
    case GroupToken(mList) => mList.filterNot(_.isInstanceOf[Border]).filterNot(_ == Comma)
    case _ => throw new IllegalArgumentException("WRONG token type: " + token)
  }

  /** Render a grouped token tree back into literal form, re-escaping per depth. */
  def createString(root: Token): String = {
    // Characters that force a member to be quoted with a marker.
    val MARK_REQUIRED_CHAR_LIST = List('\\', '"', ',', '(', ')', '{', '}')
    ///
    // A group opened with '{' is an array literal.
    def isArray(token: Token): Boolean = token match {
      case GroupToken(mList) =>
        mList match {
          case Open("{", _) :: _ => true
          case _ => false
        }
      case _ => false
    }
    // Does this member need surrounding quote markers at its depth?
    def isMarkRequired(token: Token, parentIsArray: Boolean): Boolean = token match {
      case _: GroupToken => !parentIsArray || !isArray(token)
      case Chunk(v) => v.isEmpty || v.trim.length < v.length || "NULL".equalsIgnoreCase(v) || v.find(MARK_REQUIRED_CHAR_LIST.contains).isDefined
      case _ => false
    }
    val rootIsArray = isArray(root)
    // Emit the quote marker appropriate for `level` (arrays and records
    // escalate escaping differently, hence the rootIsArray switch).
    def appendMark(buf: mutable.StringBuilder, level: Int) =
      if (level >= 0) {
        val markLen = math.pow(2, level).toInt
        level match {
          case 0 => buf append "\""
          case 1 => buf append (if (rootIsArray) "\\\"" else "\"\"")
          case 2 => buf append (if (rootIsArray) "\\\"\\\"" else "\\\\\"\"")
          case _ => buf append ("\\" * (markLen -4)) append (if (rootIsArray) "\\\"\\\"" else "\\\\\"\"")
        }
      }
    // Emit one character, escaped for `level`.
    def appendEscaped(buf: mutable.StringBuilder, ch: Char, level: Int) =
      if (level < 0) buf append ch
      else {
        val escapeLen = math.pow(2, level +1).toInt
        ch match {
          case '\\' => buf append ("\\" * escapeLen)
          case '"' => buf append ("\\" * (escapeLen -1)) append '"'
          case _ => buf append ch
        }
      }
    // Depth-first render of the tree into `buf`.
    def mergeString(buf: mutable.StringBuilder, token: Token, level: Int, parentIsArray: Boolean): Unit = {
      val markRequired = isMarkRequired(token, parentIsArray)
      if (markRequired) appendMark(buf, level)
      token match {
        case GroupToken(mList) => {
          buf append mList(0).value
          var isFirst = true
          for(i <- 1 to (mList.length -2)) {
            if (isFirst) isFirst = false else buf append ","
            mergeString(buf, mList(i), level +1, isArray(token))
          }
          buf append mList.last.value
        }
        case Chunk(v) => v.map(appendEscaped(buf, _, level))
        case _ => //nothing to do
      }
      if (markRequired) appendMark(buf, level)
    }
    ///
    // Fixed: deprecated `StringBuilder.newBuilder` replaced (see getString).
    val buf = new StringBuilder
    mergeString(buf, root, -1, isArray(root))
    buf.toString
  }

  /**
   * Fold the flat token list produced by the tokenizer into a nested
   * GroupToken tree, tracking quote-marker depth with a working-group stack.
   * The case order below is significant — do not reorder.
   */
  def grouping(tokens: List[Token]): Token = {
    // Marker length 2^d corresponds to depth d.
    def level(marker: String): Double =
      math.log(marker.length) / math.log(2)
    def isCompatible(open: Border, close: Border) =
      (open, close) match {
        case (Open("(", m1), Close(")", m2)) if m1 == m2 => true
        case (Open("(", m1), Close("]", m2)) if m1 == m2 => true
        case (Open("[", m1), Close(")", m2)) if m1 == m2 => true
        case (Open("[", m1), Close("]", m2)) if m1 == m2 => true
        case (Open("{", m1), Close("}", m2)) if m1 == m2 => true
        case (_, _) => false
      }
    ///
    val stack = mutable.Stack[WorkingGroup]()
    stack.push(WorkingGroup(Marker(""), -1))
    for(i <- 0 until tokens.length) {
      tokens(i) match {
        //-- process head and last tokens
        case t if (i == 0 || i == tokens.length -1) => stack.top.tokens += t
        //-- insert Null token if necessary
        case Comma => {
          if (tokens(i-1) == Comma || tokens(i-1).isInstanceOf[Open]) {
            stack.top.tokens += Null
          }
          stack.top.tokens += Comma
          if (tokens(i+1).isInstanceOf[Close]) {
            stack.top.tokens += Null
          }
        }
        //-- process open tokens
        // '{' + '{' -> multi-dimension array, 'ttt{' -> normal string
        case t @ Open("{", "") => {
          if (tokens(i-1).value == "{") {
            stack.push(WorkingGroup(t, stack.top.level))
            stack.top.tokens += t
          } else stack.top.tokens += Chunk("{")
        }
        // open border should prefix a marker, negative case: ',"tt(a...' <--> ',"(tt...' (normal)
        case Open(v, "") => stack.top.tokens += Chunk(v)
        // mark + escape, case: ',"\"(...",' <--> ',"...",' (normal)
        case Open(v, m) if (m != "" && level(m) != math.round(level(m))) => {
          val index = math.pow(2, stack.top.level).toInt
          stack.push(WorkingGroup(Marker(m.substring(0, index)), stack.top.level +1))
          stack.top.tokens += Marker(m.substring(0, index)) += Escape(m.substring(index)) += Chunk(v)
        }
        case t @ Open(v, m) => {
          if (tokens(i-1) == Comma || tokens(i-1).isInstanceOf[Open]) {
            stack.push(WorkingGroup(t, stack.top.level +1))
            stack.top.tokens += t
          } // case: 'tt\"(...'
          else stack.top.tokens += Escape(m) += Chunk(v)
        }
        //-- process marker tokens
        // mark + mark (empty string)
        case Marker(m) if (m != "" && level(m) == stack.top.level + 2) => {
          val m2 = m.substring(0, m.length / 2)
          stack.top.tokens += GroupToken(List(Marker(m2), Chunk(""), Marker(m2)))
        }
        // mark + escape
        case Marker(m) if (m != "" && level(m) != math.round(level(m))
          && (tokens(i-1) == Comma || tokens(i-1).isInstanceOf[Open])) => {
          val index = math.pow(2, stack.top.level +1).toInt
          stack.push(WorkingGroup(Marker(m.substring(0, index)), stack.top.level +1))
          stack.top.tokens += Marker(m.substring(0, index)) += Escape(m.substring(index))
        }
        // escape + mark
        case Marker(m) if (m != "" && level(m) != math.round(level(m))
          && (tokens(i+1) == Comma || tokens(i+1).isInstanceOf[Close])) => {
          val existed = stack.find(g => m.endsWith(g.border.marker)).get
          for (_ <- 0 to stack.lastIndexOf(existed)) {
            if (stack.top == existed) {
              val index = m.length - stack.top.border.marker.length
              stack.top.tokens += Escape(m.substring(0, index)) += Marker(m.substring(index))
            }
            val toBeMerged = GroupToken(stack.pop.tokens.toList)
            stack.top.tokens += toBeMerged
          }
        }
        // mark + escape + mark
        case t @ Marker(m) if ((tokens(i-1) == Comma || tokens(i-1).isInstanceOf[Open])
          && (tokens(i+1) == Comma || tokens(i+1).isInstanceOf[Close])) => {
          val topMarker = stack.top.border.marker
          if (topMarker == "") {
            stack.push(WorkingGroup(t, stack.top.level +1))
          } else if ((m.length > topMarker.length * 2) && m.startsWith(topMarker) && m.endsWith(topMarker)) {
            stack.top.tokens += Escape(m.substring(topMarker.length -1, m.length -topMarker.length))
          } else stack.top.tokens += Escape(m)
        }
        case t @ Marker(m) => {
          val escape = stack.top.tokens.find(e => e.isInstanceOf[Escape] && e.value == m)
          // mark + escape ... + [escape]
          if (escape.isDefined) {
            stack.top.tokens += Escape(m)
          } else { // others
            val existed = stack.find(g => g.border.marker == m)
            if (existed.isDefined) {
              for (_ <- 0 to stack.lastIndexOf(existed.get)) {
                if (stack.top == existed.get) stack.top.tokens += t
                val toBeMerged = GroupToken(stack.pop.tokens.toList)
                stack.top.tokens += toBeMerged
              }
            } else {
              stack.push(WorkingGroup(t, stack.top.level +1))
              stack.top.tokens += t
            }
          }
        }
        //-- process close tokens
        // '{' + '{' -> multi-dimension array, 'ttt{' -> normal string
        case Close("}", "") if (tokens(i+1).value != "}") => stack.top.tokens += Chunk("}")
        // close border should postfix a marker, negative case: ',"...)ttt' <--> ',"...)\",' (normal)
        case Close(v, "") if (v != "}") => stack.top.tokens += Chunk(v)
        // escape + mark, case: '...tt)\"",' <--> '...tt)",..' (normal)
        case Close(v, m) if (m != "" && level(m) != math.round(level(m))) => {
          val existed = stack.find(b => m.endsWith(b.border.marker)).get
          for (_ <- 0 to stack.lastIndexOf(existed)) {
            if (stack.top == existed) {
              val index = m.length - stack.top.border.marker.length
              stack.top.tokens += Chunk(v) += Escape(m.substring(0, index)) += Marker(m.substring(index))
            }
            val toBeMerged = GroupToken(stack.pop.tokens.toList)
            stack.top.tokens += toBeMerged
          }
        }
        case t @ Close(v, m) => {
          // case: '(...),' or '...}}'
          if ((tokens(i+1) == Comma || tokens(i+1).isInstanceOf[Close]) && isCompatible(stack.top.border, t)) {
            stack.top.tokens += t
            val toBeMerged = GroupToken(stack.pop.tokens.toList)
            stack.top.tokens += toBeMerged
          }
          else {
            // case: '"Word1 (Word2)", ...'
            if (stack.top.border.isInstanceOf[Marker] && stack.top.border.marker == m) {
              stack.top.tokens += Chunk(v)
              val toBeMerged = GroupToken(stack.pop.tokens.toList :+ Marker(m))
              stack.top.tokens += toBeMerged
            }
            // case: ',"}ttt...'
            else stack.top.tokens += Chunk(v) += Escape(m)
          }
        }
        //-- process other tokens
        case t => stack.top.tokens += t
      }
    }
    ///
    GroupToken(stack.top.tokens.toList)
  }
  ////////////////////////////////////////////////////////////////////////////
  /** Regex-parser lexer producing the flat token stream consumed by `grouping`. */
  object Tokenizer extends RegexParsers {
    override def skipWhitespace = false
    val MARKER = """[\\|"]+""".r
    val ESCAPE = """\\+[^"\\]""".r
    val CHUNK = """[^}){(\[\]\\,"]+""".r
    def open: Parser[Token] = opt(MARKER) ~ (elem('{') | elem('(') | elem('[')) ^^ {
      case (x ~ y) => Open(String.valueOf(y), x.getOrElse(""))
    }
    def close: Parser[Token] = (elem('}') | elem(')') | elem(']')) ~ opt(MARKER) ^^ {
      case (x ~ y) => Close(String.valueOf(x), y.getOrElse(""))
    }
    def escape = ESCAPE ^^ { x => Escape(x) }
    def marker = MARKER ^^ { x => Marker(x) }
    def comma = elem(',') ^^ { x => Comma }
    def chunk = CHUNK ^^ { x => Chunk(x) }
    def patterns = open | close | escape | marker | comma | chunk
    //--
    /** Lex `input`; throws SlickException on a malformed literal. */
    def tokenize(input: String) =
      parseAll(rep(patterns), new CharSequenceReader(input)) match {
        case Success(result, _) => result
        case failure: NoSuccess => throw new SlickException(failure.msg)
      }
  }
}
|
borissmidt/slick-mysql
|
build.sbt
|
<reponame>borissmidt/slick-mysql
// Cross-build targets.
lazy val scala212 = "2.12.8"
lazy val scala213 = "2.13.3"
// Settings shared by the root project and every addon module:
// metadata, compiler flags, resolvers, and Sonatype publishing.
lazy val commonSettings = Seq(
organizationName := "foerster technologies",
organization := "com.foerster-technologies",
name := "slick-mysql",
version := "1.1.0",
scalaVersion := scala213,
crossScalaVersions := List(scala212, scala213),
scalacOptions ++= Seq("-deprecation",
"-feature",
"-language:implicitConversions",
"-language:reflectiveCalls",
"-language:higherKinds",
"-language:postfixOps",
"-language:existentials"),
resolvers += Resolver.mavenLocal,
resolvers += Resolver.sonatypeRepo("snapshots"),
resolvers += "Typesafe repository" at "https://repo.typesafe.com/typesafe/releases/",
resolvers += "spray" at "https://repo.spray.io/",
// SNAPSHOT versions go to the snapshots repo, everything else to staging.
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (version.value.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
publishConfiguration := publishConfiguration.value.withOverwrite(true),
publishMavenStyle := true,
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
// makePomConfiguration := makePomConfiguration.value. // ~= { _.(configurations = Some(Seq(Compile, Runtime, Optional))) },
// POM metadata required by Sonatype (license, scm, developers).
pomExtra :=
<url>https://github.com/foerster-technologies/slick-mysql</url>
<licenses>
<license>
<name>BSD-style</name>
<url>http://www.opensource.org/licenses/bsd-license.php</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url><EMAIL>:foerster-technologies/slick-mysql.git</url>
<connection>scm:git:<EMAIL>:foerster-technologies/slick-mysql.git</connection>
</scm>
<developers>
<developer>
<id>TimFoerster</id>
<name><NAME></name>
<email><EMAIL></email>
<organization>foerster technologies GmbH</organization>
<organizationUrl>https://www.foerster-technologies.com</organizationUrl>
<timezone>+1</timezone>
</developer>
</developers>
)
// Dependencies shared by all modules (Slick + test/provided tooling).
def mainDependencies(scalaVersion: String) = Seq (
"org.scala-lang" % "scala-reflect" % scalaVersion,
"com.typesafe.slick" %% "slick" % "3.3.2",
"org.slf4j" % "slf4j-simple" % "1.7.30" % "provided",
"org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.2" % "provided",
"org.scalatest" %% "scalatest" % "3.1.1" % "test"
)
// Core module: the base profile extensions.
lazy val slickMySQLCore = (project in file("./core"))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql_core",
description := "Slick extensions for MySQL - Core",
libraryDependencies := mainDependencies(scalaVersion.value)
)
)
// Root project: aggregates core and every addon.
lazy val slickMySQLProject = (project in file("."))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql",
description := "Slick extensions for MySQL",
libraryDependencies := mainDependencies(scalaVersion.value)
)
).dependsOn(slickMySQLCore)
.aggregate(slickMySQLCore, slickMySQLJts, slickMySQLPlayJson, slickMySQLCirceJson, slickMySQLJodaMoney)
// Addon: JTS geometry support.
lazy val slickMySQLJts = (project in file("./addons/jts"))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql_jts",
description := "Slick extensions for MySQL - jts module",
libraryDependencies := mainDependencies(scalaVersion.value) ++ Seq(
"org.locationtech.jts" % "jts-core" % "1.16.1"
)
)
).dependsOn(slickMySQLCore)
// Addon: play-json support.
lazy val slickMySQLPlayJson = (project in file("./addons/play-json"))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql_play-json",
description := "Slick extensions for MySQL - play-json module",
libraryDependencies := mainDependencies(scalaVersion.value) ++ Seq(
"com.typesafe.play" %% "play-json" % "2.8.1"
)
)
).dependsOn(slickMySQLCore)
lazy val circeVersion = "0.13.0"
// Addon: circe JSON support.
lazy val slickMySQLCirceJson = (project in file("./addons/circe-json"))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql_circe-json",
description := "Slick extensions for MySQL - circe-json module",
libraryDependencies := mainDependencies(scalaVersion.value) ++ Seq(
"io.circe" %% "circe-core",
"io.circe" %% "circe-generic",
"io.circe" %% "circe-parser",
"io.circe" %% "circe-literal"
).map(_ % circeVersion),
)
).dependsOn(slickMySQLCore)
// Addon: joda-money support.
lazy val slickMySQLJodaMoney = (project in file("./addons/joda-money"))
.settings(
Defaults.coreDefaultSettings ++ commonSettings ++ Seq(
name := "slick-mysql_joda-money",
description := "Slick extensions for MySQL - joda-money module",
libraryDependencies := mainDependencies(scalaVersion.value) ++ Seq(
"org.joda" % "joda-money" % "1.0.1"
)
)
).dependsOn(slickMySQLCore)
|
borissmidt/slick-mysql
|
addons/jts/src/main/scala/com/foerstertechnologies/slickmysql/MySQLSpatialSupportUtils.scala
|
<filename>addons/jts/src/main/scala/com/foerstertechnologies/slickmysql/MySQLSpatialSupportUtils.scala
package com.foerstertechnologies.slickmysql
import java.nio.ByteBuffer
import org.locationtech.jts.geom.{Geometry, GeometryFactory, Point, PrecisionModel}
import org.locationtech.jts.io.{ByteOrderValues, WKBReader, WKBWriter, WKTReader, WKTWriter}
/** Conversion helpers between JTS [[Geometry]] values and MySQL's textual /
  * binary geometry representations (WKT, hex WKB, and the internal
  * 4-byte-SRID + WKB storage format). */
object MySQLSpatialSupportUtils {

  // JTS readers/writers are not thread-safe, so one instance is cached per thread.
  private val wktWriterHolder = new ThreadLocal[WKTWriter]
  private val wktReaderHolder = new ThreadLocal[WKTReader]
  private val wkbWriterHolder = new ThreadLocal[WKBWriter]   // 2D (XY) WKB writer
  private val wkb3DWriterHolder = new ThreadLocal[WKBWriter] // 3D (XYZ) WKB writer
  private val wkbReaderHolder = new ThreadLocal[WKBReader]

  /** Renders a geometry as a WKT literal (e.g. "POINT (1 2)"). */
  def toLiteral(geom: Geometry): String = {
    if (wktWriterHolder.get == null) wktWriterHolder.set(new WKTWriter())
    wktWriterHolder.get.write(geom)
  }

  /** Parses a database literal into a geometry.
    *
    * The value may carry an optional "SRID=<n>;" prefix; the remainder is either
    * hex-encoded WKB (recognised by the "00"/"01" byte-order marker) or plain WKT.
    */
  def fromLiteral[T](value: String): T = {
    splitRSIDAndWKT(value) match {
      case (srid, wkt) =>
        val geom =
          if (wkt.startsWith("00") || wkt.startsWith("01")) {
            // Hex-encoded WKB. fromBytes creates the thread-local reader itself;
            // pre-creating a default WKBReader here (as the old code did) made the
            // SRID-aware GeometryFactory in fromBytes unreachable.
            fromBytes(WKBReader.hexToBytes(wkt))
          } else {
            if (wktReaderHolder.get == null) wktReaderHolder.set(new WKTReader())
            wktReaderHolder.get.read(wkt)
          }
        if (srid != -1) geom.setSRID(srid)
        geom.asInstanceOf[T]
    }
  }

  /** Decodes raw WKB bytes with the supplied per-thread reader. */
  def geometryFromWkb[T](wkbReader: ThreadLocal[WKBReader], bytes: Array[Byte]): T = {
    wkbReader.get.read(bytes).asInstanceOf[T]
  }

  /** Decodes a MySQL geometry value: 4 SRID bytes followed by the WKB payload.
    * https://dev.mysql.com/doc/refman/8.0/en/storage-requirements.html
    *
    * NOTE(review): the SRID is read big-endian here while toBytes writes it
    * little-endian, and the reader (whose factory is derived from the first SRID
    * seen) is created only once per thread — confirm both against the actual
    * MySQL wire format before relying on the decoded SRID.
    */
  def fromBytes[T](bytes: Array[Byte]): T = {
    if (wkbReaderHolder.get == null) wkbReaderHolder.set(new WKBReader(new GeometryFactory(new PrecisionModel, ByteBuffer.wrap(bytes.take(4)).getInt)))
    geometryFromWkb(wkbReaderHolder, bytes.drop(4))
  }

  /** Splits an optional "SRID=<n>;" prefix off a literal; SRID is -1 when absent. */
  private def splitRSIDAndWKT(value: String): (Int, String) = {
    if (value.startsWith("SRID=")) {
      val index = value.indexOf(';', 5) // srid prefix length is 5
      if (index == -1) {
        throw new java.sql.SQLException("Error parsing Geometry - SRID not delimited with ';' ")
      } else {
        // BUG FIX: parse only the digits after "SRID=". The old code passed the
        // whole "SRID=<n>" substring to parseInt, which always threw
        // NumberFormatException for any SRID-prefixed literal.
        val srid = Integer.parseInt(value.substring(5, index))
        val wkt = value.substring(index + 1)
        (srid, wkt)
      }
    } else (-1, value)
  }

  /** Encodes a geometry in MySQL's internal format: 4 little-endian SRID bytes + WKB. */
  def toBytes[T <: Geometry](geom: T): Array[Byte] = {
    val writer =
      if (geom != null && geom.getCoordinate != null && !java.lang.Double.isNaN(geom.getCoordinate.getZ)) {
        if (wkb3DWriterHolder.get == null) wkb3DWriterHolder.set(new WKBWriter(3, ByteOrderValues.LITTLE_ENDIAN, false))
        // BUG FIX: use the 3D writer that was just initialised. The old code read
        // wkbWriterHolder here — the 2D writer, which may still be null (NPE) or
        // would silently drop the Z coordinate.
        wkb3DWriterHolder.get
      } else {
        if (wkbWriterHolder.get == null) wkbWriterHolder.set(new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, false))
        wkbWriterHolder.get
      }
    val wkbValue = writer.write(geom)
    val srid = geom.getSRID
    // MySQL stores the srid with the wkb value in little endian.
    // Enabling the WKBWriter with `includeSRID` results in incorrect data,
    // so the 4-byte prefix is assembled by hand.
    val sridByte = Array(srid.toByte, (srid >>> 8).toByte, (srid >>> 16).toByte, (srid >>> 24).toByte)
    sridByte ++ wkbValue
  }

  /** Renders a geometry as WKT using a fresh (uncached) writer. */
  def toWkt(geom: Geometry): String = {
    new WKTWriter().write(geom)
  }

  /**
   * Helper method for debugging purposes.
   * @param bytes array that will be printed
   * @return bytes as hex representation
   */
  def convertBytesToHex(bytes: Seq[Byte]): String = {
    // Idiomatic replacement for the manual StringBuilder loop; same output.
    bytes.map(b => f"$b%02x").mkString
  }
}
|
borissmidt/slick-mysql
|
src/main/scala/com/foerstertechnologies/slickmysql/PatchActionExtensionMethodsSupport.scala
|
package com.foerstertechnologies.slickmysql
import slick.ast.{CompiledStatement, Node, ResultSetMapping}
import slick.dbio.{Effect, NoStream}
import slick.jdbc._
import slick.lifted.Query
import slick.relational.{CompiledMapping, ProductResultConverter, ResultConverter, TypeMappingResultConverter}
import slick.util.{ProductWrapper, SQLBuilder}
/** Adds a `patch` write action to Slick queries: an UPDATE that sets only the
  * fields of the supplied product that are defined (Some / non-null), leaving
  * all other columns of the row untouched. */
trait PatchActionExtensionMethodsSupport { self: MySQLProfile =>
trait PatchActionImplicits {
// Compiles the query's full UPDATE statement once; `patch` later prunes the
// setters for undefined fields from the generated SQL.
implicit def queryPatchActionExtensionMethods[U <: Product, C[_]](
q: Query[_, U, C]
): PatchActionExtensionMethodsImpl[U] =
createPatchActionExtensionMethods(updateCompiler.run(q.toNode).tree, ())
}
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////// Patch Actions
///////////////////////////////////////////////////////////////////////////////////////////////
type PatchActionExtensionMethods[T <: Product] = PatchActionExtensionMethodsImpl[T]
def createPatchActionExtensionMethods[T <: Product](tree: Node, param: Any): PatchActionExtensionMethods[T] =
new PatchActionExtensionMethodsImpl[T](tree, param)
class PatchActionExtensionMethodsImpl[T <: Product](tree: Node, param: Any) {
// Destructure the compiled UPDATE into the SQL builder result (`sres`) and
// the result-converter chain that binds product fields to JDBC parameters.
protected[this] val ResultSetMapping(_, CompiledStatement(_, sres: SQLBuilder.Result, _),
CompiledMapping(_converter, _)) = tree
protected[this] val converter = _converter.asInstanceOf[ResultConverter[JdbcResultConverterDomain, Product]]
protected[this] val TypeMappingResultConverter(childConverter, toBase, toMapped) = converter
protected[this] val ProductResultConverter(elementConverters @ _ *) =
childConverter.asInstanceOf[ResultConverter[JdbcResultConverterDomain, Product]]
// Splits "... set <setters> where ..." into prefix / setter list / suffix.
private[this] val updateQuerySplitRegExp = """(.*)(?<=set )((?:(?= where)|.)+)(.*)?""".r
// Matches one "col = ?" setter inside the setter list.
private[this] val updateQuerySetterRegExp = """[^\s]+\s*=\s*\?""".r
/** An Action that updates the data selected by this query. */
def patch(value: T): ProfileAction[Int, NoStream, Effect.Write] = {
// Keep only the defined fields (drop None and null) together with their
// original positions and element converters.
val (seq, converters) = value.productIterator.zipWithIndex.toIndexedSeq
.zip(elementConverters)
.filter {
case ((Some(_), _), _) => true
case ((None, _), _) => false
case ((null, _), _) => false
case ((_, _), _) => true
}
.unzip
val (products, indexes) = seq.unzip
// Re-index the surviving converters onto consecutive 1-based JDBC
// parameter positions, preserving each converter's concrete type.
val newConverters = converters.zipWithIndex
.map(c => (c._1, c._2 + 1))
.map {
case (c: BaseResultConverter[_], idx) => new BaseResultConverter(c.ti, c.name, idx)
case (c: OptionResultConverter[_], idx) => new OptionResultConverter(c.ti, idx)
case (c: DefaultingResultConverter[_], idx) => new DefaultingResultConverter(c.ti, c.default, idx)
case (c: IsDefinedResultConverter[_], idx) => new IsDefinedResultConverter(c.ti, idx)
}
val productResultConverter =
ProductResultConverter(newConverters: _*).asInstanceOf[ResultConverter[JdbcResultConverterDomain, Any]]
val newConverter = TypeMappingResultConverter(productResultConverter, (p: Product) => p, (a: Any) => toMapped(a))
val newValue: Product = new ProductWrapper(products)
// Rebuild the SQL: drop the "col = ?" setters whose field was filtered out,
// keeping the prefix (UPDATE ... SET) and suffix (WHERE ...) intact.
val newSql = sres.sql match {
case updateQuerySplitRegExp(prefix, setter, suffix) =>
val buffer = new StringBuilder()
buffer.append(prefix)
buffer.append(
updateQuerySetterRegExp
.findAllIn(setter)
.zipWithIndex
.filter(s => indexes.contains(s._2))
.map(_._1)
.mkString(", ")
)
buffer.append(suffix)
buffer.toString()
}
new SimpleJdbcProfileAction[Int]("patch", Vector(newSql)) {
def run(ctx: Backend#Context, sql: Vector[String]): Int =
ctx.session.withPreparedStatement(sql.head) { st =>
st.clearParameters()
// Bind the kept field values first, then the WHERE parameters after them.
newConverter.set(newValue, st)
sres.setter(st, newConverter.width + 1, param)
st.executeUpdate
}
}
}
}
}
|
borissmidt/slick-mysql
|
addons/play-json/src/main/scala/com/foerstertechnologies/slickmysql/MySQLPlayJsonSupport.scala
|
<gh_stars>1-10
package com.foerstertechnologies.slickmysql
import slick.jdbc.{JdbcProfile, JdbcType, MySQLProfile, PositionedResult}
import scala.reflect.classTag
/** play-json support for the MySQL profile: a JdbcType mapping for JsValue
  * columns, lifted-embedding extension methods, and plain-SQL getters/setters. */
trait MySQLPlayJsonSupport extends json.MySQLJsonExtension with utils.MySQLCommonJdbcTypes { self: MySQLProfile =>
import self.api._
import play.api.libs.json._
///---
// SQL type name used when declaring/casting JSON columns.
val json: String = "json"
///---
trait PlayJsonCodeGenSupport {
// register types to let `ExModelBuilder` find them
if (self.isInstanceOf[ExMySQLProfile]) {
self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("json", classTag[JsValue])
self.asInstanceOf[ExMySQLProfile].bindMySQLTypeToScala("jsonb", classTag[JsValue])
}
}
trait PlayJsonImplicits extends PlayJsonCodeGenSupport {
// Column mapping: parse on read; on write, stringify and strip escaped NUL
// sequences, which MySQL's JSON type rejects.
implicit val playJsonTypeMapper: JdbcType[JsValue] =
new GenericJdbcType[JsValue](
json,
(v) => Json.parse(v),
(v) => Json.stringify(v).replace("\\u0000", ""),
java.sql.Types.LONGVARCHAR,
hasLiteralForm = false
)
// JSON operators for non-optional and optional JSON columns.
implicit def playJsonColumnExtensionMethods(c: Rep[JsValue]) = {
new JsonColumnExtensionMethods[JsValue, JsValue](c)
}
implicit def playJsonOptionColumnExtensionMethods(c: Rep[Option[JsValue]]) = {
new JsonColumnExtensionMethods[JsValue, Option[JsValue]](c)
}
}
trait PlayJsonPlainImplicits extends PlayJsonCodeGenSupport {
import utils.PlainSQLUtils._
// Positioned-result helpers: NULL columns surface as JsNull / None.
implicit class MySQLJsonPositionedResult(r: PositionedResult) {
def nextJson() = nextJsonOption().getOrElse(JsNull)
def nextJsonOption() = r.nextStringOption().map(Json.parse)
}
////////////////////////////////////////////////////////////
// GetResult / SetParameter instances for plain SQL interpolation.
implicit val getJson = mkGetResult(_.nextJson())
implicit val getJsonOption = mkGetResult(_.nextJsonOption())
implicit val setJson = mkSetParameter[JsValue](json, Json.stringify)
implicit val setJsonOption = mkOptionSetParameter[JsValue](json, Json.stringify)
}
}
|
borissmidt/slick-mysql
|
addons/jts/src/test/scala/com/foerstertechnologies/slickmysql/MySQLSpatialSupportSuite.scala
|
<reponame>borissmidt/slick-mysql
package com.foerstertechnologies.slickmysql
import org.scalatest.FunSuite
import slick.jdbc.{GetResult, MySQLProfile, PostgresProfile}
import scala.concurrent.{Await, ExecutionContext}
import java.util.concurrent.Executors
/** Smoke test: checks that a MySQLProfile extended with the spatial-support
  * traits composes without conflicts (cake pattern wires up and elaborates). */
class MySQLSpatialSupportSuite extends FunSuite {
implicit val testExecContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))
trait MyMySQLProfile extends MySQLProfile with MySQLSpatialSupport {
override val api: API = new API {}
// Plain-SQL variant of the API carrying the spatial plain implicits.
val plainApi = new API with MySQLSpatialPlainImplicits
trait API extends super.API with MySQLSpatialImplicits
}
object MyMySQLProfile extends MyMySQLProfile
import MyMySQLProfile.api._
test("Profile builds") {
// Only constructs the Database handle — no connection is opened here.
val db = Database.forURL("localhost", driver = "com.mysql.cj.jdbc.Driver")
}
}
|
borissmidt/slick-mysql
|
project/plugins.sbt
|
// Comment to get more information during initialization
// Comment to get more information during initialization
logLevel := Level.Warn
// The Typesafe repository
resolvers += "Typesafe repository" at "https://repo.typesafe.com/typesafe/releases/"
// Publishing plugins: Sonatype deployment and PGP artifact signing
addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.8.1")
addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.1")
// Previously used plugin versions, kept for reference:
// addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "2.3")
// addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.1.1")
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/System.scala
|
<gh_stars>0
package redefine
import chisel3._
import freechips.rocketchip.config.{Parameters}
import freechips.rocketchip.subsystem._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util.{DontTouch}
/** Outermost REDEFINE system: the subsystem plus the standard external
  * interfaces — async external interrupts, optional AXI4 memory/MMIO master
  * ports, an optional AXI4 slave port, and a boot ROM. */
class REDEFINESystem(implicit p: Parameters) extends REDEFINESubsystem
with HasAsyncExtInterrupts
with CanHaveMasterAXI4MemPort
with CanHaveMasterAXI4MMIOPort
with CanHaveSlaveAXI4Port
with HasPeripheryBootROM
//traits/mixins
{
override lazy val module = new REDEFINESystemModuleImp(this)
}
/** Module implementation for [[REDEFINESystem]]: layers the RTC,
  * external-interrupt, and boot-ROM implementation logic on top of the
  * subsystem imp; DontTouch keeps its signals from being optimized away. */
class REDEFINESystemModuleImp[+L <: REDEFINESystem](_outer: L) extends REDEFINESubsystemModuleImp(_outer)
with HasRTCModuleImp
with HasExtInterruptsModuleImp
with HasPeripheryBootROMModuleImp
with DontTouch
|
mysoreanoop/chipyard
|
generators/redefine/src/test/scala/dma/DMAUnitTest.scala
|
<reponame>mysoreanoop/chipyard<gh_stars>0
// See README.md for license details.
package dma
import chisel3.iotesters
import chisel3.iotesters._
import chisel3._
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy.LazyModule
/** PeekPoke testbench for the DMA wrapper: programs a single DSM->NoC
  * transfer (src 0x100 -> dest 0x400, 4 rows of 0x100) and waits for
  * io.done, bounded at 600 cycles. */
class TBDMATop(c:ChiselTopWrapper) extends PeekPokeTester(c) {
// Cycle counter used to bound the completion wait loop below.
var k = 0
// println("Running ChiselTopWrapper!")
// poke(c.io.cmd.valid, 1)
// poke(c.io.cmd.bits.addr, 0)
// poke(c.io.cmd.bits.nodeId, 0)
// poke(c.io.cmd.bits.xStep, 1)
// poke(c.io.cmd.bits.yStep, 0x100)
// poke(c.io.cmd.bits.xCnt, 0x1B3)
// poke(c.io.cmd.bits.yCnt, 0x4)
// while(k < 200) {
// poke(c.io.out.ready, 1)
// step(1)
// poke(c.io.out.ready, 0)
// k = k+1
// step(1)
// }
// step(100)
println("Running ChiselTopWrapper!")
step(5)
// Source descriptor: base 0x100, node 0, unit x-step, row pitch 0x100,
// 0x100 elements per row, 4 rows (units per DMAParams — TODO confirm).
poke(c.io.cmd.src.addr, 0x100)
poke(c.io.cmd.src.nodeId, 0)
poke(c.io.cmd.src.xStep, 1)
poke(c.io.cmd.src.yStep, 0x100)
poke(c.io.cmd.src.xCnt, 0x100)
poke(c.io.cmd.src.yCnt, 0x4)
// Destination descriptor: same 2-D pattern at 0x400 on node 23.
poke(c.io.cmd.dest.addr, 0x400)
poke(c.io.cmd.dest.nodeId, 23)
poke(c.io.cmd.dest.xStep, 1)
poke(c.io.cmd.dest.yStep, 0x100)
poke(c.io.cmd.dest.xCnt, 0x100)
poke(c.io.cmd.dest.yCnt, 0x4)
poke(c.io.cmd.start, 1)
poke(c.io.cmd.mode, 1) //DSM to NoC
// Wait for completion; the 600-cycle cap keeps a stuck DUT from hanging the test.
while((peek(c.io.done) == 0) && k < 600) {
step(1)
k = k+1
}
// Deassert start and let the design settle.
poke(c.io.cmd.start, 0)
step(10)
}
/** Elaboration wrapper: instantiates the CRTemp LazyModule and mirrors its IO
  * so the PeekPoke tester can drive it as a plain MultiIOModule. */
class ChiselTopWrapper(implicit p: Parameters) extends MultiIOModule {
val top = Module(LazyModule(new CRTemp).module)
val io = IO(top.io.cloneType)
io <> top.io
// val top = Module(new AddressGenerator)
// val io = IO(top.io.cloneType)
// io <> top.io
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/dma/Config.scala
|
package dma
import chisel3._
import freechips.rocketchip.config.{Parameters, Field, Config}
/** Default DMA test configuration: installs both the DMA and DSM parameters. */
class BaseConfig extends Config(
new WithDMA(true) ++
new WithDSM(true))
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/Subsystem.scala
|
package redefine
import chisel3._
import chisel3.internal.sourceinfo.{SourceInfo}
import freechips.rocketchip.config.{Field, Parameters}
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.devices.debug.{HasPeripheryDebug, HasPeripheryDebugModuleImp}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.diplomaticobjectmodel.model.{OMInterrupt}
import freechips.rocketchip.diplomaticobjectmodel.logicaltree.{RocketTileLogicalTreeNode, LogicalModuleTree}
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.interrupts._
import freechips.rocketchip.util._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.amba.axi4._
/** REDEFINE subsystem: a BaseSubsystem carrying the Rocket tiles, the mask-ROM
  * devices, and (optionally) a broadcast hart-id prefix fanned out to every tile. */
class REDEFINESubsystem(implicit p: Parameters) extends BaseSubsystem
with HasRocketTiles
{
// Uniform alias so generic code can refer to all tiles regardless of kind.
val tiles = rocketTiles
// add Mask ROM devices
val maskROMs = p(PeripheryMaskROMKey).map { MaskROM.attach(_, cbus) }
// When HartPrefixKey is set, create a broadcast node carrying the hart-id
// prefix; otherwise no node (and no prefixing) exists.
val hartPrefixNode = if (p(HartPrefixKey)) {
Some(BundleBroadcast[UInt](registered = true))
} else {
None
}
// One sink per tile, each attached to the broadcast node (empty when disabled).
val hartPrefixes = hartPrefixNode.map { hpn => Seq.fill(tiles.size) {
val hps = BundleBridgeSink[UInt]
hps := hpn
hps
} }.getOrElse(Nil)
override lazy val module = new REDEFINESubsystemModuleImp(this)
}
/** Module-implementation counterpart of [[HasRocketTiles]]; also pulls in the
  * debug-module implementation and exposes the outer (LazyModule-side) trait. */
trait HasRocketTilesModuleImp extends HasTilesModuleImp
with HasPeripheryDebugModuleImp {
val outer: HasRocketTiles
}
/** Instantiates the Rocket tiles described by RocketTilesKey, wires each one
  * into the system buses and interrupt fabric according to its clock crossing,
  * and registers the tiles in the logical module tree. */
trait HasRocketTiles extends HasTiles
with CanHavePeripheryPLIC
with CanHavePeripheryCLINT
with HasPeripheryDebug { this: BaseSubsystem =>
val module: HasRocketTilesModuleImp
protected val rocketTileParams = p(RocketTilesKey)
// One crossing per tile (a single global setting is replicated as needed).
private val crossings = perTileOrGlobalSetting(p(RocketCrossingKey), rocketTileParams.size)
// Make a tile and wire its nodes into the system,
// according to the specified type of clock crossing.
// Note that we also inject new nodes into the tile itself,
// also based on the crossing type.
val rocketTiles = rocketTileParams.zip(crossings).map { case (tp, crossing) =>
val rocket = LazyModule(new RocketTile(tp, crossing, PriorityMuxHartIdFromSeq(rocketTileParams), logicalTreeNode))
connectMasterPortsToSBus(rocket, crossing)
connectSlavePortsToCBus(rocket, crossing)
connectInterrupts(rocket, debugOpt, clintOpt, plicOpt)
rocket
}
// Register every tile in the logical module tree. This is purely a side
// effect, so use foreach (the old code used map and also defined an unused
// local `treeNode` per tile, both of which are dropped here).
rocketTiles.foreach { r =>
LogicalModuleTree.add(logicalTreeNode, r.rocketLogicalTree)
}
// Core-monitor bundles of all tiles, e.g. for trace/debug instrumentation.
def coreMonitorBundles = (rocketTiles map { t =>
t.module.core.rocketImpl.coreMonitorBundle
}).toList
}
/** Module implementation for [[REDEFINESubsystem]]: drives every tile's hart
  * id (OR-ed with the optional broadcast prefix) and its reset vector. */
class REDEFINESubsystemModuleImp[+L <: REDEFINESubsystem](_outer: L) extends BaseSubsystemModuleImp(_outer)
with HasResetVectorWire
with HasRocketTilesModuleImp
{
for (i <- 0 until outer.tiles.size) {
val wire = tile_inputs(i)
// Prefix defaults to 0 when hart-prefixing is disabled (hartPrefixes empty).
val prefix = outer.hartPrefixes.lift(i).map(_.bundle).getOrElse(0.U)
wire.hartid := prefix | outer.hartIdList(i).U
wire.reset_vector := global_reset_vector
}
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/dma/DummyDMA.scala
|
<filename>generators/redefine/src/main/scala/dma/DummyDMA.scala
package dma
import chisel3._
import chisel3.util._
import freechips.rocketchip.config.{Parameters, Field, Config}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util._
import testchipip.TLHelper
/** Standalone test harness for the DMA: attaches the DMA's two TileLink
  * client ports to TLRAMs (standing in for the DSM and NoC targets) behind
  * fragmenters, and mirrors the DMA module's IO at the top. */
class CRTemp(implicit p: Parameters) extends LazyModule {
//Can be used when we need to verify if we're reading appropriately
//val rom = LazyModule(new TLROM(
// base = 0,
// size = 0x10000,
// contentsDelayed = Seq.tabulate(0x10000) {i=>i.toByte},
// beatBytes = 32))
//val ram0 = LazyModule(new ManagerTL)
// NOTE(review): despite the name, `rom` is a TLRAM (writable) — it stands in
// for the DSM side; `ram` stands in for the NoC side.
val rom = LazyModule(new TLRAM(address=AddressSet(0, 0xfffff),
beatBytes = 32))
val ram = LazyModule(new TLRAM(address=AddressSet(0, 0xffffff),
beatBytes = 32))
val dma = LazyModule(new DMA)
// Fragment large bursts down to the RAMs' beat size; the maximum request is
// maxBurst beats of 32 bytes.
rom.node := TLFragmenter(32, p(DMAKey).maxBurst * 32) := dma.dsm
ram.node := TLFragmenter(32, p(DMAKey).maxBurst * 32) := dma.noc
lazy val module = new LazyModuleImp(this) {
// Mirror the DMA module's IO so a tester can drive it directly.
val io = IO(dma.module.io.cloneType)
io <> dma.module.io
}
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/Config.scala
|
<reponame>mysoreanoop/chipyard
package redefine
import freechips.rocketchip.config.{Config}
// --------------
// Rocket Configs
// --------------
/** Top-level REDEFINE configuration. Config fragments compose right-to-left:
  * the rocket-chip BaseConfig is the foundation and each fragment above it
  * overrides or extends the ones below. */
class REDEFINEConfig extends Config( // has RocketChip configs only for now; can append our configs later!
new testchipip.WithTSI ++ // use testchipip serial offchip link
new freechips.rocketchip.subsystem.WithNoMMIOPort ++ // no top-level MMIO master port (overrides default set in rocketchip)
new freechips.rocketchip.subsystem.WithNoSlavePort ++ // no top-level MMIO slave port (overrides default set in rocketchip)
new freechips.rocketchip.subsystem.WithNExtTopInterrupts(0) ++ // no external interrupts
new freechips.rocketchip.subsystem.WithNBigCores(1) ++ // single rocket-core
new freechips.rocketchip.subsystem.WithCoherentBusTopology ++ // hierarchical buses including mbus+l2
new freechips.rocketchip.system.BaseConfig) // "base" rocketchip system
|
mysoreanoop/chipyard
|
generators/redefine/src/test/scala/dma/DMAMain.scala
|
<filename>generators/redefine/src/test/scala/dma/DMAMain.scala
// See README.md for license details.
package dma
import chisel3._
import freechips.rocketchip.config.{Parameters, Config}
/** Entry point for the DMA unit test: elaborates ChiselTopWrapper under
  * BaseConfig and runs the TBDMATop PeekPoke testbench.
  *
  * Uses an explicit `main` instead of the `App` trait to avoid DelayedInit
  * initialization-order pitfalls; command-line args pass straight through to
  * the iotesters driver. */
object DMAMain {
def main(args: Array[String]): Unit = {
// BaseConfig installs the DMAKey/DSMKey parameters the design elaborates with
// (Parameters.empty would fail those lookups).
implicit val p: Parameters = new BaseConfig
iotesters.Driver.execute(args, () => new ChiselTopWrapper) {
c => new TBDMATop(c)
}
}
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/stage/Stage.scala
|
<gh_stars>0
package redefine.stage
import chisel3.stage.{ChiselCli, ChiselStage}
import firrtl.options.PhaseManager.PhaseDependency
import firrtl.options.{Phase, PreservesAll, Shell}
import firrtl.stage.FirrtlCli
import freechips.rocketchip.stage.RocketChipCli
import freechips.rocketchip.system.RocketChipStage
import firrtl.options.{Phase, PhaseManager, PreservesAll, Shell, Stage, StageError, StageMain, Dependency}
import firrtl.options.phases.DeletedWrapper
/** Custom ChiselStage for REDEFINE: merges the Chipyard, RocketChip, Chisel
  * and FIRRTL CLIs into one shell and runs the rocket-chip phases (checks,
  * pre-elaboration, ROM generation, emission, test/artefact generation) in
  * the order listed below. */
class REDEFINEStage extends ChiselStage with PreservesAll[Phase] {
override val shell = new Shell("redefine") with ChipyardCli with RocketChipCli with ChiselCli with FirrtlCli
// Phase order matters: rocket-chip checks and pre-elaboration must run before
// Chisel elaboration; ROM generation and annotation conversion come after.
override val targets: Seq[PhaseDependency] = Seq(
Dependency[freechips.rocketchip.stage.phases.Checks],
Dependency[freechips.rocketchip.stage.phases.TransformAnnotations],
Dependency[freechips.rocketchip.stage.phases.PreElaboration],
Dependency[chisel3.stage.phases.Checks],
Dependency[chisel3.stage.phases.Elaborate],
Dependency[freechips.rocketchip.stage.phases.GenerateROMs],
Dependency[chisel3.stage.phases.AddImplicitOutputFile],
Dependency[chisel3.stage.phases.AddImplicitOutputAnnotationFile],
Dependency[chisel3.stage.phases.MaybeAspectPhase],
Dependency[chisel3.stage.phases.Emitter],
Dependency[chisel3.stage.phases.Convert],
Dependency[freechips.rocketchip.stage.phases.GenerateFirrtlAnnos],
Dependency[freechips.rocketchip.stage.phases.AddDefaultTests],
Dependency[freechips.rocketchip.stage.phases.GenerateArtefacts],
)
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/dma/SimpleMem.scala
|
package dma
import chisel3._
import chisel3.util._
import chisel3.util.random._
import chisel3.experimental.chiselName
import freechips.rocketchip.config.{Parameters, Field, Config}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util._
import testchipip.TLHelper
/** Configuration of the Distributed Shared Memory (DSM).
  *
  * @param temp      placeholder flag set by WithDSM — intent unclear, TODO confirm
  * @param dsmSize   total DSM capacity (presumably bytes — confirm against users)
  * @param nPorts    number of banks/ports the DSM is split across
  * @param dataWidth per-port data width in bits
  * @param addrWidth address width in bits
  */
case class DSMParams(
temp: Boolean,
dsmSize: BigInt = 0x10000,
nPorts: Int = 8,
dataWidth: Int = 32,
addrWidth: Int = 32) {
require(dsmSize%nPorts==0, "DSM size has to be in multiples of port size")
}
//class ManagerTL(implicit p: Parameters) extends LazyModule {
// val device = new SimpleDevice("ManagerTL", Seq())
// val beatBytes = 32
// val node = TLHelper.makeManagerNode(beatBytes, TLSlaveParameters.v1(
// address = Seq(AddressSet(0x0, 0xffff)),
// resources = device.reg,
// regionType = RegionType.UNCACHED,
// executable = true,
// supportsGet = TransferSizes(1, beatBytes),
// supportsPutFull = TransferSizes(1, beatBytes),
// supportsPutPartial = TransferSizes(1, beatBytes)))
// lazy val module = new LazyModuleImp(this) {
// val (tl, edge) = node.in(0)
// val mem = Module(new mkDataMem).io
// mem.wrAddr.valid := false.B
// mem.wrAddr.bits := DontCare
// mem.wrData := DontCare
// mem.strobe := DontCare
// mem.rdAddr.valid := false.B
// mem.rdAddr.bits := DontCare
//
// def groupBy4(in: UInt): Vec[Bool] = {
// val out = RegInit(VecInit(Seq.fill(8) (false.B)))
// for(i <- 0 until in.getWidth/4) {
// out(i) := in(i*4+3, i*4)
// }
// out
// }
//
// val aReady = RegInit(false.B)
// tl.a.ready := aReady
// val dValid = RegInit(false.B)
// val bytes = RegInit(0.U(8.W))
// tl.d.valid := dValid
// tl.d.bits := edge.AccessAck(tl.a.bits)
// //XXX Completely ignoring mask here!
// when(tl.a.valid) {
// //assert(tl.a.bits.size < 2.U, "Reading less than a word or nothing!")
// assert(tl.a.bits.address(1,0) === 0.U, "Address not aligned to word boundary!")
// when(tl.a.bits.opcode === 0.U || tl.a.bits.opcode === 1.U) {
// when(tl.a.bits.size <= 32.U) {
// //Single beat (with strobe)
// assert(bytes === 0.U, "Last burst txn was abandoned midway!")
// mem.wrAddr.valid := true.B
// mem.wrAddr.bits := tl.a.bits.address(15, 5)
// mem.wrData := tl.a.bits.data.asTypeOf(Vec(8, UInt(32.W)))
// //mem.strobe := Mux(tl.a.bits.size === 5.U, Fill(8, 1.U),
// // //MaskGen(tl.a.bits.addr, tl.a.bits.size, 32, 4))
// // ((1 << tl.bits.size)-1 << tl.a.bits.addr(4,2)))
// //^ cannot assert a condition here, so writer be safe!
// mem.strobe := groupBy4(tl.a.bits.mask)
// tl.a.ready := true.B
// dValid := true.B
// } .otherwise {
// //Multi beat
// dValid := false.B
// bytes := Mux(bytes === 0.U, tl.a.bits.size - 32.U,
// Mux(bytes < 32.U, 0.U, bytes - 32.U))
// mem.wrAddr.valid := true.B
// mem.wrAddr.bits := Mux(bytes === 0.U, tl.a.bits.address(15,5),
// tl.a.bits.address(15,5) + 32.U * bytes)
// mem.wrData := tl.a.bits.data.asTypeOf(Vec(8, UInt(32.W)))
// mem.strobe := groupBy4(tl.a.bits.mask)
// tl.a.ready := true.B
// dValid := bytes <= 32.U
// }
// } .elsewhen(tl.a.bits.opcode === 4.U) {
// //Mem read operations
// tl.a.ready := true.B
// aReady := false.B
// when(tl.a.bits.size <= 3.U) {
// dValid := true.B
// }
//
// }
// }
// }
//}
//memories
/* Data storage: Simple Dual-Port, Synchronous Write & Synhcronous read
* Organization: 8 * 4096 x 32-bit module
* Addressing format:
* Port select : offset[4:2]
* Addr within port: {index, way, offset[9:5]}
* Note: If both read and write are active at the time for the same addr,
* read is prioritized; For the write to complete,
* stop reading from the competing address.
*/
//class mkDataMem(implicit p: Parameters) extends Module {
// val c = p(DSMKey)
// val io = IO(new Bundle{
// val rdAddr = Flipped(Decoupled(UInt(c.addrWidth.W)))
// val rdData = Vec(c.nPorts, Output(UInt(c.dataWidth.W)))
// val wrAddr = Flipped(Decoupled(UInt(c.addrWidth.W)))
// val wrData = Vec(c.nPorts, Input(UInt(c.dataWidth.W)))
// val strobe = Vec(c.nPorts, Input(Bool()))
// })
//
// val storage = Seq.fill(c.nPorts) {SyncReadMem(c.dsmSize/c.nPorts, UInt(c.dataWidth.W))}
// val rdData = Seq.fill(c.nPorts) {RegInit(0.U(c.dataWidth.W))}
// io.rdAddr.ready := true.B
// when(io.rdAddr.valid) {
// for(i <- 0 until c.nPorts) {
// rdData(i) := storage(i).read(io.rdAddr.bits)
// }
// }
// io.rdData := rdData
//
// io.wrAddr.ready := Mux(io.rdAddr.valid, io.wrAddr.bits =/= io.rdAddr.bits, true.B)
// when(io.wrAddr.valid && io.wrAddr.bits =/= io.rdAddr.bits) {
// for(i <- 0 until c.nPorts) {
// when(io.strobe(i)) {
// storage(i).write(io.wrAddr.bits, io.wrData(i))
// }
// }
// }
//}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/stage/ChipyardAnnotations.scala
|
// See LICENSE for license details.
// Based on Rocket Chip's stage implementation
package redefine.stage
import freechips.rocketchip.stage.ConfigsAnnotation
import firrtl.options.{HasShellOptions, ShellOption}
/** This hijacks the existing ConfigAnnotation to accept the legacy _-delimited format */
/** This hijacks the existing ConfigAnnotation to accept the legacy _-delimited
  * format: "a.b.Cfg1_Cfg2" becomes the fully-qualified configs
  * Seq("a.b.Cfg1", "a.b.Cfg2"), all sharing the package of the last dotted
  * segment's prefix. */
private[stage] object UnderscoreDelimitedConfigsAnnotation extends HasShellOptions {
override val options = Seq(
new ShellOption[String](
longOption = "legacy-configs",
toAnnotationSeq = a => {
// Split "pkg.sub.CfgA_CfgB" into the package prefix and the config list.
val split = a.split('.')
val packageName = split.init.mkString(".")
val configs = split.last.split("_")
// Re-qualify each config name with the shared package.
Seq(new ConfigsAnnotation(configs map { config => s"${packageName}.${config}" } ))
},
// User-facing help text; typo fixed ("precendence" -> "precedence").
helpText = "A string of underscore-delimited configs (configs have decreasing precedence from left to right).",
shortOption = Some("LC")
)
)
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/dma/DMA.scala
|
<filename>generators/redefine/src/main/scala/dma/DMA.scala
package dma
import chisel3._
import chisel3.util._
import chisel3.util.random._
import chisel3.experimental.chiselName
import freechips.rocketchip.config.{Parameters, Field, Config}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util._
import testchipip.TLHelper
/** Configuration of the DMA engine.
  *
  * @param temp          placeholder flag set by WithDMA — intent unclear, TODO confirm
  * @param dataWidth     word width in bits
  * @param addrWidth     address width in bits
  * @param addrCtrlWidth width of the 2-D pattern fields (steps/counts)
  * @param idWidth       width of node identifiers
  * @param txnIdWidth    width of transaction identifiers
  * @param beatBytes     bytes per bus beat
  * @param maxBurst      maximum beats per burst
  * @param fifoDepth     depth of the internal staging queue
  * @param nOutstanding  outstanding requests allowed
  * @param maxDMAReqs    maximum queued DMA commands
  */
case class DMAParams(
//TODO split into multiple appropriate Config classes later
temp: Boolean,
dataWidth: Int = 32,
addrWidth: Int = 32,
addrCtrlWidth: Int = 16,
idWidth: Int = 8,
txnIdWidth:Int = 4,
beatBytes: Int = 32,
maxBurst: Int = 4,
fifoDepth: Int = 512,
nOutstanding: Int = 4,
maxDMAReqs: Int = 4) {
//derive from top config?
// Derived quantities used throughout the DMA datapath.
val busWidth = beatBytes * 8
val wordsPerBeat = busWidth/dataWidth
val maxWords = maxBurst * wordsPerBeat
val maxBytes = maxBurst * beatBytes
val lgMaxBytes = log2Up(maxBytes)
}
// Parameter keys used to look up the DSM/DMA configurations from the implicit Config.
case object DSMKey extends Field[DSMParams]
case object DMAKey extends Field[DMAParams]
// Config fragments installing default DSM/DMA parameters (`in` feeds the `temp` flag).
class WithDSM(in:Boolean) extends Config((site, here, up) => {
case DSMKey => DSMParams(temp = in)
})
class WithDMA(in: Boolean) extends Config((site, here, up) => {
case DMAKey => DMAParams(temp = in)
})
/** Descriptor of one side (source or destination) of a DMA transfer: base
  * address, target node id, and a 2-D access pattern (x = inner dimension,
  * y = outer dimension). */
class PortParam (implicit p: Parameters) extends Bundle {
val c = p(DMAKey)
//XXX currently xStep is ignored (1 is the default)
val xStep, yStep = UInt(c.addrCtrlWidth.W)
val xCnt, yCnt = UInt(c.addrCtrlWidth.W)
val addr = UInt(c.addrWidth.W)
val nodeId = UInt(c.idWidth.W)
override def cloneType = (new PortParam).asInstanceOf[this.type]
}
/** Command interface of the DMA: source and destination descriptors, a
  * direction flag, and a start strobe. */
class CSRBundle(implicit p: Parameters) extends Bundle {
val src = new PortParam
val dest = new PortParam
//val txnId = UInt(p(DMAKey).txnIdWidth.W)
val mode = Bool()//choose false: noc->dsm or true: dsm->noc
val start = Bool()
override def cloneType = (new CSRBundle).asInstanceOf[this.type]
}
/** DMA engine as a diplomacy LazyModule with two TileLink client ports:
  * `noc` towards the network-on-chip and `dsm` towards the distributed
  * shared memory. Both request FIFO ordering and 16 source ids. */
class DMA(implicit p: Parameters) extends LazyModule {
val size = p(DSMKey).dsmSize
val noc = TLHelper.makeClientNode(TLMasterParameters.v1(
name = "dmaSlaveToNoC",
sourceId = IdRange(0, 16),
requestFifo = true,
visibility = Seq(AddressSet(0x0, 0xffffff))))
val dsm = TLHelper.makeClientNode(TLMasterParameters.v1(
name = "dmaSlaveToDSM",
sourceId = IdRange(0, 16),
requestFifo = true,
visibility = Seq(AddressSet(0x0, 0xffffff))))
lazy val module = new DMAModule(this)
}
@chiselName
class DMAModule(outer: DMA) extends LazyModuleImp(outer) {
val c = p(DMAKey)
val (noc, nocEdge) = outer.noc.out(0)
val (dsm, dsmEdge) = outer.dsm.out(0)
val io = IO(new Bundle {
val cmd = Input(new CSRBundle)
//val busy = Output(Bool())
val done = Output(Bool()) //interrupt
val error = Output(Bool()) //interrupt
val dsmDoneA = Output(Bool())
val nocDoneA = Output(Bool())
val dsmDoneD = Output(Bool())
val nocDoneD = Output(Bool())
})
/* TODO Make sure to update registers only when busy is low!
* No asserts provided! */
val cmd = RegInit({val x = Wire(new CSRBundle);
x := DontCare; x})
cmd := io.cmd
//Mode select: DSM->NoC or NoC->DSM
//when(cmd.mode) {
val dst = Wire(new TLBundle(nocEdge.bundle))
val src = Wire(new TLBundle(dsmEdge.bundle))
//} .otherwise {
// val dst = Wire(new TLBundle(dsmEdge.bundle))
// val src = Wire(new TLBundle(nocEdge.bundle))
//}
dst.a.ready := Mux(cmd.mode, noc.a.ready, dsm.a.ready)
dst.d.bits := DontCare//Mux(cmd.mode, noc.d.bits , dsm.d.bits )
dst.d.valid := Mux(cmd.mode, noc.d.valid, dsm.d.valid)
noc.a.valid := Mux(cmd.mode, dst.a.valid, src.a.valid)
noc.a.bits := DontCare//Mux(cmd.mode, dst.a.bits , src.a.bits )
noc.d.ready := Mux(cmd.mode, dst.d.ready, src.d.ready)
dsm.a.valid := Mux(cmd.mode, src.a.valid, dst.a.valid)
dsm.a.bits := DontCare//Mux(cmd.mode, src.a.bits , dst.a.bits )
dsm.d.ready := Mux(cmd.mode, src.d.ready, dst.d.ready)
src.a.ready := Mux(cmd.mode, dsm.a.ready, noc.a.ready)
src.d.bits := DontCare//Mux(cmd.mode, dsm.d.bits , noc.d.bits )
src.d.valid := Mux(cmd.mode, dsm.d.valid, noc.d.valid)
src.a.bits := DontCare
dst.a.bits := DontCare
val _dAddr = Module(new AddressGenerator).suggestName("dAddr")
val _sAddr = Module(new AddressGenerator).suggestName("sAddr")
val dAddr = _dAddr.io
val sAddr = _sAddr.io
dAddr.cmd <> cmd.dest
// Kick off both address generators from the same command descriptor.
sAddr.cmd <> cmd.src
dAddr.start := cmd.start
sAddr.start := cmd.start
// One error bit per port, asserted only for the cycle in which a denied
// response is accepted on that port's D channel (not sticky).
val error = WireDefault(Cat(
noc.d.bits.denied & noc.d.fire(),
dsm.d.bits.denied & dsm.d.fire()))
assert(cmd.src.xCnt * cmd.src.yCnt === cmd.dest.xCnt * cmd.dest.yCnt,
"Total bytes to be read is not equal to the total bytes to be written!")
//TODO boundary compliance asserts
//Agent conflict asserts
//Address range conflict asserts
// sAddrDone: source address stream exhausted for this command.
// lastWrReqSent: the final write request (dAddr marked it `last`) was accepted.
// `done` pulses when the response to that final write arrives on whichever
// port is the destination (selected by cmd.mode).
val sAddrDone = RegInit(false.B)
val lastWrReqSent = RegInit(false.B)
val done = WireDefault(lastWrReqSent && Mux(cmd.mode, nocEdge.done(noc.d), dsmEdge.done(dsm.d)))
io.done := done
when(Mux(cmd.mode, nocEdge.done(noc.a), dsmEdge.done(dsm.a)) && dAddr.out.bits.last) {
lastWrReqSent := true.B
} .elsewhen(lastWrReqSent && done) {
lastWrReqSent := false.B
}
// NOTE(review): `src.a.fire` here vs `src.a.fire()` elsewhere — same call,
// inconsistent style.
when(sAddr.out.bits.last && src.a.fire && ~done) {
sAddrDone := true.B
} .elsewhen(sAddrDone && done) {
sAddrDone := false.B
}
//val busy = RegInit(false.B)
//io.busy := busy
//when(cmd.start) {
//  busy := true.B
//} .elsewhen(done) {
//  busy := false.B
//}
// 4-bit rolling TileLink source IDs for the read (sIds) and write (dIds) sides.
val sIds = RegInit(0.U(4.W))
val dIds = RegInit(0.U(4.W))
when(src.a.fire()) { sIds := sIds+1.U }
// Per-port A/D "beat done" indicators exported for observability.
io.nocDoneA := nocEdge.done(noc.a)
io.dsmDoneA := dsmEdge.done(dsm.a)
io.nocDoneD := nocEdge.done(noc.d)
io.dsmDoneD := dsmEdge.done(dsm.d)
// Bump the write-side source ID once per completed write request on the
// destination port.
when(dst.a.fire() && Mux(cmd.mode, nocEdge.done(noc.a), dsmEdge.done(dsm.a))) {
dIds := dIds+1.U
}
// Elastic buffer decoupling read responses from write requests: read data
// arriving on src.d is enqueued and later drained into dst.a.
val q = Module(new Queue(UInt((c.beatBytes*8).W), c.fifoDepth)).suggestName("q")
q.io.enq.valid := src.d.valid
q.io.enq.bits := Mux(cmd.mode, dsm.d.bits.data, noc.d.bits.data)
src.d.ready := q.io.enq.ready
// Issue read requests while the command is active and source addresses remain.
src.a.valid := cmd.start && sAddr.out.valid && ~sAddrDone
sAddr.out.ready := src.a.fire() && ~sAddrDone
/* Note:
* Mask for Get transactions need to be applied when storing the
* Get results (from dsm.a) in the mainQueue*/
//assert(get._1, "Illegal access!")
// Drain the queue into write requests; advance the destination address
// generator only once a full write request has gone out on the A channel.
q.io.deq.ready := dst.a.fire()
dst.a.valid := q.io.deq.valid && cmd.start && dAddr.out.valid
dAddr.out.ready := q.io.deq.valid && Mux(cmd.mode, nocEdge.done(noc.a), dsmEdge.done(dsm.a))
//assert(put._1, "Illegal access!")
//dst.a.bits := Mux(cmd.mode, putNoC._2, putDSM._2)
dst.d.ready := true.B//ignoring everything
// cmd.mode selects the transfer direction: when set, Get (read) from DSM and
// Put (write) the queued data to the NoC; when clear, the reverse.
when(cmd.mode) {
dsm.a.bits := dsmEdge.Get(
fromSource = sIds,
toAddress = sAddr.out.bits.addr,
lgSize = sAddr.out.bits.size)._2 //TODO mask?
noc.a.bits := nocEdge.Put(
fromSource = dIds,
toAddress = dAddr.out.bits.addr,
lgSize = dAddr.out.bits.size,
data = q.io.deq.bits,
mask = dAddr.out.bits.mask)._2
} .otherwise {
noc.a.bits := nocEdge.Get(
fromSource = sIds,
toAddress = sAddr.out.bits.addr,
lgSize = sAddr.out.bits.size)._2 //TODO mask?
dsm.a.bits := dsmEdge.Put(
fromSource = dIds,
toAddress = dAddr.out.bits.addr,
lgSize = dAddr.out.bits.size,
data = q.io.deq.bits,
mask = dAddr.out.bits.mask)._2
}
/* Throw an error out based on the error signal in d channel
* Assuming that we send out valid requests all the time*/
// NOTE(review): `error` is combinational, so io.error is a one-cycle pulse,
// not a sticky flag — confirm callers expect that.
io.error := error.orR
/* Holds the latest "Put" transaction status; TODO expose it to IO somehow
* In case something fails, we could look into this to see what error was seen
* in the Put port. The Get port error is also similarly logged.*/
// NOTE(review): debugDSM is initialised from noc.d.bits — looks like a
// copy/paste slip; dsm.d.bits was probably intended. Confirm before relying
// on these debug registers.
val debugNoC = RegInit(noc.d.bits)
val debugDSM = RegInit(noc.d.bits)
/* Problems faced when allowing masked transactions:
* 1. Start address alignment and row-end-address alignment errors (TL specific)
* 2. Storing the masked read data in an intermediate queue and then feeding it to the
* destination (write) port according to the destinations mask requires complex logic
* to select appropriate words from the queue. */
/* Future work:
* Between the queue.io and writer, and just before the reader, we could introduce
* the bus holding logic useful for interleaved txns over a single bus between
* both Orch and DMA on one side and DSM on the other side.
* Currently, we assume that the Orchestrator to DSM reqs and DMA to DSM reqs
* pass through an intermediate TLXbar which follows an RR arbitration policy.
*
* dAddr should generate addresses and operate independently of sAddr. So once
* srcAddr is done with its generation, it can move on to the next DMA command
* while dstAddr is still processing the previous request.
* However, if the direction of data transfer (DSM <> NoC) changes, then this
* would not present any obvious advantage because the put port will be ready
* before the queue.io from where the data is sourced is populated.
* But considering once the initial configuration data is downloaded from
* NoC to DSM, all succeeding transactions will likely be from DSM to NoC,
* having this feature implemented would be intermittantly advantageous.*/
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/dma/AddrGen.scala
|
package dma
import chisel3._
import chisel3.util._
import chisel3.util.Reverse
import freechips.rocketchip.config.{Parameters, Field, Config}
/** Working state threaded through AddressGenerator: the remaining word count
 * in the current row (xCnt), the remaining row count (yCnt), and the next
 * address to issue.
 *
 * @param addrCtrlWidth width in bits of the xCnt/yCnt counter fields
 * @param addrWidth width in bits of the address field
 */
class ProcBun(addrCtrlWidth: Int, addrWidth: Int) extends Bundle {
val xCnt = UInt(addrCtrlWidth.W)
val yCnt = UInt(addrCtrlWidth.W)
val addr = UInt(addrWidth.W)
// Required boilerplate for parameterised Bundles on pre-3.5 Chisel.
override def cloneType = (new ProcBun(addrCtrlWidth, addrWidth)).asInstanceOf[this.type]
}
/** One generated address beat emitted by AddressGenerator. */
class AddrOut(implicit p: Parameters) extends Bundle {
val c = p(DMAKey)
val addr = UInt(c.addrWidth.W)             // byte address of this request
val size = UInt(log2Up(c.lgMaxBytes).W)    // TileLink lgSize of the request
val mask = UInt(c.beatBytes.W)             // per-byte write mask for the beat
val last = Bool()                          // set on the final beat of the transfer
override def cloneType = (new AddrOut).asInstanceOf[this.type]
}
/** Produces the addresses to read from or write to.
 *
 * Walks a two-dimensional access pattern (io.cmd: xCnt words per row, yCnt
 * rows, addr start address, yStep row stride) and emits one AddrOut beat —
 * address, TileLink lgSize, byte mask and a `last` marker — per io.out
 * handshake. A rising edge on io.start (re)loads the descriptor; io.start is
 * then held high while beats are consumed.
 */
class AddressGenerator(implicit p: Parameters) extends Module {
  val c = p(DMAKey)
  val io = IO(new Bundle {
    val cmd = Input(new PortParam)     // transfer descriptor
    val start = Input(Bool())          // level signal; rising edge loads cmd
    val out = Decoupled(new AddrOut)   // generated address beats
  })
  // Working copy of the descriptor; advanced every time a beat is accepted.
  val cmd = RegInit({val n = Wire(new ProcBun(c.addrCtrlWidth, c.addrWidth));
    n := DontCare; n })
  // Kept for interface compatibility; io.out.bits.last recomputes this value.
  val last = calcLast(cmd.xCnt, cmd.yCnt)
  // Valid whenever start is held high; the load cycle below overrides this to
  // false (Chisel last-connect semantics), so no beat escapes while loading.
  io.out.valid := io.start
  when(catchRisingEdge(io.start)) {
    io.out.valid := false.B
    //when(io.req) {
    //  io.out.bits.addr := io.cmd.addr
    //  io.out.bits.size := calcSize(io.cmd.xCnt)
    //  io.out.bits.mask := calcMask(io.cmd.xCnt)
    //  io.out.bits.last := calcLast(io.cmd.xCnt, io.cmd.yCnt)
    //  cmd := calcAddr({val x = Wire(new ProcBun(c.addrCtrlWidth, c.addrWidth));
    //    x.xCnt := io.cmd.xCnt
    //    x.yCnt := io.cmd.yCnt
    //    x.addr := io.cmd.addr
    //    x
    //  })
    //} .otherwise {
    cmd.xCnt := io.cmd.xCnt
    cmd.yCnt := io.cmd.yCnt
    cmd.addr := io.cmd.addr
    //}
  } .elsewhen(io.out.fire()) {
    cmd := calcAddr(cmd)
  }
  io.out.bits.addr := cmd.addr
  io.out.bits.size := calcSize(cmd.xCnt)
  io.out.bits.mask := calcMask(cmd.xCnt)
  io.out.bits.last := calcLast(cmd.xCnt, cmd.yCnt)

  /** One-cycle pulse on the rising edge of n. */
  def catchRisingEdge(n: Bool): Bool = { n && ~RegNext(n) }

  /** TileLink lgSize for the next request: a full lgMaxBytes burst while at
   * least maxWords words remain in the row, otherwise a single beat (size 0).
   */
  def calcSize(x: UInt): UInt = {
    // NOTE(review): a width of c.maxWords bits is far wider than needed and
    // differs from AddrOut.size (log2Up(c.lgMaxBytes) bits); the connection
    // truncates. Left unchanged to avoid altering elaborated widths.
    val out = Wire(UInt(c.maxWords.W))
    when(x >= c.maxWords.U) {
      out := c.lgMaxBytes.U
    } .otherwise {
      out := 0.U //means 1 beat
    }
    out
  }

  /** Byte mask for the next beat: all-ones for a full beat, otherwise a mask
   * covering only the x words left over at the end of a row.
   */
  def calcMask(x: UInt): UInt = {
    def createMask(n: UInt): UInt = {
      //Reverse((1.U << n) - 1.U) TODO hangs!!!
      (1.U << n) - 1.U
    }
    val out = Wire(UInt(c.beatBytes.W))
    when(x >= c.wordsPerBeat.U) {
      out := Fill(c.beatBytes, 1.U)
    } .otherwise {
      out := createMask(x)
      //^for the remaining words that cannot make up an entire beat
    }
    out
  }

  /** True when the beat about to be issued is the final one of the transfer:
   * last row (y == 0) and the remaining words fit in one more request.
   * Suppressed on the load cycle so a stale count cannot flag `last`.
   */
  def calcLast(x: UInt, y: UInt): Bool = {
    val out = WireInit(false.B)
    when(y === 0.U) {
      when((x === c.maxWords.U || x <= c.wordsPerBeat.U)
          && ~catchRisingEdge(io.start)) {
        out := true.B
      }
    }
    out
  }

  /** Advance the working descriptor past the beat just issued. */
  def calcAddr(n: ProcBun) = {
    // FIX: the scratch bundle must use addrCtrlWidth for the count fields.
    // The original passed c.addrWidth twice, making out.xCnt/out.yCnt wider
    // than the `cmd` register this result is assigned back into.
    val out = Wire(new ProcBun(c.addrCtrlWidth, c.addrWidth))
    out := n
    when(n.xCnt > 0.U) {
      /*This exploits busWidth and burst advantages.*/
      when(n.xCnt >= c.maxWords.U) {
        when(n.xCnt === c.maxWords.U) {
          // Exactly one max-size burst left in this row: step to the next row
          // (reload xCnt, decrement yCnt) or finish when no rows remain.
          out.xCnt := Mux(n.yCnt > 0.U, io.cmd.xCnt, 0.U)
          out.yCnt := Mux(n.yCnt === 0.U, 0.U, n.yCnt - 1.U)
          out.addr := n.addr + c.maxBytes.U + io.cmd.yStep * (c.dataWidth/8).U
        } .otherwise {
          out.xCnt := n.xCnt - c.maxWords.U
          out.addr := n.addr + c.maxBytes.U
        }
      } .otherwise {
        /* Cut it up into as many single beats and one final masked beat
         * Determining single max possible burstSize is costly*/
        when(n.xCnt >= c.wordsPerBeat.U) {
          when(n.xCnt === c.wordsPerBeat.U) {
            out.xCnt := Mux(n.yCnt > 0.U, io.cmd.xCnt, 0.U)
            out.addr := n.addr + c.beatBytes.U + io.cmd.yStep * (c.dataWidth/8).U
            out.yCnt := Mux(n.yCnt === 0.U, 0.U, n.yCnt - 1.U)
          } .otherwise {
            out.xCnt := n.xCnt - c.wordsPerBeat.U
            out.addr := n.addr + c.beatBytes.U
          }
        } .otherwise { //masked beat: fewer than wordsPerBeat words remain
          out.xCnt := Mux(n.yCnt > 0.U, io.cmd.xCnt, 0.U)
          out.addr := n.addr + n.xCnt * (c.dataWidth/8).U
          out.yCnt := Mux(n.yCnt === 0.U, 0.U, n.yCnt - 1.U)
        }
      }
    } .otherwise {
      // Unreachable by construction: calcAddr only runs after a beat fired,
      // which requires xCnt > 0 ("don't come here!").
    }
    out
  }
}
|
mysoreanoop/chipyard
|
generators/redefine/src/main/scala/core/Generator.scala
|
package redefine
import firrtl.options.{StageMain}
import redefine.stage.REDEFINEStage
object Generator extends StageMain(new REDEFINEStage)
|
sehirsig/malefiz
|
src/main/scala/de/htwg/se/malefiz/controller/controllerComponent/controllerStubImpl/Controller.scala
|
<gh_stars>1-10
package de.htwg.se.malefiz.controller.controllerComponent.controllerStubImpl
import de.htwg.se.malefiz.controller.controllerComponent._
import de.htwg.se.malefiz.controller.controllerComponent.GameStatus._
import de.htwg.se.malefiz.model.cellComponent._
import de.htwg.se.malefiz.model.gameComponent._
import de.htwg.se.malefiz.model.gameboardComponent
import de.htwg.se.malefiz.model.gameboardComponent.{GameboardInterface, lastSaveInterface}
import de.htwg.se.malefiz.model.playerComponent._
import scala.swing.Publisher
/** A stub implementation of our Controller.
 * Descriptions of the functions are in the base implementation
 * (ControllerInterface). Every member here is either a fixed inert default
 * or a no-op, so components that depend on a controller can be exercised in
 * isolation.
 *
 * @author sehirsig & franzgajewski
 */
case class Controller(var gameboard: GameboardInterface) extends ControllerInterface with Publisher{
// Mutable controller state, initialised to inert defaults.
override var gameStatus: GameStatus = IDLE
override var playerStatus: PlayerState = PlayerState1
override var moveCounter: Int = 0
override val builder: PlayerBuilder = PlayerBuilderImp()
override var game: Game = Game(Vector[Player]())
override var gameWon: (Boolean, String) = (false, "")
override var savedGame: lastSaveInterface = gameboardComponent.lastSave(0, "", InvalidCell)
override var selectedFigNum: Int = 0;
// Query stubs: return fixed, harmless values.
override def getpureCell(name: String): Cell = InvalidCell
// Action stubs: intentionally do nothing.
override def resetGame(): Unit = {}
override def selectFigure(x: Int): Unit = {}
override def addPlayer(): Unit = {}
override def addPlayerName(name: String): Unit = {}
override def startGame(): Unit = {}
override def setupGame(): Unit = {}
override def boardToString(): String = ""
override def rollDice(): Int = 0
override def checkWin(): Unit = {}
override def setBlockStrategy(blockStrategy: String): Unit = {}
override def move(input: String, figurenum: Int): Unit = {}
override def emptyMan: Unit = {}
override def undoAll: Unit = {}
override def undo: Unit = {}
override def redo: Unit = {}
override def save: Unit = {}
override def load: Unit = {}
override def addPlayerDEBUGWINTEST(name: String): Unit = {}
override def debugDice(): Unit = {}
}
|
sehirsig/malefiz
|
src/test/scala/de/htwg/se/malefiz/model/gameComponent/GameSpec.scala
|
<gh_stars>1-10
package de.htwg.se.malefiz.model.gameComponent
import de.htwg.se.malefiz.model.playerComponent.Player
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
/** Unit tests for the Game model: player count, player access and the
 * immutable addPlayer operation.
 *
 * @author sehirsig & franzgajewski
 */
class GameSpec extends AnyWordSpec with Matchers {
  "A Game" when {
    "new" should {
      // Fixture: a game with two players.
      val alice = Player("alice", 1, (14, 3))
      val bob = Player("bob", 2, (14, 7))
      val twoPlayerGame = Game(Vector(alice, bob))
      "have a quantity" in {
        twoPlayerGame.getPlayerNumber() should be(2)
      }
      "have players" in {
        twoPlayerGame.players.head.name should be("alice")
      }
      "add player" in {
        // addPlayer returns a new Game; the fixture stays at two players.
        val charly = Player("charly", 3, (14, 11))
        twoPlayerGame.addPlayer(charly).getPlayerNumber() should be(3)
      }
    }
  }
}
|
sehirsig/malefiz
|
src/test/scala/de/htwg/se/malefiz/controller/controllerComponent/PlayerStateSpec.scala
|
package de.htwg.se.malefiz.controller.controllerComponent
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
/** Test class for the PlayerState class.
 *
 * Fix: each example builds its own PlayerState instead of sharing a mutable
 * `var` across examples, so the tests are independent of execution order.
 * (Each original example happened to end back at player 1, so starting each
 * example from PlayerState1 asserts exactly the same sequence.)
 *
 * @author sehirsig & franzgajewski
 */
class PlayerStateSpec extends AnyWordSpec with Matchers {
  "A PlayerState" when {
    "should" should {
      "iterate correctly with 2 Players" in {
        var playerStatus: PlayerState = PlayerState1
        playerStatus.getCurrentPlayer should be (1)
        playerStatus = playerStatus.nextPlayer(2)
        playerStatus.getCurrentPlayer should be (2)
        playerStatus = playerStatus.nextPlayer(2)
        playerStatus.getCurrentPlayer should be (1)
      }
      "iterate correctly with 3 Players" in {
        var playerStatus: PlayerState = PlayerState1
        playerStatus = playerStatus.nextPlayer(3)
        playerStatus.getCurrentPlayer should be (2)
        playerStatus = playerStatus.nextPlayer(3)
        playerStatus.getCurrentPlayer should be (3)
        playerStatus = playerStatus.nextPlayer(3)
        playerStatus.getCurrentPlayer should be (1)
      }
      "iterate correctly with 4 Players" in {
        var playerStatus: PlayerState = PlayerState1
        playerStatus = playerStatus.nextPlayer(4)
        playerStatus.getCurrentPlayer should be (2)
        playerStatus = playerStatus.nextPlayer(4)
        playerStatus.getCurrentPlayer should be (3)
        playerStatus = playerStatus.nextPlayer(4)
        playerStatus.getCurrentPlayer should be (4)
        playerStatus = playerStatus.nextPlayer(4)
        playerStatus.getCurrentPlayer should be (1)
      }
    }
  }
}
|
sehirsig/malefiz
|
src/main/scala/de/htwg/se/malefiz/model/gameboardComponent/GameboardInterface.scala
|
<reponame>sehirsig/malefiz
package de.htwg.se.malefiz.model.gameboardComponent
import de.htwg.se.malefiz.model.cellComponent.Cell
import de.htwg.se.malefiz.model.playerComponent.Player
import scala.util.Try
/** Interface for the game board implementation.
*
* @author sehirsig & franzgajewski
*/
trait GameboardInterface {
/** Game board matrix (rows of cells). */
val rows:Vector[Vector[Cell]]
/** Change of the block strategy.
*
* @param blockstrategy "remove" or "replace"
*/
def setBlockStrategy(blockstrategy: String): Unit
/** Function for handling barricades according to the block strategy.
*
* @param spielbrett old game board
* @return new game board with changed blocking
*/
def replaceBlocks(spielbrett: GameboardInterface): GameboardInterface
/** New game board in the dimensions given in the settings class.
*
* @return new game board
*/
def newGBStandardSize: GameboardInterface
/** Retrieve default size from the settings class.
*
* @return tuple of the default x and y dimensions of the game board
*/
def getStandardXYsize: (Int,Int)
/** Get name of cell (e.g. for JSON/XML storage).
* @param cell cell to serialise
*
* @return string representation of cell
*/
def getStringOfCell(cell:Cell): String
/** Returns the cell at the given coordinates of the game board.
*
* @param row x-coordinate of the matrix
* @param col y-coordinate of the matrix
* @return cell at the given coordinates
*/
def cell(row: Int, col: Int): Cell
/** Takes cell coordinates and returns the corresponding string representation.
*
* @param row x coordinate
* @param col y coordinate
*
* @return string representation of cell
*/
def cellString(row: Int, col: Int): String
/** Change a cell on the game board.
*
* @param row x-coordinate of the matrix
* @param col y-coordinate of the matrix
* @param cell cell to be put in place
* @return new game board, wrapped in Try
*/
def replaceCell(row: Int, col: Int, cell: Cell): Try[GameboardInterface]
/** Move a player.
*
* @param coord coordinate as an int-tuple of the matrix
* @param cell cell to be put at that coordinate
* @return new game board
*/
def movePlayer(coord: (Int, Int), cell: Cell): GameboardInterface
/** Check, if walking up is legal and do so if possible.
*
* @param spielbrett old game board
* @param player current player
* @param currentCoord coordinates of current figure
* @param figurenum number of figure
* @param walksLeft number of remaining movements
*
* @return tuple of boolean to indicate whether the move was successful and the new game board
*/
def walkUp(spielbrett: GameboardInterface, player: Player, currentCoord: (Int, Int), figurenum: Int, walksLeft: Int): (Boolean, GameboardInterface)
/** Check, if walking down is legal and do so if possible.
*
* @param spielbrett old game board
* @param player current player
* @param currentCoord coordinates of current figure
* @param figurenum number of figure
* @param walksLeft number of remaining movements
*
* @return tuple of boolean to indicate whether the move was successful and the new game board
*/
def walkDown(spielbrett: GameboardInterface, player: Player, currentCoord: (Int, Int), figurenum: Int, walksLeft: Int): (Boolean, GameboardInterface)
/** Check, if walking left is legal and do so if possible.
*
* @param spielbrett old game board
* @param player current player
* @param currentCoord coordinates of current figure
* @param figurenum number of figure
* @param walksLeft number of remaining movements
*
* @return tuple of boolean to indicate whether the move was successful and the new game board
*/
def walkLeft(spielbrett: GameboardInterface, player: Player, currentCoord: (Int, Int), figurenum: Int, walksLeft: Int): (Boolean, GameboardInterface)
/** Check, if walking right is legal and do so if possible.
*
* @param spielbrett old game board
* @param player current player
* @param currentCoord coordinates of current figure
* @param figurenum number of figure
* @param walksLeft number of remaining movements
*
* @return tuple of boolean to indicate whether the move was successful and the new game board
*/
def walkRight(spielbrett: GameboardInterface, player: Player, currentCoord: (Int, Int), figurenum: Int, walksLeft: Int): (Boolean, GameboardInterface)
/** Die roll.
*
* @return number from 1 to 6
*/
def diceRoll: Int
/** Changes the coordinates of a tuple so as to go down one cell.
*
* @param oldcord tuple of old x and y coordinates
*
* @return tuple of new x and y coordinates
*/
def goDown(oldcord: (Int, Int)): (Int, Int)
/** Changes the coordinates of a tuple so as to go up one cell.
*
* @param oldcord tuple of old x and y coordinates
*
* @return tuple of new x and y coordinates
*/
def goUp(oldcord: (Int, Int)): (Int, Int)
/** Changes the coordinates of a tuple so as to go right one cell.
*
* @param oldcord tuple of old x and y coordinates
*
* @return tuple of new x and y coordinates
*/
def goRight(oldcord: (Int, Int)): (Int, Int)
/** Changes the coordinates of a tuple so as to go left one cell.
*
* @param oldcord tuple of old x and y coordinates
*
* @return tuple of new x and y coordinates
*/
def goLeft(oldcord: (Int, Int)): (Int, Int)
/** Takes a string and returns the corresponding cell.
*
* @param name string representation of cell
*
* @return cell
*/
def getCell(name:String): Cell
/** Checks if a player is on the goal.
*
* @return boolean
*/
def checkPlayerOnGoal: Boolean
/** Get coordinates of the base of player 1.
*
* @return tuple of x and y coordinates
*/
def getP1Base: (Int,Int)
/** Get coordinates of the base of player 2.
*
* @return tuple of x and y coordinates
*/
def getP2Base: (Int,Int)
/** Get coordinates of the base of player 3.
*
* @return tuple of x and y coordinates
*/
def getP3Base: (Int,Int)
/** Get coordinates of the base of player 4.
*
* @return tuple of x and y coordinates
*/
def getP4Base: (Int,Int)
/** Get coordinates of the goal cell.
*
* @return tuple of x and y coordinates
*/
def getGoalBase: (Int,Int)
}
/** Interface for the last move of a player — a snapshot of the most recent
* movement step (presumably used for undo support; confirm against the
* controller implementation).
*
* @author sehirsig & franzgajewski
*/
trait lastSaveInterface {
/** Last rolled number.
*
* @return Int
*/
val lastFullDice: Int
/** Last walked direction.
* NOTE(review): the name suggests this stores the OPPOSITE of the last
* walked direction — confirm against the implementation.
*
* @return string "w", "s", "a" or "d"
*/
val lastDirectionOpposite: String
/** Cell last occupied by a given figure.
*
* @return cell
*/
val lastCell: Cell
/** Update last rolled number.
*
* @param newNum newly rolled number
* @return new interface
*/
def updateLastFullDice(newNum: Int): lastSaveInterface
/** Update last walked direction.
*
* @param newDic new direction
* @return new interface
*/
def updateLastDirection(newDic: String): lastSaveInterface
/** Update last occupied cell.
*
* @param newCel new cell
* @return new interface.
*/
def updatelastCell(newCel: Cell): lastSaveInterface
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.