repo_name | path | text
|---|---|---|
amur-host/node
|
src/test/scala/com/amurplatform/http/AssetsBroadcastRouteSpec.scala
|
package com.amurplatform.http
import akka.http.scaladsl.model.StatusCodes
import com.typesafe.config.ConfigFactory
import com.amurplatform.RequestGen
import com.amurplatform.http.ApiMarshallers._
import com.amurplatform.settings.RestAPISettings
import com.amurplatform.state.Diff
import com.amurplatform.state.diffs.TransactionDiffer.TransactionValidationError
import com.amurplatform.utx.{UtxBatchOps, UtxPool}
import io.netty.channel.group.ChannelGroup
import org.scalacheck.Gen._
import org.scalacheck.{Gen => G}
import org.scalamock.scalatest.PathMockFactory
import org.scalatest.prop.PropertyChecks
import play.api.libs.json.{JsObject, JsValue, Json, Writes}
import com.amurplatform.api.http._
import com.amurplatform.api.http.assets._
import com.amurplatform.utils.Base58
import com.amurplatform.transaction.ValidationError.GenericError
import com.amurplatform.transaction.transfer._
import com.amurplatform.transaction.{Proofs, Transaction, ValidationError}
import com.amurplatform.wallet.Wallet
import shapeless.Coproduct
class AssetsBroadcastRouteSpec extends RouteSpec("/assets/broadcast/") with RequestGen with PathMockFactory with PropertyChecks {
private val settings = RestAPISettings.fromConfig(ConfigFactory.load())
private val utx = stub[UtxPool]
private val allChannels = stub[ChannelGroup]
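// The stubbed UTX pool below rejects every transaction with GenericError("foo"),
// so each broadcast route in this suite is expected to answer with StateCheckFailed.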
(utx.putIfNew _).when(*).onCall((t: Transaction) => Left(TransactionValidationError(GenericError("foo"), t))).anyNumberOfTimes()
"returns StateCheckFailed" - {
val route = AssetsBroadcastApiRoute(settings, utx, allChannels).route
val vt = Table[String, G[_ <: Transaction], (JsValue) => JsValue](
("url", "generator", "transform"),
("issue", issueGen.retryUntil(_.version == 1), identity),
("reissue", reissueGen.retryUntil(_.version == 1), identity),
("burn", burnGen.retryUntil(_.version == 1), {
case o: JsObject => o ++ Json.obj("quantity" -> o.value("amount"))
case other => other
}),
("transfer", transferV1Gen, {
case o: JsObject if o.value.contains("feeAsset") =>
o ++ Json.obj("feeAssetId" -> o.value("feeAsset"), "quantity" -> o.value("amount"))
case other => other
})
)
def posting(url: String, v: JsValue): RouteTestResult = Post(routePath(url), v) ~> route
"when state validation fails" in {
forAll(vt) { (url, gen, transform) =>
forAll(gen) { (t: Transaction) =>
posting(url, transform(t.json())) should produce(StateCheckFailed(t, "foo"))
}
}
}
}
"returns appropriate error code when validation fails for" - {
val route = AssetsBroadcastApiRoute(settings, utx, allChannels).route
"issue transaction" in forAll(broadcastIssueReq) { ir =>
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("issue"), v) ~> route
forAll(nonPositiveLong) { q =>
posting(ir.copy(fee = q)) should produce(InsufficientFee())
}
forAll(nonPositiveLong) { q =>
posting(ir.copy(quantity = q)) should produce(NegativeAmount(s"$q of assets"))
}
forAll(invalidDecimals) { d =>
posting(ir.copy(decimals = d)) should produce(TooBigArrayAllocation)
}
forAll(longDescription) { d =>
posting(ir.copy(description = d)) should produce(TooBigArrayAllocation)
}
forAll(invalidName) { name =>
posting(ir.copy(name = name)) should produce(InvalidName)
}
forAll(invalidBase58) { name =>
posting(ir.copy(name = name)) should produce(InvalidName)
}
forAll(nonPositiveLong) { fee =>
posting(ir.copy(fee = fee)) should produce(InsufficientFee())
}
}
"reissue transaction" in forAll(broadcastReissueReq) { rr =>
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("reissue"), v) ~> route
// todo: invalid sender
forAll(nonPositiveLong) { q =>
posting(rr.copy(quantity = q)) should produce(NegativeAmount(s"$q of assets"))
}
forAll(nonPositiveLong) { fee =>
posting(rr.copy(fee = fee)) should produce(InsufficientFee())
}
}
"burn transaction" in forAll(broadcastBurnReq) { br =>
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("burn"), v) ~> route
forAll(invalidBase58) { pk =>
posting(br.copy(senderPublicKey = pk)) should produce(InvalidAddress)
}
forAll(nonPositiveLong) { q =>
posting(br.copy(quantity = q)) should produce(NegativeAmount(s"$q of assets"))
}
forAll(nonPositiveLong) { fee =>
posting(br.copy(fee = fee)) should produce(InsufficientFee())
}
}
"transfer transaction" in forAll(broadcastTransferReq) { tr =>
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("transfer"), v) ~> route
forAll(nonPositiveLong) { q =>
posting(tr.copy(amount = q)) should produce(NegativeAmount(s"$q of amur"))
}
forAll(invalidBase58) { pk =>
posting(tr.copy(senderPublicKey = pk)) should produce(InvalidAddress)
}
forAll(invalidBase58) { a =>
posting(tr.copy(recipient = a)) should produce(InvalidAddress)
}
forAll(invalidBase58) { a =>
posting(tr.copy(assetId = Some(a))) should produce(CustomValidationError("invalid.assetId"))
}
forAll(invalidBase58) { a =>
posting(tr.copy(feeAssetId = Some(a))) should produce(CustomValidationError("invalid.feeAssetId"))
}
forAll(longAttachment) { a =>
posting(tr.copy(attachment = Some(a))) should produce(CustomValidationError("invalid.attachment"))
}
forAll(posNum[Long]) { quantity =>
posting(tr.copy(amount = quantity, fee = Long.MaxValue)) should produce(OverflowError)
}
forAll(nonPositiveLong) { fee =>
posting(tr.copy(fee = fee)) should produce(InsufficientFee())
}
}
}
"compatibility" - {
val alwaysApproveUtx = stub[UtxPool]
val utxOps = new UtxBatchOps {
override def putIfNew(tx: Transaction): Either[ValidationError, (Boolean, Diff)] = alwaysApproveUtx.putIfNew(tx)
}
(alwaysApproveUtx.batched[Any] _).when(*).onCall((f: UtxBatchOps => Any) => f(utxOps)).anyNumberOfTimes()
(alwaysApproveUtx.putIfNew _).when(*).onCall((_: Transaction) => Right((true, Diff.empty))).anyNumberOfTimes()
val alwaysSendAllChannels = stub[ChannelGroup]
(alwaysSendAllChannels.writeAndFlush(_: Any)).when(*).onCall((_: Any) => null).anyNumberOfTimes()
val route = AssetsBroadcastApiRoute(settings, alwaysApproveUtx, alwaysSendAllChannels).route
val seed = "seed".getBytes()
val senderPrivateKey = Wallet.generateNewAccount(seed, 0)
val receiverPrivateKey = Wallet.generateNewAccount(seed, 1)
val transferRequest = createSignedTransferRequest(
TransferTransactionV1
.selfSigned(
assetId = None,
sender = senderPrivateKey,
recipient = receiverPrivateKey.toAddress,
amount = 1 * Amur,
timestamp = System.currentTimeMillis(),
feeAssetId = None,
feeAmount = Amur / 3,
attachment = Array.emptyByteArray
)
.right
.get
)
val versionedTransferRequest = createSignedVersionedTransferRequest(
TransferTransactionV2
.create(
assetId = None,
sender = senderPrivateKey,
recipient = receiverPrivateKey.toAddress,
amount = 1 * Amur,
timestamp = System.currentTimeMillis(),
feeAssetId = None,
feeAmount = Amur / 3,
attachment = Array.emptyByteArray,
version = 2,
proofs = Proofs(Seq.empty)
)
.right
.get)
"/transfer" - {
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("transfer"), v).addHeader(ApiKeyHeader) ~> route
"accepts TransferRequest" in posting(transferRequest) ~> check {
status shouldBe StatusCodes.OK
responseAs[TransferTransactions].select[TransferTransactionV1] shouldBe defined
}
"accepts VersionedTransferRequest" in posting(versionedTransferRequest) ~> check {
status shouldBe StatusCodes.OK
responseAs[TransferTransactions].select[TransferTransactionV2] shouldBe defined
}
"returns a error if it is not a transfer request" in posting(issueReq.sample.get) ~> check {
status shouldNot be(StatusCodes.OK)
}
}
"/batch-transfer" - {
def posting[A: Writes](v: A): RouteTestResult = Post(routePath("batch-transfer"), v).addHeader(ApiKeyHeader) ~> route
"accepts TransferRequest" in posting(List(transferRequest)) ~> check {
status shouldBe StatusCodes.OK
val xs = responseAs[Seq[TransferTransactions]]
xs.size shouldBe 1
xs.head.select[TransferTransactionV1] shouldBe defined
}
"accepts VersionedTransferRequest" in posting(List(versionedTransferRequest)) ~> check {
status shouldBe StatusCodes.OK
val xs = responseAs[Seq[TransferTransactions]]
xs.size shouldBe 1
xs.head.select[TransferTransactionV2] shouldBe defined
}
"accepts both TransferRequest and VersionedTransferRequest" in {
val reqs = List(
Coproduct[SignedTransferRequests](transferRequest),
Coproduct[SignedTransferRequests](versionedTransferRequest)
)
posting(reqs) ~> check {
status shouldBe StatusCodes.OK
val xs = responseAs[Seq[TransferTransactions]]
xs.size shouldBe 2
xs.flatMap(_.select[TransferTransactionV1]) shouldNot be(empty)
xs.flatMap(_.select[TransferTransactionV2]) shouldNot be(empty)
}
}
"returns a error if it is not a transfer request" in posting(List(issueReq.sample.get)) ~> check {
status shouldNot be(StatusCodes.OK)
}
}
}
protected def createSignedTransferRequest(tx: TransferTransactionV1): SignedTransferV1Request = {
import tx._
SignedTransferV1Request(
Base58.encode(tx.sender.publicKey),
assetId.map(_.base58),
recipient.stringRepr,
amount,
fee,
feeAssetId.map(_.base58),
timestamp,
attachment.headOption.map(_ => Base58.encode(attachment)),
signature.base58
)
}
protected def createSignedVersionedTransferRequest(tx: TransferTransactionV2): SignedTransferV2Request = {
import tx._
SignedTransferV2Request(
Base58.encode(tx.sender.publicKey),
assetId.map(_.base58),
recipient.stringRepr,
amount,
feeAssetId.map(_.base58),
fee,
timestamp,
version,
attachment.headOption.map(_ => Base58.encode(attachment)),
proofs.proofs.map(_.base58).toList
)
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/state/BlockMinerInfo.scala
|
package com.amurplatform.state
import com.amurplatform.block.Block.BlockId
import com.amurplatform.consensus.nxt.NxtLikeConsensusBlockData
case class BlockMinerInfo(consensus: NxtLikeConsensusBlockData, timestamp: Long, blockId: BlockId)
|
amur-host/node
|
lang/jvm/src/test/scala/com/amurplatform/lang/SerdeTest.scala
|
package com.amurplatform.lang
import com.amurplatform.lang.Common._
import com.amurplatform.lang.v1.compiler.CompilerV1
import com.amurplatform.lang.v1.compiler.Terms._
import com.amurplatform.lang.v1.evaluator.ctx.impl.PureContext
import com.amurplatform.lang.v1.parser.Expressions
import com.amurplatform.lang.v1.testing.ScriptGen
import com.amurplatform.lang.v1.{FunctionHeader, Serde}
import org.scalatest.prop.PropertyChecks
import org.scalatest.{Assertion, FreeSpec, Matchers}
import scodec.bits.ByteVector
class SerdeTest extends FreeSpec with PropertyChecks with Matchers with ScriptGen with NoShrink {
"roundtrip" - {
"CONST_LONG" in roundTripTest(CONST_LONG(1))
"CONST_BYTEVECTOR" in roundTripTest(CONST_BYTEVECTOR(ByteVector[Byte](1)))
"CONST_STRING" in roundTripTest(CONST_STRING("foo"))
"IF" in roundTripTest(IF(TRUE, CONST_LONG(0), CONST_LONG(1)))
"BLOCK" in roundTripTest(
BLOCK(
let = LET("foo", TRUE),
body = FALSE
)
)
"REF" in roundTripTest(REF("foo"))
"TRUE" in roundTripTest(TRUE)
"FALSE" in roundTripTest(FALSE)
"GETTER" in roundTripTest(GETTER(REF("foo"), "bar"))
"FUNCTION_CALL" - {
"native" in roundTripTest(
FUNCTION_CALL(
function = FunctionHeader.Native(1),
args = List(TRUE)
)
)
"user" in roundTripTest(
FUNCTION_CALL(
function = FunctionHeader.User("foo"),
args = List(TRUE)
)
)
"empty args" in roundTripTest(
FUNCTION_CALL(
function = FunctionHeader.User("foo"),
args = List.empty
)
)
}
"general" in forAll(BOOLgen(10)) {
case (untypedExpr, _) => roundTripTest(untypedExpr)
}
"stack safety" in {
val bigSum = (1 to 10000).foldLeft[EXPR](CONST_LONG(0)) { (r, i) =>
FUNCTION_CALL(
function = PureContext.sumLong,
args = List(r, CONST_LONG(i))
)
}
val expr: EXPR = FUNCTION_CALL(
function = PureContext.eq,
args = List(CONST_LONG(1), bigSum)
)
Serde.serialize(expr).nonEmpty shouldBe true
}
}
private def roundTripTest(untypedExpr: Expressions.EXPR): Assertion = {
val typedExpr = CompilerV1(PureContext.compilerContext, untypedExpr).map(_._1).explicitGet()
roundTripTest(typedExpr)
}
private def roundTripTest(typedExpr: EXPR): Assertion = {
val encoded = Serde.serialize(typedExpr)
encoded.nonEmpty shouldBe true
val decoded = Serde.deserialize(encoded).explicitGet()
withClue(s"encoded bytes: [${encoded.mkString(", ")}]") {
decoded shouldEqual typedExpr
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/VersionedTransaction.scala
|
package com.amurplatform.transaction
trait VersionedTransaction {
def version: Byte
}
|
amur-host/node
|
src/test/scala/com/amurplatform/transaction/smart/script/ScriptReaderTest.scala
|
package com.amurplatform.transaction.smart.script
import com.amurplatform.crypto
import com.amurplatform.lang.ScriptVersion.Versions.V1
import com.amurplatform.lang.v1.Serde
import com.amurplatform.lang.v1.compiler.Terms.TRUE
import com.amurplatform.state.diffs.produce
import org.scalatest.{FreeSpec, Matchers}
class ScriptReaderTest extends FreeSpec with Matchers {
val checksumLength = 4
"should parse all bytes for V1" in {
val body = Array(V1.value.toByte) ++ Serde.serialize(TRUE) ++ "foo".getBytes
val allBytes = body ++ crypto.secureHash(body).take(checksumLength)
ScriptReader.fromBytes(allBytes) should produce("bytes left")
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/metrics/Metrics.scala
|
package com.amurplatform.metrics
import java.net.URI
import java.util.concurrent.TimeUnit
import com.amurplatform.utils.{ScorexLogging, TimeImpl}
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.SchedulerService
import org.influxdb.dto.Point
import org.influxdb.{InfluxDB, InfluxDBFactory}
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal
object Metrics extends ScorexLogging {
case class InfluxDbSettings(uri: URI,
db: String,
username: Option[String],
password: Option[String],
batchActions: Int,
batchFlashDuration: FiniteDuration)
case class Settings(enable: Boolean, nodeId: Int, influxDb: InfluxDbSettings)
private implicit val scheduler: SchedulerService = Scheduler.singleThread("metrics")
private var settings: Settings = _
private var db: Option[InfluxDB] = None
private val time = new TimeImpl
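// (Re)initializes the InfluxDB connection: any previously opened connection is
// closed first, and the Future yields true only if a new connection was established.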
def start(config: Settings): Future[Boolean] =
Task {
db.foreach { dbc =>
try {
db = None
dbc.close()
} catch {
case e: Throwable => log.warn(s"Failed to close InfluxDB (${e.getMessage()})")
}
}
settings = config
if (settings.enable) {
import config.{influxDb => dbSettings}
log.info(s"Precise metrics are enabled and will be sent to ${dbSettings.uri}/${dbSettings.db}")
try {
val x = if (dbSettings.username.nonEmpty && dbSettings.password.nonEmpty) {
InfluxDBFactory.connect(
dbSettings.uri.toString,
dbSettings.username.getOrElse(""),
dbSettings.password.getOrElse("")
)
} else {
InfluxDBFactory.connect(dbSettings.uri.toString)
}
x.setDatabase(dbSettings.db)
x.enableBatch(dbSettings.batchActions, dbSettings.batchFlashDuration.toSeconds.toInt, TimeUnit.SECONDS)
try {
val pong = x.ping()
log.info(s"Metrics will be sent to ${dbSettings.uri}/${dbSettings.db}. Connected in ${pong.getResponseTime}ms.")
db = Some(x)
} catch {
case NonFatal(e) =>
log.warn("Can't connect to InfluxDB", e)
}
} catch {
case e: Throwable => log.warn(s"Failed to connect to InfluxDB (${e.getMessage()})")
}
}
db.nonEmpty
}.runAsyncLogErr
def shutdown(): Unit =
Task {
db.foreach(_.close())
time.close()
}.runAsyncLogErr
def write(b: Point.Builder): Unit = {
db.foreach { db =>
val ts = time.getTimestamp()
Task {
try {
db.write(
b
// Should be a tag, but InfluxDB tag values must be strings
// https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#tag-value
.addField("node", settings.nodeId)
.tag("node", settings.nodeId.toString)
.time(ts, TimeUnit.MILLISECONDS)
.build())
} catch {
case e: Throwable => log.warn(s"Failed to send data to InfluxDB (${e.getMessage()})")
}
}.runAsyncLogErr
}
}
def writeEvent(name: String): Unit = write(Point.measurement(name))
}
|
amur-host/node
|
src/test/scala/com/amurplatform/transaction/ReissueTransactionV2Specification.scala
|
package com.amurplatform.transaction
import com.amurplatform.TransactionGen
import com.amurplatform.state.{ByteStr, EitherExt2}
import org.scalatest._
import org.scalatest.prop.PropertyChecks
import play.api.libs.json.Json
import com.amurplatform.account.PublicKeyAccount
import com.amurplatform.transaction.assets.ReissueTransactionV2
class ReissueTransactionV2Specification extends PropSpec with PropertyChecks with Matchers with TransactionGen {
property("JSON format validation") {
val js = Json.parse("""{
"type": 5,
"id": "HbQ7gMoDyRxSU6LbLLBVNTbxASaR8rm4Zck6eYvWVUkB",
"sender": "3N5GRqzDBhjVXnCn44baHcz2GoZy5qLxtTh",
"senderPublicKey": "<KEY>",
"fee": 100000000,
"timestamp": 1526287561757,
"proofs": [
"<KEY>"
],
"version": 2,
"chainId": 84,
"assetId": "9ekQuYn92natMnMq8KqeGK3Nn7cpKd3BvPEGgD6fFyyz",
"quantity": 100000000,
"reissuable": true
}
""")
val tx = ReissueTransactionV2
.create(
2,
'T',
PublicKeyAccount.fromBase58String("<KEY>").explicitGet(),
ByteStr.decodeBase58("9ekQuYn92natMnMq8KqeGK3Nn7cpKd3BvPEGgD6fFyyz").get,
100000000L,
true,
100000000L,
1526287561757L,
Proofs(Seq(ByteStr.decodeBase58("4DFEtUwJ9gjMQMuEXipv2qK7rnhhWEBqzpC3ZQ<KEY>").get))
)
.right
.get
js shouldEqual tx.json()
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/state/BalanceSnapshot.scala
|
package com.amurplatform.state
case class BalanceSnapshot(height: Int, regularBalance: Long, leaseIn: Long, leaseOut: Long) {
lazy val effectiveBalance = regularBalance + leaseIn - leaseOut
}
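// Worked example (hypothetical values): regularBalance = 100, leaseIn = 30,
// leaseOut = 20 gives effectiveBalance = 100 + 30 - 20 = 110.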
object BalanceSnapshot {
def apply(height: Int, p: Portfolio): BalanceSnapshot =
BalanceSnapshot(height, p.balance, p.lease.in, p.lease.out)
}
|
amur-host/node
|
src/main/scala/com/amurplatform/account/Address.scala
|
package com.amurplatform.account
import java.nio.ByteBuffer
import com.amurplatform.crypto
import com.amurplatform.state.ByteStr
import com.amurplatform.utils.{Base58, ScorexLogging, base58Length}
import com.amurplatform.transaction.ValidationError
import com.amurplatform.transaction.ValidationError.InvalidAddress
sealed trait Address extends AddressOrAlias {
val bytes: ByteStr
lazy val address: String = bytes.base58
lazy val stringRepr: String = address
}
object Address extends ScorexLogging {
val Prefix: String = "address:"
val AddressVersion: Byte = 1
val ChecksumLength = 4
val HashLength = 20
val AddressLength = 1 + 1 + HashLength + ChecksumLength
val AddressStringLength = base58Length(AddressLength)
private def scheme = AddressScheme.current
private class AddressImpl(val bytes: ByteStr) extends Address
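// Address byte layout: version (1 byte) | chainId (1 byte) | publicKeyHash (20 bytes) | checksum (4 bytes),
// i.e. AddressLength = 26; the checksum is the first ChecksumLength bytes of secureHash over the rest.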
def fromPublicKey(publicKey: Array[Byte], chainId: Byte = scheme.chainId): Address = {
val publicKeyHash = crypto.secureHash(publicKey)
val withoutChecksum = ByteBuffer.allocate(1 + 1 + HashLength).put(AddressVersion).put(chainId).put(publicKeyHash, 0, HashLength).array()
val bytes = ByteBuffer.allocate(AddressLength).put(withoutChecksum).put(crypto.secureHash(withoutChecksum), 0, ChecksumLength).array()
new AddressImpl(ByteStr(bytes))
}
def fromBytes(addressBytes: Array[Byte], chainId: Byte = scheme.chainId): Either[InvalidAddress, Address] = {
val version = addressBytes.head
val network = addressBytes.tail.head
(for {
_ <- Either.cond(version == AddressVersion, (), s"Unknown address version: $version")
_ <- Either.cond(network == chainId, (), s"Data from other network: expected: $chainId(${chainId.toChar}), actual: $network(${network.toChar})")
_ <- Either.cond(addressBytes.length == Address.AddressLength,
(),
s"Wrong addressBytes length: expected: ${Address.AddressLength}, actual: ${addressBytes.length}")
checkSum = addressBytes.takeRight(ChecksumLength)
checkSumGenerated = calcCheckSum(addressBytes.dropRight(ChecksumLength))
_ <- Either.cond(checkSum.sameElements(checkSumGenerated), (), s"Bad address checksum")
} yield new AddressImpl(ByteStr(addressBytes))).left.map(InvalidAddress)
}
def fromString(addressStr: String): Either[ValidationError, Address] = {
val base58String = if (addressStr.startsWith(Prefix)) addressStr.drop(Prefix.length) else addressStr
for {
_ <- Either.cond(base58String.length <= AddressStringLength,
(),
InvalidAddress(s"Wrong address string length: max=$AddressStringLength, actual: ${base58String.length}"))
byteArray <- Base58.decode(base58String).toEither.left.map(ex => InvalidAddress(s"Unable to decode base58: ${ex.getMessage}"))
address <- fromBytes(byteArray)
} yield address
}
private def calcCheckSum(withoutChecksum: Array[Byte]): Array[Byte] = crypto.secureHash(withoutChecksum).take(ChecksumLength)
}
|
amur-host/node
|
benchmark/src/test/scala/com/amurplatform/lang/v1/ScriptEvaluatorBenchmark.scala
|
package com.amurplatform.lang.v1
import java.util.concurrent.TimeUnit
import cats.kernel.Monoid
import com.amurplatform.lang.Global
import com.amurplatform.lang.v1.FunctionHeader.Native
import com.amurplatform.lang.v1.compiler.Terms._
import com.amurplatform.lang.v1.evaluator.EvaluatorV1
import com.amurplatform.lang.v1.evaluator.FunctionIds.{FROMBASE58, TOBASE58, SIGVERIFY}
import com.amurplatform.lang.v1.evaluator.ctx.EvaluationContext
import com.amurplatform.lang.v1.evaluator.ctx.impl.{CryptoContext, PureContext}
import com.amurplatform.utils.Base58
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import scodec.bits.ByteVector
import scorex.crypto.signatures.Curve25519
import scala.util.Random
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@BenchmarkMode(Array(Mode.AverageTime))
@Threads(4)
@Fork(1)
@Warmup(iterations = 20)
@Measurement(iterations = 10)
class ScriptEvaluatorBenchmark {
@Benchmark
def bigSum(st: BigSum, bh: Blackhole): Unit = bh.consume(EvaluatorV1[Boolean](PureContext.evalContext, st.expr))
@Benchmark
def nestedBlocks(st: NestedBlocks, bh: Blackhole): Unit = bh.consume(EvaluatorV1[Boolean](st.context, st.expr))
@Benchmark
def signatures(st: Signatures, bh: Blackhole): Unit = bh.consume(EvaluatorV1[Boolean](st.context, st.expr))
@Benchmark
def base58encode(st: Base58Perf, bh: Blackhole): Unit = bh.consume(EvaluatorV1[Boolean](st.context, st.encode))
@Benchmark
def base58decode(st: Base58Perf, bh: Blackhole): Unit = bh.consume(EvaluatorV1[Boolean](st.context, st.decode))
}
@State(Scope.Benchmark)
class NestedBlocks {
val context: EvaluationContext = PureContext.evalContext
val expr: EXPR = {
val blockCount = 300
val cond = FUNCTION_CALL(PureContext.eq, List(REF(s"v$blockCount"), CONST_LONG(0)))
val blocks = (1 to blockCount).foldRight[EXPR](cond) { (i, e) =>
BLOCK(LET(s"v$i", REF(s"v${i - 1}")), e)
}
BLOCK(LET("v0", CONST_LONG(0)), blocks)
}
}
@State(Scope.Benchmark)
class Base58Perf {
val context: EvaluationContext = Monoid.combine(PureContext.evalContext, CryptoContext.build(Global).evaluationContext)
val encode: EXPR = {
val base58Count = 120
val sum = (1 to base58Count).foldRight[EXPR](CONST_LONG(0)) {
case (i, e) => FUNCTION_CALL(PureContext.sumLong, List(REF("v" + i), e))
}
(1 to base58Count)
.map { i =>
val b = new Array[Byte](64)
Random.nextBytes(b)
LET("v" + i, FUNCTION_CALL(PureContext.sizeString, List(FUNCTION_CALL(Native(TOBASE58), List(CONST_BYTEVECTOR(ByteVector(b)))))))
}
.foldRight[EXPR](FUNCTION_CALL(PureContext.eq, List(sum, CONST_LONG(base58Count)))) {
case (let, e) => BLOCK(let, e)
}
}
val decode: EXPR = {
val base58Length = 6000
val b = new Array[Byte](base58Length)
Random.nextBytes(b)
FUNCTION_CALL(
PureContext.eq,
List(FUNCTION_CALL(PureContext.sizeBytes, List(FUNCTION_CALL(Native(FROMBASE58), List(CONST_STRING(Base58.encode(b)))))),
CONST_LONG(base58Length))
)
}
}
@State(Scope.Benchmark)
class Signatures {
val context: EvaluationContext = Monoid.combine(PureContext.evalContext, CryptoContext.build(Global).evaluationContext)
val expr: EXPR = {
val sigCount = 20
val sum = (1 to sigCount).foldRight[EXPR](CONST_LONG(0)) {
case (i, e) => FUNCTION_CALL(PureContext.sumLong, List(REF("v" + i), e))
}
(1 to sigCount)
.map { i =>
val msg = new Array[Byte](1024)
Random.nextBytes(msg)
val seed = new Array[Byte](256)
Random.nextBytes(seed)
val (sk, pk) = Curve25519.createKeyPair(seed)
val sig = Curve25519.sign(sk, msg)
LET(
"v" + i,
IF(
FUNCTION_CALL(Native(SIGVERIFY),
List(CONST_BYTEVECTOR(ByteVector(msg)), CONST_BYTEVECTOR(ByteVector(sig)), CONST_BYTEVECTOR(ByteVector(pk)))),
CONST_LONG(1),
CONST_LONG(0)
)
)
}
.foldRight[EXPR](FUNCTION_CALL(PureContext.eq, List(sum, CONST_LONG(sigCount)))) {
case (let, e) => BLOCK(let, e)
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/network/message/MessageSpec.scala
|
package com.amurplatform.network.message
import scala.reflect.ClassTag
import scala.util.Try
abstract class MessageSpec[Content <: AnyRef](implicit contentCt: ClassTag[Content]) {
val contentClass: Class[_] = contentCt.runtimeClass
val messageCode: Message.MessageCode
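// Derives a human-readable name by stripping the trailing "Spec$" from the
// implementing object's simple class name.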
final val messageName: String = """Spec\$$""".r.replaceAllIn(getClass.getSimpleName, "")
def maxLength: Int
def deserializeData(bytes: Array[Byte]): Try[Content]
def serializeData(data: Content): Array[Byte]
override def toString: String = s"MessageSpec($messageCode: $messageName)"
}
|
amur-host/node
|
src/main/scala/com/amurplatform/db/StorageCodecs.scala
|
package com.amurplatform.db
import com.google.common.primitives.Ints
import com.amurplatform.network.{BlockCheckpoint, Checkpoint}
import com.amurplatform.state.EitherExt2
import scala.collection.generic.CanBuildFrom
import scala.util.Try
case class CodecFailure(reason: String) {
override def toString: String = s"codec failure: $reason"
}
case class DecodeResult[A](length: Int, value: A)
trait Codec[A] {
def encode(value: A): Array[Byte]
def decode(bytes: Array[Byte]): Either[CodecFailure, DecodeResult[A]]
}
object Codec {
val SignatureLength: Int = 64
val TrueBytes: Array[Byte] = Array[Byte](1.toByte)
val FalseBytes: Array[Byte] = Array[Byte](0.toByte)
}
object BlockCheckpointCodec extends Codec[BlockCheckpoint] {
override def encode(bcp: BlockCheckpoint): Array[Byte] = {
val result = new Array[Byte](Ints.BYTES + Codec.SignatureLength)
System.arraycopy(Ints.toByteArray(bcp.height), 0, result, 0, Ints.BYTES)
System.arraycopy(bcp.signature, 0, result, Ints.BYTES, Codec.SignatureLength)
result
}
override def decode(arr: Array[Byte]): Either[CodecFailure, DecodeResult[BlockCheckpoint]] = {
val len = Ints.BYTES + Codec.SignatureLength
for {
height <- Try(Ints.fromByteArray(arr.take(Ints.BYTES))).toEither.left.map(e => CodecFailure(e.getMessage))
signature <- Either.cond(arr.length >= len, arr.slice(Ints.BYTES, len), CodecFailure("not enough bytes for signature"))
} yield DecodeResult(len, BlockCheckpoint(height, signature))
}
}
object CheckpointCodec extends Codec[Checkpoint] {
private val itemsCodec = SeqCodec(BlockCheckpointCodec)
override def encode(value: Checkpoint): Array[Byte] = {
val r = itemsCodec.encode(value.items)
val result = new Array[Byte](Codec.SignatureLength + r.length)
System.arraycopy(value.signature, 0, result, 0, Codec.SignatureLength)
System.arraycopy(r, 0, result, Codec.SignatureLength, r.length)
result
}
override def decode(bytes: Array[Byte]): Either[CodecFailure, DecodeResult[Checkpoint]] = {
val signature = bytes.take(Codec.SignatureLength)
for {
_ <- Either.cond(signature.length == Codec.SignatureLength, (), CodecFailure("incorrect signature length"))
items <- itemsCodec.decode(bytes.slice(Codec.SignatureLength, bytes.length))
} yield DecodeResult(Codec.SignatureLength + items.length, Checkpoint(items.value, signature))
}
}
object SeqCodec {
def apply[A](valueCodec: Codec[A]): ColCodec[Seq, A] = ColCodec(valueCodec)
}
case class ColCodec[Col[BB] <: TraversableOnce[BB], A](valueCodec: Codec[A])(implicit cbf: CanBuildFrom[Col[A], A, Col[A]]) extends Codec[Col[A]] {
override def encode(value: Col[A]): Array[Byte] = {
val builder = Array.newBuilder[Byte]
value.foreach[Unit] { item: A =>
builder.++=(valueCodec.encode(item))
}
val bytes = builder.result()
val len = bytes.length
val result = new Array[Byte](Ints.BYTES + len)
System.arraycopy(Ints.toByteArray(value.size), 0, result, 0, Ints.BYTES)
System.arraycopy(bytes, 0, result, Ints.BYTES, len)
result
}
override def decode(bytes: Array[Byte]): Either[CodecFailure, DecodeResult[Col[A]]] = {
val n = Try(Ints.fromByteArray(bytes.take(Ints.BYTES))).toEither.left.map(e => CodecFailure(e.getMessage))
if (n.isRight) {
val expectedLength = n.explicitGet()
val builder = cbf()
var i = Ints.BYTES
var error = false
while (i < bytes.length && !error) {
val r = valueCodec.decode(bytes.slice(i, bytes.length))
if (r.isRight) {
val rr = r.explicitGet()
i = i + rr.length
builder.+=(rr.value)
} else {
error = true
}
}
val result = builder.result()
Either.cond(!error && expectedLength == result.size, DecodeResult(i, result), CodecFailure(s"failed to deserialize $expectedLength items"))
} else Left(n.left.get)
}
}
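// Round-trip sketch (hypothetical values): for a checkpoint cp with a 64-byte signature,
//   CheckpointCodec.decode(CheckpointCodec.encode(cp))
// should yield Right(DecodeResult(n, cp)), where n is the number of bytes consumed.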
|
amur-host/node
|
src/main/scala/com/amurplatform/api/http/assets/SignedTransferV1Request.scala
|
package com.amurplatform.api.http.assets
import io.swagger.annotations.{ApiModel, ApiModelProperty}
import play.api.libs.functional.syntax._
import play.api.libs.json._
import com.amurplatform.account.{AddressOrAlias, PublicKeyAccount}
import com.amurplatform.api.http.BroadcastRequest
import com.amurplatform.transaction.TransactionParsers.SignatureStringLength
import com.amurplatform.transaction.transfer._
import com.amurplatform.transaction.{AssetIdStringLength, ValidationError}
object SignedTransferV1Request {
implicit val reads: Reads[SignedTransferV1Request] = (
(JsPath \ "senderPublicKey").read[String] and
(JsPath \ "assetId").readNullable[String] and
(JsPath \ "recipient").read[String] and
(JsPath \ "amount").read[Long] and
(JsPath \ "fee").read[Long] and
(JsPath \ "feeAssetId").read[String].map(Option.apply).orElse((JsPath \ "feeAsset").readNullable[String]) and
(JsPath \ "timestamp").read[Long] and
(JsPath \ "attachment").readNullable[String] and
(JsPath \ "signature").read[String]
)(SignedTransferV1Request.apply _)
implicit val writes: Writes[SignedTransferV1Request] = Json.writes[SignedTransferV1Request]
}
@ApiModel(value = "Signed Asset transfer transaction")
case class SignedTransferV1Request(@ApiModelProperty(value = "Base58 encoded sender public key", required = true)
senderPublicKey: String,
@ApiModelProperty(value = "Base58 encoded Asset ID")
assetId: Option[String],
@ApiModelProperty(value = "Recipient address", required = true)
recipient: String,
@ApiModelProperty(required = true, example = "1000000")
amount: Long,
@ApiModelProperty(required = true)
fee: Long,
@ApiModelProperty(value = "Fee asset ID")
feeAssetId: Option[String],
@ApiModelProperty(required = true)
timestamp: Long,
@ApiModelProperty(value = "Base58 encoded attachment")
attachment: Option[String],
@ApiModelProperty(required = true)
signature: String)
extends BroadcastRequest {
def toTx: Either[ValidationError, TransferTransactionV1] =
for {
_sender <- PublicKeyAccount.fromBase58String(senderPublicKey)
_assetId <- parseBase58ToOption(assetId.filter(_.length > 0), "invalid.assetId", AssetIdStringLength)
_feeAssetId <- parseBase58ToOption(feeAssetId.filter(_.length > 0), "invalid.feeAssetId", AssetIdStringLength)
_signature <- parseBase58(signature, "invalid.signature", SignatureStringLength)
_attachment <- parseBase58(attachment.filter(_.length > 0), "invalid.attachment", TransferTransaction.MaxAttachmentStringSize)
_account <- AddressOrAlias.fromString(recipient)
t <- TransferTransactionV1.create(_assetId, _sender, _account, amount, timestamp, _feeAssetId, fee, _attachment.arr, _signature)
} yield t
}
|
amur-host/node
|
src/test/scala/com/amurplatform/TestWallet.scala
|
package com.amurplatform
import com.amurplatform.settings.WalletSettings
import com.amurplatform.wallet.Wallet
trait TestWallet {
protected val testWallet = {
val wallet = Wallet(WalletSettings(None, "123", None))
wallet.generateNewAccounts(10)
wallet
}
}
|
amur-host/node
|
src/test/scala/com/amurplatform/history/DomainScenarioDrivenPropertyCheck.scala
|
package com.amurplatform.history
import com.amurplatform.db.WithState
import com.amurplatform.settings.AmurSettings
import org.scalacheck.Gen
import org.scalatest.Assertion
import org.scalatest.prop.GeneratorDrivenPropertyChecks
trait DomainScenarioDrivenPropertyCheck extends WithState { _: GeneratorDrivenPropertyChecks =>
def scenario[S](gen: Gen[S], bs: AmurSettings = DefaultAmurSettings)(assertion: (Domain, S) => Assertion): Assertion =
forAll(gen) { s =>
withDomain(bs) { domain =>
assertion(domain, s)
}
}
}
|
amur-host/node
|
lang/shared/src/main/scala/com/amurplatform/lang/package.scala
|
package com.amurplatform
import cats.data.EitherT
import monix.eval.Coeval
import scala.util.{Left, Right}
package object lang {
type ExecutionError = String
type ExecutionLog = String
type TrampolinedExecResult[T] = EitherT[Coeval, ExecutionError, T]
implicit class EitherExt3[A, B](ei: Either[A, B]) {
def explicitGet(): B = ei match {
case Left(value) => throw new Exception(s"$value")
case Right(value) => value
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/state/diffs/GenesisTransactionDiff.scala
|
package com.amurplatform.state.diffs
import com.amurplatform.state.{Diff, LeaseBalance, Portfolio}
import com.amurplatform.transaction.ValidationError.GenericError
import com.amurplatform.transaction.{GenesisTransaction, ValidationError}
import scala.util.{Left, Right}
object GenesisTransactionDiff {
def apply(height: Int)(tx: GenesisTransaction): Either[ValidationError, Diff] = {
if (height != 1) Left(GenericError("GenesisTransaction cannot appear in non-initial block"))
else
Right(Diff(height = height, tx = tx, portfolios = Map(tx.recipient -> Portfolio(balance = tx.amount, LeaseBalance.empty, assets = Map.empty))))
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/state/diffs/LeaseTransactionsDiff.scala
|
package com.amurplatform.state.diffs
import cats._
import cats.implicits._
import com.amurplatform.settings.FunctionalitySettings
import com.amurplatform.state._
import com.amurplatform.account.Address
import com.amurplatform.transaction.ValidationError
import com.amurplatform.transaction.ValidationError.GenericError
import com.amurplatform.transaction.lease._
import scala.util.{Left, Right}
object LeaseTransactionsDiff {
def lease(blockchain: Blockchain, height: Int)(tx: LeaseTransaction): Either[ValidationError, Diff] = {
val sender = Address.fromPublicKey(tx.sender.publicKey)
blockchain.resolveAlias(tx.recipient).flatMap { recipient =>
if (recipient == sender)
Left(GenericError("Cannot lease to self"))
else {
val ap = blockchain.portfolio(tx.sender)
if (ap.balance - ap.lease.out < tx.amount) {
Left(GenericError(s"Cannot lease more than own: Balance:${ap.balance}, already leased: ${ap.lease.out}"))
} else {
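// Leasing moves no funds: the sender pays the fee and gains lease.out of tx.amount,
// while the recipient gains a matching lease.in.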
val portfolioDiff: Map[Address, Portfolio] = Map(
sender -> Portfolio(-tx.fee, LeaseBalance(0, tx.amount), Map.empty),
recipient -> Portfolio(0, LeaseBalance(tx.amount, 0), Map.empty)
)
Right(Diff(height = height, tx = tx, portfolios = portfolioDiff, leaseState = Map(tx.id() -> true)))
}
}
}
}
def leaseCancel(blockchain: Blockchain, settings: FunctionalitySettings, time: Long, height: Int)(
tx: LeaseCancelTransaction): Either[ValidationError, Diff] = {
val leaseEi = blockchain.leaseDetails(tx.leaseId) match {
case None => Left(GenericError(s"Related LeaseTransaction not found"))
case Some(l) => Right(l)
}
for {
lease <- leaseEi
recipient <- blockchain.resolveAlias(lease.recipient)
isLeaseActive = lease.isActive
_ <- if (!isLeaseActive && time > settings.allowMultipleLeaseCancelTransactionUntilTimestamp)
Left(GenericError(s"Cannot cancel already cancelled lease"))
else Right(())
canceller = Address.fromPublicKey(tx.sender.publicKey)
portfolioDiff <- if (tx.sender == lease.sender) {
Right(
Monoid.combine(Map(canceller -> Portfolio(-tx.fee, LeaseBalance(0, -lease.amount), Map.empty)),
Map(recipient -> Portfolio(0, LeaseBalance(-lease.amount, 0), Map.empty))))
} else if (time < settings.allowMultipleLeaseCancelTransactionUntilTimestamp) { // cancel of another acc
Right(
Monoid.combine(Map(canceller -> Portfolio(-tx.fee, LeaseBalance(0, -lease.amount), Map.empty)),
Map(recipient -> Portfolio(0, LeaseBalance(-lease.amount, 0), Map.empty))))
} else
Left(
GenericError(
s"LeaseTransaction was leased by other sender " +
s"and time=$time > allowMultipleLeaseCancelTransactionUntilTimestamp=${settings.allowMultipleLeaseCancelTransactionUntilTimestamp}"))
} yield Diff(height = height, tx = tx, portfolios = portfolioDiff, leaseState = Map(tx.leaseId -> false))
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/consensus/nxt/NxtLikeConsensusBlockData.scala
|
package com.amurplatform.consensus.nxt
import com.amurplatform.state.ByteStr
case class NxtLikeConsensusBlockData(baseTarget: Long, generationSignature: ByteStr)
|
amur-host/node
|
src/main/scala/com/amurplatform/utx/UtxPool.scala
|
package com.amurplatform.utx
import com.amurplatform.mining.MultiDimensionalMiningConstraint
import com.amurplatform.state.{ByteStr, Diff, Portfolio}
import com.amurplatform.account.Address
import com.amurplatform.transaction._
trait UtxPool extends AutoCloseable {
self =>
def putIfNew(tx: Transaction): Either[ValidationError, (Boolean, Diff)]
def removeAll(txs: Traversable[Transaction]): Unit
def accountPortfolio(addr: Address): Portfolio
def portfolio(addr: Address): Portfolio
def all: Seq[Transaction]
def size: Int
def transactionById(transactionId: ByteStr): Option[Transaction]
def packUnconfirmed(rest: MultiDimensionalMiningConstraint, sortInBlock: Boolean): (Seq[Transaction], MultiDimensionalMiningConstraint)
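// Runs f against a batch-scoped view of the pool; the default implementation
// simply delegates each putIfNew back to the pool itself.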
def batched[Result](f: UtxBatchOps => Result): Result = f(createBatchOps)
private[utx] def createBatchOps: UtxBatchOps = new UtxBatchOps {
override def putIfNew(tx: Transaction): Either[ValidationError, (Boolean, Diff)] = self.putIfNew(tx)
}
}
trait UtxBatchOps {
def putIfNew(tx: Transaction): Either[ValidationError, (Boolean, Diff)]
}
|
amur-host/node
|
src/test/scala/com/amurplatform/http/RestAPISettingsHelper.scala
|
package com.amurplatform.http
import com.typesafe.config.ConfigFactory
import com.amurplatform.crypto
import com.amurplatform.settings.RestAPISettings
import com.amurplatform.utils.Base58
trait RestAPISettingsHelper {
def apiKey: String = "test_api_key"
lazy val restAPISettings = {
val keyHash = Base58.encode(crypto.secureHash(apiKey.getBytes()))
RestAPISettings.fromConfig(
ConfigFactory
.parseString(s"amur.rest-api.api-key-hash = $keyHash")
.withFallback(ConfigFactory.load()))
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/matcher/MigrationTool.scala
|
package com.amurplatform.matcher
import java.io.File
import java.util.{HashMap => JHashMap}
import com.google.common.base.Charsets.UTF_8
import com.google.common.primitives.Shorts
import com.typesafe.config.ConfigFactory
import com.amurplatform.account.{Address, AddressScheme}
import com.amurplatform.crypto.DigestSize
import com.amurplatform.database.DBExt
import com.amurplatform.db.openDB
import com.amurplatform.matcher.api.DBUtils
import com.amurplatform.matcher.model.{LimitOrder, OrderInfo}
import com.amurplatform.settings.{AmurSettings, loadConfig}
import com.amurplatform.state.{ByteStr, EitherExt2}
import com.amurplatform.transaction.AssetId
import com.amurplatform.transaction.assets.exchange.AssetPair
import com.amurplatform.utils.ScorexLogging
import org.iq80.leveldb.DB
import scala.collection.JavaConverters._
object MigrationTool extends ScorexLogging {
private def collectStats(db: DB): Unit = {
log.info("Collecting stats")
val iterator = db.iterator()
iterator.seekToFirst()
val result = new JHashMap[Short, Stats]
def add(prefix: Short, e: java.util.Map.Entry[Array[Byte], Array[Byte]]): Unit = {
result.compute(
prefix,
(_, maybePrev) =>
maybePrev match {
case null => Stats(1, e.getKey.length, e.getValue.length)
case prev => Stats(prev.entryCount + 1, prev.totalKeySize + e.getKey.length, prev.totalValueSize + e.getValue.length)
}
)
}
try {
while (iterator.hasNext) {
val e = iterator.next()
e.getKey match {
case SK.Orders(_) => add(100.toShort, e)
case SK.OrdersInfo(_) => add(101.toShort, e)
case SK.AddressToOrders(_) => add(102.toShort, e)
case SK.AddressToActiveOrders(_) => add(103.toShort, e)
case SK.AddressPortfolio(_) => add(104.toShort, e)
case SK.Transactions(_) => add(104.toShort, e)
case SK.OrdersToTxIds(_) => add(106.toShort, e)
case bytes =>
val prefix = Shorts.fromByteArray(bytes.take(2))
add(prefix, e)
}
}
} finally iterator.close()
for ((k, s) <- result.asScala) {
println(s"$k, ${s.entryCount}, ${s.totalKeySize}, ${s.totalValueSize}")
}
}
private def deleteLegacyEntries(db: DB): Unit = {
val keysToDelete = Seq.newBuilder[Array[Byte]]
db.iterateOver("matcher:".getBytes(UTF_8)) { e =>
keysToDelete += e.getKey
}
db.readWrite(rw => keysToDelete.result().foreach(rw.delete))
}
private def recalculateReservedBalance(db: DB): Unit = {
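// Rebuilds reserved balances from all non-final orders, compares them with the
// stored values, and rewrites any entries that disagree.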
log.info("Recalculating reserved balances")
val calculatedReservedBalances = new JHashMap[Address, Map[Option[AssetId], Long]]()
val ordersToDelete = Seq.newBuilder[Array[Byte]]
val orderInfoToUpdate = Seq.newBuilder[(Array[Byte], OrderInfo)]
val key = MatcherKeys.orderInfo(Array.emptyByteArray)
var discrepancyCounter = 0
db.iterateOver(key.keyBytes) { e =>
val orderId = new Array[Byte](DigestSize)
Array.copy(e.getKey, 2, orderId, 0, DigestSize)
val orderInfo = key.parse(e.getValue)
if (!orderInfo.status.isFinal) {
db.get(MatcherKeys.order(orderId)) match {
case None =>
log.info(s"Missing order $orderId")
ordersToDelete += orderId
case Some(order) =>
calculatedReservedBalances.compute(
order.sender, { (_, prevBalances) =>
val spendId = order.getSpendAssetId
val spendRemaining = order.getSpendAmount(order.price, orderInfo.remaining).explicitGet()
val remainingFee = order.matcherFee - LimitOrder.getPartialFee(order.matcherFee, order.amount, orderInfo.filled)
if (remainingFee != orderInfo.remainingFee) {
orderInfoToUpdate += orderId -> orderInfo.copy(remainingFee = remainingFee)
}
val r = Option(prevBalances).fold(Map(spendId -> spendRemaining)) { prevBalances =>
prevBalances.updated(spendId, prevBalances.getOrElse(spendId, 0L) + spendRemaining)
}
// Fee correction
if (order.getReceiveAssetId.isEmpty) r else r.updated(None, r.getOrElse(None, 0L) + remainingFee)
}
)
}
}
}
log.info("Collecting all addresses")
val addresses = Seq.newBuilder[Address]
db.iterateOver(Shorts.toByteArray(5)) { e =>
val addressBytes = new Array[Byte](Address.AddressLength)
Array.copy(e.getKey, 2, addressBytes, 0, Address.AddressLength)
addresses += Address.fromBytes(addressBytes).explicitGet()
}
log.info("Loading stored reserved balances")
val allReservedBalances = addresses.result().map(a => a -> DBUtils.reservedBalance(db, a)).toMap
if (allReservedBalances.size != calculatedReservedBalances.size()) {
log.info(s"Calculated balances: ${calculatedReservedBalances.size()}, stored balances: ${allReservedBalances.size}")
}
val corrections = Seq.newBuilder[((Address, Option[AssetId]), Long)]
var assetsToAdd = Map.empty[Address, Set[Option[AssetId]]]
for (address <- allReservedBalances.keySet ++ calculatedReservedBalances.keySet().asScala) {
val calculated = calculatedReservedBalances.getOrDefault(address, Map.empty)
val stored = allReservedBalances.getOrElse(address, Map.empty)
if (calculated != stored) {
for (assetId <- calculated.keySet ++ stored.keySet) {
val calculatedBalance = calculated.getOrElse(assetId, 0L)
val storedBalance = stored.getOrElse(assetId, 0L)
if (calculatedBalance != storedBalance) {
if (!stored.contains(assetId)) assetsToAdd += address -> (assetsToAdd.getOrElse(address, Set.empty) + assetId)
discrepancyCounter += 1
corrections += (address, assetId) -> calculatedBalance
}
}
}
}
log.info(s"Found $discrepancyCounter discrepancies; writing reserved balances")
db.readWrite { rw =>
for ((address, newAssetIds) <- assetsToAdd) {
val k = MatcherKeys.openVolumeSeqNr(address)
val currSeqNr = rw.get(k)
rw.put(k, currSeqNr + newAssetIds.size)
for ((assetId, i) <- newAssetIds.zipWithIndex) {
rw.put(MatcherKeys.openVolumeAsset(address, currSeqNr + 1 + i), assetId)
}
}
for (((address, assetId), value) <- corrections.result()) {
rw.put(MatcherKeys.openVolume(address, assetId), Some(value))
}
}
val allUpdatedOrderInfo = orderInfoToUpdate.result()
if (allUpdatedOrderInfo.nonEmpty) {
log.info(s"Writing ${allUpdatedOrderInfo.size} updated order info values")
db.readWrite { rw =>
for ((id, oi) <- allUpdatedOrderInfo) {
rw.put(MatcherKeys.orderInfo(id), oi)
}
}
}
log.info("Completed")
}
def main(args: Array[String]): Unit = {
log.info(s"OK, engine start")
val userConfig = args.headOption.fold(ConfigFactory.empty())(f => ConfigFactory.parseFile(new File(f)))
val settings = AmurSettings.fromConfig(loadConfig(userConfig))
val db = openDB(settings.matcherSettings.dataDir)
AddressScheme.current = new AddressScheme {
override val chainId: Byte = settings.blockchainSettings.addressSchemeCharacter.toByte
}
if (args(1) == "stats") {
collectStats(db)
} else if (args(1) == "ao") {
val o = DBUtils.ordersByAddress(db, Address.fromString(args(2)).explicitGet(), Set.empty, false, Int.MaxValue)
println(o.mkString("\n"))
} else if (args(1) == "cb") {
recalculateReservedBalance(db)
} else if (args(1) == "rb") {
for ((assetId, balance) <- DBUtils.reservedBalance(db, Address.fromString(args(2)).explicitGet())) {
log.info(s"${AssetPair.assetIdStr(assetId)}: $balance")
}
} else if (args(1) == "ddd") {
log.warn("DELETING LEGACY ENTRIES")
deleteLegacyEntries(db)
log.info("Finished deleting legacy entries")
} else if (args(1) == "compact") {
log.info("Compacting database")
db.compactRange(null, null)
log.info("Compaction complete")
}
db.close()
}
case class Stats(entryCount: Long, totalKeySize: Long, totalValueSize: Long)
class SK[A](suffix: String, extractor: Array[Byte] => Option[A]) {
val keyBytes = ("matcher:" + suffix + ":").getBytes(UTF_8)
def unapply(bytes: Array[Byte]): Option[A] = {
val (prefix, suffix) = bytes.splitAt(keyBytes.length)
if (prefix.sameElements(keyBytes)) extractor(suffix) else None
}
}
object SK {
def apply[A](suffix: String, extractor: Array[Byte] => Option[A]) = new SK(suffix, extractor)
private def byteStr(b: Array[Byte]) = ByteStr.decodeBase58(new String(b, UTF_8)).toOption
private def addr(b: Array[Byte]) = Address.fromString(new String(b, UTF_8)).toOption
val Orders = SK("orders", byteStr)
val OrdersInfo = SK("infos", byteStr)
val AddressToOrders = SK("addr-orders", addr)
val AddressToActiveOrders = SK("a-addr-orders", addr)
val AddressPortfolio = SK("portfolios", addr)
val Transactions = SK("transactions", byteStr)
val OrdersToTxIds = SK("ord-to-tx-ids", byteStr)
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/matcher/MatcherKeys.scala
|
package com.amurplatform.matcher
import java.nio.ByteBuffer
import com.google.common.primitives.Longs
import com.amurplatform.account.Address
import com.amurplatform.database.Key
import com.amurplatform.matcher.model.OrderInfo
import com.amurplatform.state.ByteStr
import com.amurplatform.transaction.AssetId
import com.amurplatform.transaction.assets.exchange.{ExchangeTransaction, Order, OrderV1, OrderV2}
import scala.util.Failure
object MatcherKeys {
import com.amurplatform.database.KeyHelpers._
private def assetIdToBytes(assetId: Option[AssetId]): Array[Byte] = assetId.fold(Array.emptyByteArray)(_.arr)
val version: Key[Int] = intKey(0, default = 1)
def order(orderId: Array[Byte]): Key[Option[Order]] = Key.opt(
bytes(1, orderId), { x =>
val r = x.head match {
case 1 => OrderV1.parseBytes(x.tail)
case 2 => OrderV2.parseBytes(x.tail)
case v => Failure(new NotImplementedError(s"Unknown version $v"))
}
r.recover {
case e => throw new IllegalArgumentException(s"Can't parse $orderId order: ${e.getMessage}", e)
}.get
}, { x =>
x.version +: x.bytes()
}
)
def orderInfoOpt(orderId: Array[Byte]): Key[Option[OrderInfo]] = Key.opt(
bytes(2, orderId),
orderInfoParser,
x => throw new NotImplementedError(s"You can't write $x to the DB. Please use 'MatcherKeys.orderInfo' for this")
)
def orderInfo(orderId: Array[Byte]): Key[OrderInfo] = Key(
bytes(2, orderId),
Option(_).fold[OrderInfo](OrderInfo.empty)(orderInfoParser), { oi =>
val allocateBytes = if (oi.unsafeTotalSpend.isEmpty) 33 else 41
val buf = ByteBuffer
.allocate(allocateBytes)
.putLong(oi.amount)
.putLong(oi.filled)
.put(if (oi.canceled) 1.toByte else 0.toByte)
.putLong(oi.minAmount.getOrElse(0L))
.putLong(oi.remainingFee)
oi.unsafeTotalSpend.foreach(buf.putLong)
buf.array()
}
)
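// Encoded OrderInfo layouts by length: 17 bytes (amount, filled, canceled flag),
// 33 bytes (adds minAmount and remainingFee), 41 bytes (adds unsafeTotalSpend).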
private def orderInfoParser(input: Array[Byte]): OrderInfo = {
val bb = ByteBuffer.wrap(input)
input.length match {
case 17 => OrderInfo(bb.getLong, bb.getLong, bb.get == 1, None, 0, None)
case 33 => OrderInfo(bb.getLong, bb.getLong, bb.get == 1, Some(bb.getLong), bb.getLong, None)
case 41 => OrderInfo(bb.getLong, bb.getLong, bb.get == 1, Some(bb.getLong), bb.getLong, Some(bb.getLong))
}
}
def addressOrdersSeqNr(address: Address): Key[Int] = bytesSeqNr(3, address.bytes.arr)
def addressOrders(address: Address, seqNr: Int): Key[Option[OrderAssets]] =
Key.opt(hBytes(4, seqNr, address.bytes.arr), OrderAssets.read, OrderAssets.write)
def openVolume(address: Address, assetId: Option[AssetId]): Key[Option[Long]] =
Key.opt(bytes(5, address.bytes.arr ++ assetIdToBytes(assetId)), Longs.fromByteArray, Longs.toByteArray)
def openVolumeSeqNr(address: Address): Key[Int] = bytesSeqNr(6, address.bytes.arr)
def openVolumeAsset(address: Address, seqNr: Int): Key[Option[AssetId]] =
Key(hBytes(7, seqNr, address.bytes.arr), Option(_).collect { case b if b.nonEmpty => ByteStr(b) }, assetIdToBytes)
def orderTxIdsSeqNr(orderId: ByteStr): Key[Int] = bytesSeqNr(8, orderId.arr)
def orderTxId(orderId: ByteStr, seqNr: Int): Key[ByteStr] = Key(hBytes(9, seqNr, orderId.arr), ByteStr(_), _.arr)
def exchangeTransaction(txId: ByteStr): Key[Option[ExchangeTransaction]] =
Key.opt(bytes(10, txId.arr), ExchangeTransaction.parse(_).get, _.bytes())
}
|
amur-host/node
|
src/test/scala/com/amurplatform/db/WithState.scala
|
package com.amurplatform.db
import java.nio.file.Files
import com.typesafe.config.ConfigFactory
import com.amurplatform.TestHelpers
import com.amurplatform.database.LevelDBWriter
import com.amurplatform.history.Domain
import com.amurplatform.settings.{FunctionalitySettings, AmurSettings, loadConfig}
import com.amurplatform.state.{Blockchain, BlockchainUpdaterImpl}
import com.amurplatform.utils.{ScorexLogging, TimeImpl}
trait WithState extends ScorexLogging {
private def withState[A](fs: FunctionalitySettings)(f: Blockchain => A): A = {
val path = Files.createTempDirectory("leveldb-test")
val db = openDB(path.toAbsolutePath.toString)
try f(new LevelDBWriter(db, fs))
finally {
db.close()
TestHelpers.deleteRecursively(path)
}
}
def withStateAndHistory(fs: FunctionalitySettings)(test: Blockchain => Any): Unit = withState(fs)(test)
def withDomain[A](settings: AmurSettings = AmurSettings.fromConfig(loadConfig(ConfigFactory.load())))(test: Domain => A): A = {
val time = new TimeImpl
try withState(settings.blockchainSettings.functionalitySettings) { blockchain =>
val bcu = new BlockchainUpdaterImpl(blockchain, settings, time)
try test(Domain(bcu))
finally bcu.shutdown()
} finally {
time.close()
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/Shutdownable.scala
|
package com.amurplatform
trait Shutdownable {
def shutdown(): Unit
}
|
amur-host/node
|
src/main/scala/com/amurplatform/features/FeatureProvider.scala
|
package com.amurplatform.features
import com.amurplatform.state.Blockchain
object FeatureProvider {
implicit class FeatureProviderExt(provider: Blockchain) {
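// A feature counts as activated (or approved) at `height` once its recorded
// activation (or approval) height is less than or equal to `height`.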
def isFeatureActivated(feature: BlockchainFeature, height: Int): Boolean =
provider.activatedFeatures.get(feature.id).exists(_ <= height)
def activatedFeaturesAt(height: Int): Set[Short] =
provider.activatedFeatures.collect {
case (featureId, activationHeight) if height >= activationHeight => featureId
}.toSet
def featureStatus(feature: Short, height: Int): BlockchainFeatureStatus =
if (provider.activatedFeatures.get(feature).exists(_ <= height)) BlockchainFeatureStatus.Activated
else if (provider.approvedFeatures.get(feature).exists(_ <= height)) BlockchainFeatureStatus.Approved
else BlockchainFeatureStatus.Undefined
def featureActivationHeight(feature: Short): Option[Int] = provider.activatedFeatures.get(feature)
def featureApprovalHeight(feature: Short): Option[Int] = provider.approvedFeatures.get(feature)
}
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/NodeRestartTestSuite.scala
|
package com.amurplatform.it.sync
import com.typesafe.config.{Config, ConfigFactory}
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.it.transactions.NodesFromDocker
import com.amurplatform.it.{ReportingTestName, WaitForHeight2}
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers}
class NodeRestartTestSuite extends FreeSpec with Matchers with WaitForHeight2 with CancelAfterFailure with ReportingTestName with NodesFromDocker {
import NodeRestartTestSuite._
override protected def nodeConfigs: Seq[Config] = Configs
private def nodeA = nodes.head
"node should grow up to 5 blocks together and sync" in {
nodes.waitForSameBlockHeadesAt(5)
}
"create many addresses and check them after node restart" in {
1 to 10 map (_ => nodeA.createAddress())
val setOfAddresses = nodeA.getAddresses
val nodeAWithOtherPorts = docker.restartContainer(dockerNodes().head)
val maxHeight = nodes.map(_.height).max
nodeAWithOtherPorts.getAddresses should contain theSameElementsAs (setOfAddresses)
nodes.waitForSameBlockHeadesAt(maxHeight + 2)
}
}
object NodeRestartTestSuite {
import com.amurplatform.it.NodeConfigs._
private val FirstNode = ConfigFactory.parseString(s"""
|amur {
| synchronization.synchronization-timeout = 10s
| blockchain.custom.functionality {
| pre-activated-features.1 = 0
| }
| miner.quorum = 0
| wallet {
| file = "/tmp/wallet.dat"
| password = "<PASSWORD>"
| }
|
|}""".stripMargin)
private val SecondNode = ConfigFactory.parseString(s"""
|amur {
| synchronization.synchronization-timeout = 10s
| blockchain.custom.functionality {
| pre-activated-features.1 = 0
| }
| miner.enable = no
| wallet {
| file = "/tmp/wallet.dat"
| password = "<PASSWORD>"
| }
|}""".stripMargin)
val Configs: Seq[Config] = Seq(
FirstNode.withFallback(Default.head),
SecondNode.withFallback(Default(1))
)
}
|
amur-host/node
|
src/main/scala/com/amurplatform/database/ReadOnlyDB.scala
|
package com.amurplatform.database
import org.iq80.leveldb.{DB, ReadOptions}
class ReadOnlyDB(db: DB, readOptions: ReadOptions) {
def get[V](key: Key[V]): V = key.parse(db.get(key.keyBytes, readOptions))
def has[V](key: Key[V]): Boolean = db.get(key.keyBytes, readOptions) != null
}
|
amur-host/node
|
src/main/scala/com/amurplatform/settings/FeaturesSettings.scala
|
package com.amurplatform.settings
case class FeaturesSettings(autoShutdownOnUnsupportedFeature: Boolean, supported: List[Short])
|
amur-host/node
|
generator/src/main/scala/com.amurplatform.generator/config/FicusImplicits.scala
|
package com.amurplatform.generator.config
import com.google.common.base.CaseFormat
import com.typesafe.config.Config
import com.amurplatform.state.{BinaryDataEntry, BooleanDataEntry, ByteStr, DataEntry, IntegerDataEntry, StringDataEntry}
import com.amurplatform.transaction.{TransactionParser, TransactionParsers}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.{CollectionReaders, ValueReader}
trait FicusImplicits {
implicit val distributionsReader: ValueReader[Map[TransactionParser, Double]] = {
val converter = CaseFormat.LOWER_HYPHEN.converterTo(CaseFormat.UPPER_CAMEL)
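// Maps lower-hyphen config keys to UpperCamel parser names,
// e.g. a hypothetical key "issue-transaction" becomes "IssueTransaction".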
def toTxType(key: String): TransactionParser = TransactionParsers.by(converter.convert(key)).get
CollectionReaders.mapValueReader[Double].map { xs =>
xs.map { case (k, v) => {
println(s"$k - $v")
toTxType(k) -> v
} }
}
}
implicit val dataEntryReader = new ValueReader[DataEntry[_]] {
override def read(config: Config, path: String): DataEntry[_] = {
val key = config.getConfig(path).getString("key")
config.getConfig(path).getString("type") match {
case "Integer" =>
val value = config.getConfig(path).getLong("value")
IntegerDataEntry(key, value)
case "String" =>
val value = config.getConfig(path).getString("value")
StringDataEntry(key, value.asInstanceOf[String])
case "Boolean" =>
val value = config.getConfig(path).getBoolean("value")
BooleanDataEntry(key, value.asInstanceOf[Boolean])
case "Binary" =>
val value = config.getConfig(path).getString("value")
BinaryDataEntry(key, ByteStr.decodeBase58(value.asInstanceOf[String]).get)
}
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/smart/script/ScriptCompiler.scala
|
package com.amurplatform.transaction.smart.script
import cats.implicits._
import com.amurplatform.lang.ScriptVersion
import com.amurplatform.lang.ScriptVersion.Versions.V1
import com.amurplatform.lang.directives.{Directive, DirectiveKey, DirectiveParser}
import com.amurplatform.lang.v1.ScriptEstimator
import com.amurplatform.lang.v1.compiler.CompilerV1
import com.amurplatform.utils
import com.amurplatform.utils.functionCosts
import com.amurplatform.transaction.smart.script.v1.ScriptV1
import scala.util.{Failure, Success, Try}
object ScriptCompiler {
private val v1Compiler = new CompilerV1(utils.dummyCompilerContext)
def apply(scriptText: String): Either[String, (Script, Long)] = {
val directives = DirectiveParser(scriptText)
val scriptWithoutDirectives =
scriptText.lines
.filter(str => !str.contains("{-#"))
.mkString("\n")
for {
v <- extractVersion(directives)
expr <- v match {
case V1 => v1Compiler.compile(scriptWithoutDirectives, directives)
}
script <- ScriptV1(expr)
complexity <- ScriptEstimator(functionCosts, expr)
} yield (script, complexity)
}
def estimate(script: Script): Either[String, Long] = script match {
case Script.Expr(expr) => ScriptEstimator(functionCosts, expr)
}
private def extractVersion(directives: List[Directive]): Either[String, ScriptVersion] = {
directives
.find(_.key == DirectiveKey.LANGUAGE_VERSION)
.map(d =>
Try(d.value.toInt) match {
case Success(v) =>
ScriptVersion
.fromInt(v)
.fold[Either[String, ScriptVersion]](Left("Unsupported language version"))(_.asRight)
case Failure(ex) =>
Left("Can't parse language version")
})
.getOrElse(V1.asRight)
}
}
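// Editor's sketch (not part of the original file): a minimal compile call. With no
// {-# ... #-} directive in the source text, extractVersion above falls back to V1.
object ScriptCompilerUsageSketch {
  // Right((script, complexity)) on success, Left(error) if parsing or compilation fails.
  val result: Either[String, (Script, Long)] = ScriptCompiler("true")
}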
|
amur-host/node
|
src/main/scala/com/amurplatform/api/http/assets/SignedIssueV1Request.scala
|
package com.amurplatform.api.http.assets
import com.google.common.base.Charsets
import io.swagger.annotations.{ApiModel, ApiModelProperty}
import play.api.libs.json.{Format, Json}
import com.amurplatform.account.PublicKeyAccount
import com.amurplatform.api.http.BroadcastRequest
import com.amurplatform.transaction.TransactionParsers.SignatureStringLength
import com.amurplatform.transaction.ValidationError
import com.amurplatform.transaction.assets.IssueTransactionV1
object SignedIssueV1Request {
implicit val assetIssueRequestReads: Format[SignedIssueV1Request] = Json.format
}
@ApiModel(value = "Signed Asset issue transaction")
case class SignedIssueV1Request(@ApiModelProperty(value = "Base58 encoded Issuer public key", required = true)
senderPublicKey: String,
@ApiModelProperty(value = "Base58 encoded name of Asset", required = true)
name: String,
@ApiModelProperty(value = "Base58 encoded description of Asset", required = true)
description: String,
@ApiModelProperty(required = true, example = "1000000")
quantity: Long,
@ApiModelProperty(allowableValues = "range[0,8]", example = "8", dataType = "integer", required = true)
decimals: Byte,
@ApiModelProperty(required = true)
reissuable: Boolean,
@ApiModelProperty(required = true)
fee: Long,
@ApiModelProperty(required = true)
timestamp: Long,
@ApiModelProperty(required = true)
signature: String)
extends BroadcastRequest {
def toTx: Either[ValidationError, IssueTransactionV1] =
for {
_sender <- PublicKeyAccount.fromBase58String(senderPublicKey)
_signature <- parseBase58(signature, "invalid signature", SignatureStringLength)
_t <- IssueTransactionV1.create(_sender,
name.getBytes(Charsets.UTF_8),
description.getBytes(Charsets.UTF_8),
quantity,
decimals,
reissuable,
fee,
timestamp,
_signature)
} yield _t
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/package.scala
|
package com.amurplatform.it
import com.amurplatform.state.DataEntry
import com.amurplatform.it.util._
package object sync {
val minFee = 0.001.amur
val leasingFee = 0.002.amur
val smartFee = 0.004.amur
val issueFee = 1.amur
val burnFee = 1.amur
val sponsorFee = 1.amur
val transferAmount = 10.amur
val leasingAmount = transferAmount
val issueAmount = transferAmount
val massTransferFeePerTransfer = 0.0005.amur
val someAssetAmount = 100000
val matcherFee = 0.003.amur
def calcDataFee(data: List[DataEntry[_]]): Long = {
val dataSize = data.map(_.toBytes.length).sum + 128
if (dataSize > 1024) {
minFee * (dataSize / 1024 + 1)
} else minFee
}
def calcMassTransferFee(numberOfRecipients: Int): Long = {
minFee + massTransferFeePerTransfer * (numberOfRecipients + 1)
}
  val supportedVersions = List(null, "2") // null: the sign/broadcast endpoints fall back to the default version (V1)
}
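// Editor's note (not part of the original file): worked examples of the fee helpers
// above, using the constants defined in this package object (minFee == 100000 and
// massTransferFeePerTransfer == 50000 in base units).
//   calcMassTransferFee(2) == minFee + massTransferFeePerTransfer * 3 == 250000 (0.0025 amur)
//   calcDataFee(entries) with entry bytes summing to 1000 gives dataSize == 1128,
//   hence minFee * (1128 / 1024 + 1) == 200000 (0.002 amur)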
|
amur-host/node
|
src/test/scala/com/amurplatform/state/diffs/smart/predef/AddressTest.scala
|
package com.amurplatform.state.diffs.smart.predef
import com.amurplatform.state._
import com.amurplatform.{NoShrink, TransactionGen}
import org.scalatest.{Matchers, PropSpec}
import org.scalatest.prop.PropertyChecks
import scodec.bits.ByteVector
import com.amurplatform.account.Address
class AddressTest extends PropSpec with PropertyChecks with Matchers with TransactionGen with NoShrink {
property("should calculate address from public key") {
forAll(accountGen) { acc =>
val script =
s"""
| let pk = base58'${ByteStr(acc.publicKey).base58}'
| let address = addressFromPublicKey(pk)
| address.bytes
""".stripMargin
runScript[ByteVector](script) shouldBe Right(ByteVector(Address.fromPublicKey(acc.publicKey, networkByte).bytes.arr))
}
}
property("should calculate address from bytes") {
forAll(accountGen) { acc =>
val addressBytes = Address.fromPublicKey(acc.publicKey, networkByte).bytes
val script =
s"""
| let addressString = "${addressBytes.base58}"
| let maybeAddress = addressFromString(addressString)
| let address = extract(maybeAddress)
| address.bytes
""".stripMargin
runScript[ByteVector](script) shouldBe Right(ByteVector(Address.fromBytes(addressBytes.arr, networkByte).explicitGet().bytes.arr))
}
}
property("should calculate address and return bytes without intermediate ref") {
forAll(accountGen) { acc =>
val addressBytes = Address.fromPublicKey(acc.publicKey, networkByte).bytes
val script =
s"""
| let addressString = "${addressBytes.base58}"
| let maybeAddress = addressFromString(addressString)
| extract(maybeAddress).bytes
""".stripMargin
runScript[ByteVector](script) shouldBe Right(ByteVector(Address.fromBytes(addressBytes.arr, networkByte).explicitGet().bytes.arr))
}
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/ProvenTransaction.scala
|
package com.amurplatform.transaction
import play.api.libs.json._
import com.amurplatform.utils.Base58
trait ProvenTransaction extends Transaction with Proven {
protected def proofField: Seq[(String, JsValue)] = Seq("proofs" -> JsArray(this.proofs.proofs.map(p => JsString(p.base58))))
protected def jsonBase(): JsObject =
Json.obj(
"type" -> builder.typeId,
"id" -> id().base58,
"sender" -> sender.address,
"senderPublicKey" -> Base58.encode(sender.publicKey),
"fee" -> assetFee._2,
"timestamp" -> timestamp
) ++ JsObject(proofField)
}
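// Editor's note (not part of the original file): the JSON envelope jsonBase() produces,
// with hypothetical values; concrete transaction types append their own fields to it.
//   {
//     "type": 4,
//     "id": "<base58 tx id>",
//     "sender": "<address>",
//     "senderPublicKey": "<base58 public key>",
//     "fee": 100000,
//     "timestamp": 1526910000000,
//     "proofs": ["<base58 proof>"]
//   }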
|
amur-host/node
|
src/main/scala/com/amurplatform/mining/Miner.scala
|
package com.amurplatform.mining
import cats.data.EitherT
import cats.implicits._
import com.amurplatform.consensus.{GeneratingBalanceProvider, PoSSelector}
import com.amurplatform.features.BlockchainFeatures
import com.amurplatform.features.FeatureProvider._
import com.amurplatform.metrics.{BlockStats, HistogramExt, Instrumented}
import com.amurplatform.network._
import com.amurplatform.settings.{FunctionalitySettings, AmurSettings}
import com.amurplatform.state._
import com.amurplatform.state.appender.{BlockAppender, MicroblockAppender}
import com.amurplatform.utx.UtxPool
import io.netty.channel.group.ChannelGroup
import kamon.Kamon
import kamon.metric.MeasurementUnit
import monix.eval.Task
import monix.execution.cancelables.{CompositeCancelable, SerialCancelable}
import monix.execution.schedulers.SchedulerService
import com.amurplatform.account.{Address, PrivateKeyAccount, PublicKeyAccount}
import com.amurplatform.block.Block._
import com.amurplatform.block.{Block, MicroBlock}
import com.amurplatform.consensus.nxt.NxtLikeConsensusBlockData
import com.amurplatform.utils.{ScorexLogging, Time}
import com.amurplatform.transaction._
import com.amurplatform.wallet.Wallet
import scala.collection.mutable.{Map => MMap}
import scala.concurrent.Await
import scala.concurrent.duration._
trait Miner {
def scheduleMining(): Unit
}
trait MinerDebugInfo {
def state: MinerDebugInfo.State
def collectNextBlockGenerationTimes: List[(Address, Long)]
}
object MinerDebugInfo {
sealed trait State
case object MiningBlocks extends State
case object MiningMicroblocks extends State
case object Disabled extends State
case class Error(error: String) extends State
}
class MinerImpl(allChannels: ChannelGroup,
blockchainUpdater: BlockchainUpdater with NG,
checkpoint: CheckpointService,
settings: AmurSettings,
timeService: Time,
utx: UtxPool,
wallet: Wallet,
pos: PoSSelector,
val minerScheduler: SchedulerService,
val appenderScheduler: SchedulerService)
extends Miner
with MinerDebugInfo
with ScorexLogging
with Instrumented {
import Miner._
private implicit val s: SchedulerService = minerScheduler
private lazy val minerSettings = settings.minerSettings
private lazy val minMicroBlockDurationMills = minerSettings.minMicroBlockAge.toMillis
private lazy val blockchainSettings = settings.blockchainSettings
private val scheduledAttempts = SerialCancelable()
private val microBlockAttempt = SerialCancelable()
private val blockBuildTimeStats = Kamon.histogram("pack-and-forge-block-time", MeasurementUnit.time.milliseconds)
private val microBlockBuildTimeStats = Kamon.histogram("forge-microblock-time", MeasurementUnit.time.milliseconds)
private val nextBlockGenerationTimes: MMap[Address, Long] = MMap.empty
@volatile private var debugState: MinerDebugInfo.State = MinerDebugInfo.Disabled
def collectNextBlockGenerationTimes: List[(Address, Long)] = Await.result(Task.now(nextBlockGenerationTimes.toList).runAsyncLogErr, Duration.Inf)
private def checkAge(parentHeight: Int, parentTimestamp: Long): Either[String, Unit] =
Either
.cond(parentHeight == 1, (), (timeService.correctedTime() - parentTimestamp).millis)
.left
.flatMap(blockAge =>
Either.cond(
blockAge <= minerSettings.intervalAfterLastBlockThenGenerationIsAllowed,
(),
s"BlockChain is too old (last block timestamp is $parentTimestamp generated $blockAge ago)"
))
private def checkScript(account: PrivateKeyAccount): Either[String, Unit] = {
Either.cond(!blockchainUpdater.hasScript(account), (), s"Account(${account.toAddress}) is scripted and therefore not allowed to forge blocks")
}
private def ngEnabled: Boolean = blockchainUpdater.featureActivationHeight(BlockchainFeatures.NG.id).exists(blockchainUpdater.height > _ + 1)
private def generateOneBlockTask(account: PrivateKeyAccount, balance: Long)(
delay: FiniteDuration): Task[Either[String, (MiningConstraints, Block, MiningConstraint)]] = {
Task {
forgeBlock(account, balance)
}.delayExecution(delay)
}
private def consensusData(height: Int,
account: PrivateKeyAccount,
lastBlock: Block,
refBlockBT: Long,
refBlockTS: Long,
balance: Long,
currentTime: Long): Either[String, NxtLikeConsensusBlockData] = {
pos
.consensusData(
account.publicKey,
height,
blockchainSettings.genesisSettings.averageBlockDelay,
refBlockBT,
refBlockTS,
blockchainUpdater.parent(lastBlock, 2).map(_.timestamp),
currentTime
)
.leftMap(_.toString)
}
private def forgeBlock(account: PrivateKeyAccount, balance: Long): Either[String, (MiningConstraints, Block, MiningConstraint)] = {
// should take last block right at the time of mining since microblocks might have been added
val height = blockchainUpdater.height
val version = if (height <= blockchainSettings.functionalitySettings.blockVersion3AfterHeight) PlainBlockVersion else NgBlockVersion
val lastBlock = blockchainUpdater.lastBlock.get
val referencedBlockInfo = blockchainUpdater.bestLastBlockInfo(System.currentTimeMillis() - minMicroBlockDurationMills).get
val refBlockBT = referencedBlockInfo.consensus.baseTarget
val refBlockTS = referencedBlockInfo.timestamp
val refBlockID = referencedBlockInfo.blockId
lazy val currentTime = timeService.correctedTime()
lazy val blockDelay = currentTime - lastBlock.timestamp
measureSuccessful(
blockBuildTimeStats,
for {
_ <- checkQuorumAvailable()
validBlockDelay <- pos
.getValidBlockDelay(height, account.publicKey, refBlockBT, balance)
.leftMap(_.toString)
.ensure(s"$currentTime: Block delay $blockDelay was NOT less than estimated delay")(_ < blockDelay)
_ = log.debug(
s"Forging with ${account.address}, Time $blockDelay > Estimated Time $validBlockDelay, balance $balance, prev block $refBlockID")
_ = log.debug(s"Previous block ID $refBlockID at $height with target $refBlockBT")
consensusData <- consensusData(height, account, lastBlock, refBlockBT, refBlockTS, balance, currentTime)
estimators = MiningConstraints(minerSettings, blockchainUpdater, height)
mdConstraint = MultiDimensionalMiningConstraint(estimators.total, estimators.keyBlock)
(unconfirmed, updatedMdConstraint) = utx.packUnconfirmed(mdConstraint, isSortingRequired())
_ = log.debug(s"Adding ${unconfirmed.size} unconfirmed transaction(s) to new block")
block <- Block
.buildAndSign(version.toByte, currentTime, refBlockID, consensusData, unconfirmed, account, blockFeatures(version))
.leftMap(_.err)
} yield (estimators, block, updatedMdConstraint.constraints.head)
)
}
private def checkQuorumAvailable(): Either[String, Unit] = {
val chanCount = allChannels.size()
    Either.cond(chanCount >= minerSettings.quorum, (), s"Quorum not available ($chanCount/${minerSettings.quorum}), not forging block.")
}
private def isSortingRequired(): Boolean = blockchainUpdater.height <= blockchainSettings.functionalitySettings.dontRequireSortedTransactionsAfter
private def blockFeatures(version: Byte): Set[Short] = {
if (version <= 2) Set.empty[Short]
else
settings.featuresSettings.supported
.filterNot(blockchainUpdater.approvedFeatures.keySet)
.filter(BlockchainFeatures.implemented)
.toSet
}
private def generateOneMicroBlockTask(account: PrivateKeyAccount,
accumulatedBlock: Block,
constraints: MiningConstraints,
restTotalConstraint: MiningConstraint): Task[MicroblockMiningResult] = {
log.trace(s"Generating microBlock for $account, constraints: $restTotalConstraint")
val pc = allChannels.size()
if (pc < minerSettings.quorum) {
log.trace(s"Quorum not available ($pc/${minerSettings.quorum}, not forging microblock with ${account.address}")
Task.now(Retry)
} else if (utx.size == 0) {
log.trace(s"Skipping microBlock because utx is empty")
Task.now(Retry)
} else {
val (unconfirmed, updatedTotalConstraint) = measureLog("packing unconfirmed transactions for microblock") {
val mdConstraint = MultiDimensionalMiningConstraint(restTotalConstraint, constraints.micro)
val (unconfirmed, updatedMdConstraint) = utx.packUnconfirmed(mdConstraint, sortInBlock = false)
(unconfirmed, updatedMdConstraint.constraints.head)
}
if (unconfirmed.isEmpty) {
log.trace {
if (updatedTotalConstraint.isEmpty) s"Stopping forging microBlocks, the block is full: $updatedTotalConstraint"
else "Stopping forging microBlocks, because all transactions are too big"
}
Task.now(Stop)
} else {
log.trace(s"Accumulated ${unconfirmed.size} txs for microblock")
val start = System.currentTimeMillis()
(for {
signedBlock <- EitherT.fromEither[Task](
Block.buildAndSign(
version = 3,
timestamp = accumulatedBlock.timestamp,
reference = accumulatedBlock.reference,
consensusData = accumulatedBlock.consensusData,
transactionData = accumulatedBlock.transactionData ++ unconfirmed,
signer = account,
featureVotes = accumulatedBlock.featureVotes
))
microBlock <- EitherT.fromEither[Task](
MicroBlock.buildAndSign(account, unconfirmed, accumulatedBlock.signerData.signature, signedBlock.signerData.signature))
_ = microBlockBuildTimeStats.safeRecord(System.currentTimeMillis() - start)
_ <- EitherT(MicroblockAppender(checkpoint, blockchainUpdater, utx, appenderScheduler)(microBlock))
} yield (microBlock, signedBlock)).value map {
case Left(err) => Error(err)
case Right((microBlock, signedBlock)) =>
BlockStats.mined(microBlock)
allChannels.broadcast(MicroBlockInv(account, microBlock.totalResBlockSig, microBlock.prevResBlockSig))
if (updatedTotalConstraint.isEmpty) {
log.trace(s"$microBlock has been mined for $account. Stop forging microBlocks, the block is full: $updatedTotalConstraint")
Stop
} else {
log.trace(s"$microBlock has been mined for $account")
Success(signedBlock, updatedTotalConstraint)
}
}
}
}
}
private def generateMicroBlockSequence(account: PrivateKeyAccount,
accumulatedBlock: Block,
delay: FiniteDuration,
constraints: MiningConstraints,
restTotalConstraint: MiningConstraint): Task[Unit] = {
debugState = MinerDebugInfo.MiningMicroblocks
log.info(s"Generate MicroBlock sequence, delay = $delay")
generateOneMicroBlockTask(account, accumulatedBlock, constraints, restTotalConstraint)
.asyncBoundary(minerScheduler)
.delayExecution(delay)
.flatMap {
case Error(e) =>
Task {
debugState = MinerDebugInfo.Error(e.toString)
log.warn("Error mining MicroBlock: " + e.toString)
}
case Success(newTotal, updatedTotalConstraint) =>
generateMicroBlockSequence(account, newTotal, minerSettings.microBlockInterval, constraints, updatedTotalConstraint)
case Retry => generateMicroBlockSequence(account, accumulatedBlock, minerSettings.microBlockInterval, constraints, restTotalConstraint)
case Stop =>
Task {
debugState = MinerDebugInfo.MiningBlocks
log.debug("MicroBlock mining completed, block is full")
}
}
}
private def nextBlockGenerationTime(fs: FunctionalitySettings,
height: Int,
block: Block,
account: PublicKeyAccount): Either[String, (Long, Long)] = {
val balance = GeneratingBalanceProvider.balance(blockchainUpdater, fs, height, account.toAddress)
if (GeneratingBalanceProvider.isMiningAllowed(blockchainUpdater, height, balance)) {
for {
expectedTS <- pos
.getValidBlockDelay(height, account.publicKey, block.consensusData.baseTarget, balance)
.map(_ + block.timestamp)
.leftMap(_.toString)
result <- Either.cond(
0 < expectedTS && expectedTS < Long.MaxValue,
(balance, expectedTS),
s"Invalid next block generation time: $expectedTS"
)
} yield result
} else Left(s"Balance $balance of ${account.address} is lower than required for generation")
}
private def generateBlockTask(account: PrivateKeyAccount): Task[Unit] = {
{
val height = blockchainUpdater.height
val lastBlock = blockchainUpdater.lastBlock.get
for {
_ <- checkAge(height, blockchainUpdater.lastBlockTimestamp.get)
_ <- checkScript(account)
balanceAndTs <- nextBlockGenerationTime(blockchainSettings.functionalitySettings, height, lastBlock, account)
(balance, ts) = balanceAndTs
calculatedOffset = ts - timeService.correctedTime()
offset = Math.max(calculatedOffset, minerSettings.minimalBlockGenerationOffset.toMillis).millis
} yield (offset, balance)
} match {
case Right((offset, balance)) =>
log.debug(s"Next attempt for acc=$account in $offset")
nextBlockGenerationTimes += account.toAddress -> (System.currentTimeMillis() + offset.toMillis)
generateOneBlockTask(account, balance)(offset).flatMap {
case Right((estimators, block, totalConstraint)) =>
BlockAppender(checkpoint, blockchainUpdater, timeService, utx, pos, settings, appenderScheduler)(block)
.asyncBoundary(minerScheduler)
.map {
case Left(err) => log.warn("Error mining Block: " + err.toString)
case Right(Some(score)) =>
log.debug(s"Forged and applied $block by ${account.address} with cumulative score $score")
BlockStats.mined(block, blockchainUpdater.height)
allChannels.broadcast(BlockForged(block))
scheduleMining()
if (ngEnabled && !totalConstraint.isEmpty) startMicroBlockMining(account, block, estimators, totalConstraint)
case Right(None) => log.warn("Newly created block has already been appended, should not happen")
}
case Left(err) =>
log.debug(s"No block generated because $err, retrying")
generateBlockTask(account)
}
case Left(err) =>
log.debug(s"Not scheduling block mining because $err")
debugState = MinerDebugInfo.Error(err)
Task.unit
}
}
def scheduleMining(): Unit = {
Miner.blockMiningStarted.increment()
val nonScriptedAccounts = wallet.privateKeyAccounts.filterNot(blockchainUpdater.hasScript(_))
scheduledAttempts := CompositeCancelable.fromSet(nonScriptedAccounts.map(generateBlockTask).map(_.runAsyncLogErr).toSet)
microBlockAttempt := SerialCancelable()
debugState = MinerDebugInfo.MiningBlocks
}
private def startMicroBlockMining(account: PrivateKeyAccount,
lastBlock: Block,
constraints: MiningConstraints,
restTotalConstraint: MiningConstraint): Unit = {
log.info(s"Start mining microblocks")
Miner.microMiningStarted.increment()
microBlockAttempt := generateMicroBlockSequence(account, lastBlock, Duration.Zero, constraints, restTotalConstraint).runAsyncLogErr
log.trace(s"MicroBlock mining scheduled for $account")
}
override def state: MinerDebugInfo.State = debugState
}
object Miner {
val blockMiningStarted = Kamon.counter("block-mining-started")
val microMiningStarted = Kamon.counter("micro-mining-started")
val MaxTransactionsPerMicroblock: Int = 500
val Disabled = new Miner with MinerDebugInfo {
override def scheduleMining(): Unit = ()
override def collectNextBlockGenerationTimes: List[(Address, Long)] = List.empty
override val state = MinerDebugInfo.Disabled
}
sealed trait MicroblockMiningResult
case object Stop extends MicroblockMiningResult
case object Retry extends MicroblockMiningResult
case class Error(e: ValidationError) extends MicroblockMiningResult
case class Success(b: Block, totalConstraint: MiningConstraint) extends MicroblockMiningResult
}
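// Editor's note (not part of the original file): the control flow above, in short.
//   scheduleMining() spawns one generateBlockTask per non-scripted wallet account; each
//   task validates block age, account script and PoS timing, waits out the computed
//   offset, forges via forgeBlock (which also checks the peer quorum), appends and
//   rebroadcasts the block, then reschedules itself. With NG active and space left in
//   the constraint it hands off to generateMicroBlockSequence, which keeps packing utx
//   transactions into microblocks until the block is full (Stop) or an error occurs.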
|
amur-host/node
|
lang/shared/src/main/scala/com/amurplatform/lang/v1/traits/domain/Recipient.scala
|
package com.amurplatform.lang.v1.traits.domain
import scodec.bits.ByteVector
trait Recipient
object Recipient {
case class Address(bytes: ByteVector) extends Recipient
case class Alias(name: String) extends Recipient
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/matcher/RoundingIssuesTestSuite.scala
|
package com.amurplatform.it.sync.matcher
import com.typesafe.config.{Config, ConfigFactory}
import com.amurplatform.account.PrivateKeyAccount
import com.amurplatform.api.http.assets.SignedIssueV1Request
import com.amurplatform.it.ReportingTestName
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.it.api.SyncMatcherHttpApi._
import com.amurplatform.it.sync.CustomFeeTransactionSuite.defaultAssetQuantity
import com.amurplatform.it.transactions.NodesFromDocker
import com.amurplatform.it.util._
import com.amurplatform.transaction.AssetId
import com.amurplatform.transaction.assets.IssueTransactionV1
import com.amurplatform.transaction.assets.exchange.{AssetPair, OrderType}
import com.amurplatform.utils.Base58
import org.scalatest.{BeforeAndAfterAll, CancelAfterFailure, FreeSpec, Matchers}
import scala.concurrent.duration._
import scala.util.Random
class RoundingIssuesTestSuite
extends FreeSpec
with Matchers
with BeforeAndAfterAll
with CancelAfterFailure
with NodesFromDocker
with ReportingTestName {
import RoundingIssuesTestSuite._
override protected def nodeConfigs: Seq[Config] = Configs
private def matcherNode = nodes.head
private def aliceNode = nodes(1)
private def bobNode = nodes(2)
matcherNode.signedIssue(createSignedIssueRequest(IssueUsdTx))
nodes.waitForHeightArise()
"should correctly fill an order with small amount" in {
val aliceBalanceBefore = matcherNode.accountBalances(aliceNode.address)._1
val bobBalanceBefore = matcherNode.accountBalances(bobNode.address)._1
val counter = matcherNode.prepareOrder(aliceNode, amurUsdPair, OrderType.BUY, 238, 3100000000L)
val counterId = matcherNode.placeOrder(counter).message.id
val submitted = matcherNode.prepareOrder(bobNode, amurUsdPair, OrderType.SELL, 235, 425532L)
val submittedId = matcherNode.placeOrder(submitted).message.id
matcherNode.waitOrderStatusAndAmount(amurUsdPair, submittedId, "Filled", Some(420169L), 1.minute)
matcherNode.waitOrderStatusAndAmount(amurUsdPair, counterId, "PartiallyFilled", Some(420169L), 1.minute)
matcherNode.cancelOrder(aliceNode, amurUsdPair, Some(counterId))
val tx = matcherNode.transactionsByOrder(counterId).head
matcherNode.waitForTransaction(tx.id)
val rawExchangeTx = matcherNode.rawTransactionInfo(tx.id)
(rawExchangeTx \ "price").as[Long] shouldBe 238L
(rawExchangeTx \ "amount").as[Long] shouldBe 420169L
(rawExchangeTx \ "buyMatcherFee").as[Long] shouldBe 40L
(rawExchangeTx \ "sellMatcherFee").as[Long] shouldBe 296219L
val aliceBalanceAfter = matcherNode.accountBalances(aliceNode.address)._1
val bobBalanceAfter = matcherNode.accountBalances(bobNode.address)._1
(aliceBalanceAfter - aliceBalanceBefore) shouldBe (-40L + 420169L)
(bobBalanceAfter - bobBalanceBefore) shouldBe (-296219L - 420169L)
}
}
object RoundingIssuesTestSuite {
import ConfigFactory._
import com.amurplatform.it.NodeConfigs._
private val ForbiddenAssetId = "FdbnAsset"
private val Decimals: Byte = 2
private val minerDisabled = parseString("amur.miner.enable = no")
private val matcherConfig = parseString(s"""
|amur.matcher {
| enable = yes
| account = <KEY>
| bind-address = "0.0.0.0"
| order-match-tx-fee = 300000
| blacklisted-assets = ["$ForbiddenAssetId"]
| balance-watching.enable = yes
|}""".stripMargin)
private val _Configs: Seq[Config] = (Default.last +: Random.shuffle(Default.init).take(3))
.zip(Seq(matcherConfig, minerDisabled, minerDisabled, empty()))
.map { case (n, o) => o.withFallback(n) }
private val aliceSeed = _Configs(1).getString("account-seed")
private val alicePk = PrivateKeyAccount.fromSeed(aliceSeed).right.get
val IssueUsdTx: IssueTransactionV1 = IssueTransactionV1
.selfSigned(
sender = alicePk,
name = "USD-X".getBytes(),
description = "asset description".getBytes(),
quantity = defaultAssetQuantity,
decimals = Decimals,
reissuable = false,
fee = 1.amur,
timestamp = System.currentTimeMillis()
)
.right
.get
val UsdId: AssetId = IssueUsdTx.id()
val amurUsdPair = AssetPair(
amountAsset = None,
priceAsset = Some(UsdId)
)
private val updatedMatcherConfig = parseString(s"""
|amur.matcher {
| price-assets = [ "$UsdId", "AMUR"]
|}
""".stripMargin)
private val Configs = _Configs.map(updatedMatcherConfig.withFallback(_))
def createSignedIssueRequest(tx: IssueTransactionV1): SignedIssueV1Request = {
import tx._
SignedIssueV1Request(
Base58.encode(tx.sender.publicKey),
new String(name),
new String(description),
quantity,
decimals,
reissuable,
fee,
timestamp,
signature.base58
)
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/api/http/leasing/SignedLeaseV2Request.scala
|
package com.amurplatform.api.http.leasing
import cats.implicits._
import io.swagger.annotations.ApiModelProperty
import play.api.libs.json.{Format, Json}
import com.amurplatform.account.{AddressOrAlias, PublicKeyAccount}
import com.amurplatform.api.http.BroadcastRequest
import com.amurplatform.transaction.lease.LeaseTransactionV2
import com.amurplatform.transaction.{Proofs, ValidationError}
case class SignedLeaseV2Request(@ApiModelProperty(required = true)
version: Byte,
@ApiModelProperty(value = "Base58 encoded sender public key", required = true)
senderPublicKey: String,
@ApiModelProperty(required = true)
amount: Long,
@ApiModelProperty(required = true)
fee: Long,
@ApiModelProperty(value = "Recipient address", required = true)
recipient: String,
@ApiModelProperty(required = true)
timestamp: Long,
@ApiModelProperty(required = true)
proofs: List[String])
extends BroadcastRequest {
def toTx: Either[ValidationError, LeaseTransactionV2] =
for {
_sender <- PublicKeyAccount.fromBase58String(senderPublicKey)
_proofBytes <- proofs.traverse(s => parseBase58(s, "invalid proof", Proofs.MaxProofStringSize))
_proofs <- Proofs.create(_proofBytes)
_recipient <- AddressOrAlias.fromString(recipient)
_t <- LeaseTransactionV2.create(version, _sender, amount, fee, timestamp, _recipient, _proofs)
} yield _t
}
object SignedLeaseV2Request {
implicit val broadcastLeaseRequestReadsFormat: Format[SignedLeaseV2Request] = Json.format
}
|
amur-host/node
|
src/main/scala/com/amurplatform/mining/MiningConstraints.scala
|
package com.amurplatform.mining
import cats.data.NonEmptyList
import com.amurplatform.features.BlockchainFeatures
import com.amurplatform.features.FeatureProvider._
import com.amurplatform.settings.MinerSettings
import com.amurplatform.state.Blockchain
import com.amurplatform.block.Block
case class MiningConstraints(total: MiningConstraint, keyBlock: MiningConstraint, micro: MiningConstraint)
object MiningConstraints {
val MaxScriptRunsInBlock = 100
private val ClassicAmountOfTxsInBlock = 100
private val MaxTxsSizeInBytes = 1 * 1024 * 1024 // 1 megabyte
def apply(minerSettings: MinerSettings, blockchain: Blockchain, height: Int): MiningConstraints = {
val activatedFeatures = blockchain.activatedFeaturesAt(height)
val isNgEnabled = activatedFeatures.contains(BlockchainFeatures.NG.id)
val isMassTransferEnabled = activatedFeatures.contains(BlockchainFeatures.MassTransfer.id)
val isScriptEnabled = activatedFeatures.contains(BlockchainFeatures.SmartAccounts.id)
val total: MiningConstraint =
if (isMassTransferEnabled) OneDimensionalMiningConstraint(MaxTxsSizeInBytes, TxEstimators.sizeInBytes)
else {
val maxTxs = if (isNgEnabled) Block.MaxTransactionsPerBlockVer3 else ClassicAmountOfTxsInBlock
OneDimensionalMiningConstraint(maxTxs, TxEstimators.one)
}
new MiningConstraints(
total =
if (isScriptEnabled)
MultiDimensionalMiningConstraint(NonEmptyList.of(OneDimensionalMiningConstraint(MaxScriptRunsInBlock, TxEstimators.scriptRunNumber), total))
else total,
keyBlock =
if (isMassTransferEnabled) OneDimensionalMiningConstraint(0, TxEstimators.one)
else {
val maxTxsForKeyBlock = if (isNgEnabled) minerSettings.maxTransactionsInKeyBlock else ClassicAmountOfTxsInBlock
OneDimensionalMiningConstraint(maxTxsForKeyBlock, TxEstimators.one)
},
micro =
if (isNgEnabled) OneDimensionalMiningConstraint(minerSettings.maxTransactionsInMicroBlock, TxEstimators.one)
else MiningConstraint.Unlimited
)
}
}
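// Editor's note (not part of the original file): how the constraints above degrade as
// features activate, summarized from the apply() logic.
//   neither NG nor MassTransfer: total and keyBlock capped at 100 txs, micro unlimited
//   NG active: total capped at Block.MaxTransactionsPerBlockVer3 txs, keyBlock at
//              minerSettings.maxTransactionsInKeyBlock, micro at minerSettings.maxTransactionsInMicroBlock
//   MassTransfer active: total capped at 1 MiB of tx bytes, keyBlock at 0 txs
//   SmartAccounts active: total additionally capped at MaxScriptRunsInBlock script runs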
|
amur-host/node
|
discovery/src/main/scala/com.amurplatform.discovery/Settings.scala
|
package com.amurplatform.discovery
import java.net.InetSocketAddress
import net.ceedubs.ficus.Ficus._
import com.typesafe.config.{Config, ConfigFactory}
import net.ceedubs.ficus.readers.{NameMapper, ValueReader}
import scala.concurrent.duration.FiniteDuration
case class ChainSettings(chainId: Char, initialPeers: Seq[InetSocketAddress])
case class Settings(chains: Seq[ChainSettings], webSocketHost: String, webSocketPort: Int, workersCount: Int, discoveryInterval: FiniteDuration)
object Settings {
implicit val readConfigInHyphen: NameMapper = net.ceedubs.ficus.readers.namemappers.implicits.hyphenCase // IDEA bug
implicit val inetSocketAddressReader: ValueReader[InetSocketAddress] = { (config: Config, path: String) =>
    val parts = config.as[String](path).split(":")
    new InetSocketAddress(parts(0), parts(1).toInt)
}
  implicit val charReader: ValueReader[Char] = (config: Config, path: String) => config.as[String](path).head
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
lazy val default: Settings = ConfigFactory.load().as[Settings]("discovery")
}
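// Editor's sketch (not part of the original file): the HOCON layout Settings.default
// expects, following the hyphen-case name mapping declared above; all values below are
// hypothetical.
//   discovery {
//     chains = [
//       { chain-id = "W", initial-peers = ["127.0.0.1:6868"] }
//     ]
//     web-socket-host = "0.0.0.0"
//     web-socket-port = 8080
//     workers-count = 4
//     discovery-interval = 30s
//   }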
|
amur-host/node
|
src/main/scala/com/amurplatform/api/http/assets/IssueV1Request.scala
|
package com.amurplatform.api.http.assets
import io.swagger.annotations.ApiModelProperty
import play.api.libs.json.{Format, Json}
case class IssueV1Request(sender: String,
name: String,
description: String,
quantity: Long,
@ApiModelProperty(allowableValues = "range[0,8]", example = "8", dataType = "integer", required = true)
decimals: Byte,
reissuable: Boolean,
fee: Long,
timestamp: Option[Long] = None)
object IssueV1Request {
implicit val issueFormat: Format[IssueV1Request] = Json.format
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/AssetAcc.scala
|
package com.amurplatform.transaction
import com.amurplatform.account.Address
case class AssetAcc(account: Address, assetId: Option[AssetId])
|
amur-host/node
|
src/test/scala/com/amurplatform/settings/FeesSettingsSpecification.scala
|
package com.amurplatform.settings
import com.typesafe.config.ConfigException.WrongType
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpec, Matchers}
class FeesSettingsSpecification extends FlatSpec with Matchers {
"FeesSettings" should "read values" in {
val config = ConfigFactory.parseString("""amur {
| network.file = "xxx"
| fees {
| payment.AMUR = 100000
| issue.AMUR = 100000000
| transfer.AMUR = 100000
| reissue.AMUR = 100000
| burn.AMUR = 100000
| exchange.AMUR = 100000
| }
| miner.timeout = 10
|}
""".stripMargin).resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees.size should be(6)
settings.fees(2) should be(List(FeeSettings("AMUR", 100000)))
settings.fees(3) should be(List(FeeSettings("AMUR", 100000000)))
settings.fees(4) should be(List(FeeSettings("AMUR", 100000)))
settings.fees(5) should be(List(FeeSettings("AMUR", 100000)))
settings.fees(6) should be(List(FeeSettings("AMUR", 100000)))
settings.fees(7) should be(List(FeeSettings("AMUR", 100000)))
}
it should "combine read few fees for one transaction type" in {
val config = ConfigFactory.parseString("""amur.fees {
| payment {
| AMUR0 = 0
| }
| issue {
| AMUR1 = 111
| AMUR2 = 222
| AMUR3 = 333
| }
| transfer {
| AMUR4 = 444
| }
|}
""".stripMargin).resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees.size should be(3)
settings.fees(2).toSet should equal(Set(FeeSettings("AMUR0", 0)))
settings.fees(3).toSet should equal(Set(FeeSettings("AMUR1", 111), FeeSettings("AMUR2", 222), FeeSettings("AMUR3", 333)))
settings.fees(4).toSet should equal(Set(FeeSettings("AMUR4", 444)))
}
it should "allow empty list" in {
val config = ConfigFactory.parseString("amur.fees {}".stripMargin).resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees.size should be(0)
}
it should "override values" in {
val config = ConfigFactory
.parseString("""amur.fees {
| payment.AMUR1 = 1111
| reissue.AMUR5 = 0
|}
""".stripMargin)
.withFallback(
ConfigFactory.parseString("""amur.fees {
| payment.AMUR = 100000
| issue.AMUR = 100000000
| transfer.AMUR = 100000
| reissue.AMUR = 100000
| burn.AMUR = 100000
| exchange.AMUR = 100000
|}
""".stripMargin)
)
.resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees.size should be(6)
settings.fees(2).toSet should equal(Set(FeeSettings("AMUR", 100000), FeeSettings("AMUR1", 1111)))
settings.fees(5).toSet should equal(Set(FeeSettings("AMUR", 100000), FeeSettings("AMUR5", 0)))
}
it should "fail on incorrect long values" in {
val config = ConfigFactory.parseString("""amur.fees {
| payment.AMUR=N/A
|}""".stripMargin).resolve()
intercept[WrongType] {
FeesSettings.fromConfig(config)
}
}
it should "not fail on long values as strings" in {
val config = ConfigFactory.parseString("""amur.fees {
| transfer.AMUR="1000"
|}""".stripMargin).resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees(4).toSet should equal(Set(FeeSettings("AMUR", 1000)))
}
it should "fail on unknown transaction type" in {
val config = ConfigFactory.parseString("""amur.fees {
| shmayment.AMUR=100
|}""".stripMargin).resolve()
intercept[NoSuchElementException] {
FeesSettings.fromConfig(config)
}
}
it should "override values from default config" in {
val defaultConfig = ConfigFactory.load()
val config = ConfigFactory.parseString("""
|amur.fees {
| issue {
| AMUR = 200000000
| }
| transfer {
| AMUR = 300000
| "6MPKrD5B7GrfbciHECg1MwdvRUhRETApgNZspreBJ8JL" = 1
| }
| reissue {
| AMUR = 400000
| }
| burn {
| AMUR = 500000
| }
| exchange {
| AMUR = 600000
| }
| lease {
| AMUR = 700000
| }
| lease-cancel {
| AMUR = 800000
| }
| create-alias {
| AMUR = 900000
| }
| mass-transfer {
| AMUR = 10000
| }
| data {
| AMUR = 200000
| }
| set-script {
| AMUR = 300000
| }
| sponsor-fee {
| AMUR = 400000
| }
|}
""".stripMargin).withFallback(defaultConfig).resolve()
val settings = FeesSettings.fromConfig(config)
settings.fees.size should be(12)
settings.fees(3).toSet should equal(Set(FeeSettings("AMUR", 200000000)))
settings.fees(4).toSet should equal(Set(FeeSettings("AMUR", 300000), FeeSettings("6MPKrD5B7GrfbciHECg1MwdvRUhRETApgNZspreBJ8JL", 1)))
settings.fees(5).toSet should equal(Set(FeeSettings("AMUR", 400000)))
settings.fees(6).toSet should equal(Set(FeeSettings("AMUR", 500000)))
settings.fees(7).toSet should equal(Set(FeeSettings("AMUR", 600000)))
settings.fees(8).toSet should equal(Set(FeeSettings("AMUR", 700000)))
settings.fees(9).toSet should equal(Set(FeeSettings("AMUR", 800000)))
settings.fees(10).toSet should equal(Set(FeeSettings("AMUR", 900000)))
settings.fees(11).toSet should equal(Set(FeeSettings("AMUR", 10000)))
settings.fees(12).toSet should equal(Set(FeeSettings("AMUR", 200000)))
settings.fees(13).toSet should equal(Set(FeeSettings("AMUR", 300000)))
settings.fees(14).toSet should equal(Set(FeeSettings("AMUR", 400000)))
}
}
|
amur-host/node
|
it/src/main/scala/com/amurplatform/it/util/package.scala
|
package com.amurplatform.it
import com.amurplatform.settings.Constants
import io.netty.util.Timer
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
package object util {
implicit class TimerExt(val timer: Timer) extends AnyVal {
def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = {
val p = Promise[A]
try {
timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit)
} catch {
case NonFatal(e) => p.failure(e)
}
p.future
}
def retryUntil[A](f: => Future[A], cond: A => Boolean, retryInterval: FiniteDuration)(implicit ec: ExecutionContext): Future[A] =
f.flatMap(v => if (cond(v)) Future.successful(v) else schedule(retryUntil(f, cond, retryInterval), retryInterval))
}
implicit class DoubleExt(val d: Double) extends AnyVal {
def amur: Long = (d * Constants.UnitsInWave).toLong
}
}
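// Editor's sketch (not part of the original file): polling with retryUntil on top of a
// Netty HashedWheelTimer. The heightNow() probe is a hypothetical stand-in.
object TimerExtUsageSketch {
  import com.amurplatform.it.util._
  import io.netty.util.HashedWheelTimer
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.Future
  import scala.concurrent.duration._

  val timer = new HashedWheelTimer()
  def heightNow(): Future[Int] = Future.successful(1) // hypothetical stand-in

  // Re-evaluates heightNow() every second until the predicate holds.
  val ready: Future[Int] = timer.retryUntil(heightNow(), (_: Int) >= 1, 1.second)
}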
|
amur-host/node
|
lang/shared/src/main/scala/com/amurplatform/lang/v1/compiler/package.scala
|
package com.amurplatform.lang.v1
import cats.implicits._
import com.amurplatform.lang.v1.task.TaskM
import com.amurplatform.lang.v1.task.imports._
package object compiler {
type CompileM[A] = TaskM[CompilerContext, CompilationError, A]
implicit class EiExt[A](ei: Either[CompilationError, A]) {
def toCompileM: CompileM[A] =
ei.fold(raiseError, _.pure[CompileM])
}
}
|
amur-host/node
|
it/src/main/scala/com/amurplatform/it/api/model.scala
|
package com.amurplatform.it.api
import play.api.libs.json.{Format, Json}
// USCE no longer contains references to non-serializable Request/Response objects
// to work around https://github.com/scalatest/scalatest/issues/556
case class UnexpectedStatusCodeException(requestUrl: String, statusCode: Int, responseBody: String)
extends Exception(s"Request: $requestUrl; Unexpected status code ($statusCode): $responseBody")
case class Status(blockchainHeight: Int, stateHeight: Int, updatedTimestamp: Long, updatedDate: String)
object Status {
implicit val statusFormat: Format[Status] = Json.format
}
case class Peer(address: String, declaredAddress: String, peerName: String)
object Peer {
implicit val peerFormat: Format[Peer] = Json.format
}
case class Address(address: String)
object Address {
implicit val addressFormat: Format[Address] = Json.format
}
case class Balance(address: String, confirmations: Int, balance: Long)
object Balance {
implicit val balanceFormat: Format[Balance] = Json.format
}
case class AssetBalance(address: String, assetId: String, balance: Long)
object AssetBalance {
implicit val assetBalanceFormat: Format[AssetBalance] = Json.format
}
case class CompiledScript(script: String)
object CompiledScript {
implicit val compiledScriptFormat: Format[CompiledScript] = Json.format
}
case class FullAssetInfo(assetId: String,
balance: Long,
reissuable: Boolean,
minSponsoredAssetFee: Option[Long],
sponsorBalance: Option[Long],
quantity: Long)
object FullAssetInfo {
implicit val fullAssetInfoFormat: Format[FullAssetInfo] = Json.format
}
case class FullAssetsInfo(address: String, balances: List[FullAssetInfo])
object FullAssetsInfo {
implicit val fullAssetsInfoFormat: Format[FullAssetsInfo] = Json.format
}
case class AssetInfo(assetId: String,
issueHeight: Int,
issueTimestamp: Long,
issuer: String,
name: String,
description: String,
decimals: Int,
reissuable: Boolean,
quantity: Long,
minSponsoredAssetFee: Option[Long])
object AssetInfo {
implicit val AssetInfoFormat: Format[AssetInfo] = Json.format
}
case class Transaction(`type`: Int, id: String, fee: Long, timestamp: Long, sender: Option[String])
object Transaction {
implicit val transactionFormat: Format[Transaction] = Json.format
}
case class TransactionInfo(`type`: Int,
id: String,
fee: Long,
timestamp: Long,
sender: Option[String],
height: Int,
minSponsoredAssetFee: Option[Long],
recipient: Option[String])
object TransactionInfo {
implicit val format: Format[TransactionInfo] = Json.format
}
case class OrderInfo(id: String,
sender: String,
senderPublicKey: String,
matcherPublicKey: String,
assetPair: AssetPairResponse,
orderType: String,
price: Long,
amount: Long,
timestamp: Long,
expiration: Long,
matcherFee: Long,
signature: String)
object OrderInfo {
implicit val transactionFormat: Format[OrderInfo] = Json.format
}
case class AssetPairResponse(amountAsset: Option[String], priceAsset: Option[String])
object AssetPairResponse {
implicit val pairResponseFormat: Format[AssetPairResponse] = Json.format
}
case class ExchangeTransaction(`type`: Int,
id: String,
sender: String,
senderPublicKey: String,
fee: Long,
timestamp: Long,
signature: String,
order1: OrderInfo,
order2: OrderInfo,
price: Long,
amount: Long,
buyMatcherFee: Long,
sellMatcherFee: Long,
height: Option[Int])
object ExchangeTransaction {
implicit val transactionFormat: Format[ExchangeTransaction] = Json.format
}
case class Block(signature: String,
height: Int,
timestamp: Long,
generator: String,
transactions: Seq[Transaction],
fee: Long,
features: Option[Seq[Short]])
object Block {
implicit val blockFormat: Format[Block] = Json.format
}
case class BlockHeaders(signature: String,
height: Int,
timestamp: Long,
generator: String,
transactionCount: Int,
blocksize: Int,
features: Option[Set[Short]])
object BlockHeaders {
implicit val blockHeadersFormat: Format[BlockHeaders] = Json.format
}
case class MatcherMessage(id: String)
object MatcherMessage {
implicit val matcherMessageFormat: Format[MatcherMessage] = Json.format
}
case class MatcherResponse(status: String, message: MatcherMessage)
object MatcherResponse {
implicit val matcherResponseFormat: Format[MatcherResponse] = Json.format
}
case class MarketDataInfo(matcherPublicKey: String, markets: Seq[MarketData])
object MarketDataInfo {
implicit val marketDataInfoResponseFormat: Format[MarketDataInfo] = Json.format
}
case class AssetDecimalsInfo(decimals: Byte)
object AssetDecimalsInfo {
implicit val assetDecimalsInfoResponseFormat: Format[AssetDecimalsInfo] = Json.format
}
case class MarketData(amountAssetName: String,
priceAssetName: String,
created: Long,
amountAssetInfo: Option[AssetDecimalsInfo],
priceAssetInfo: Option[AssetDecimalsInfo])
object MarketData {
implicit val marketData: Format[MarketData] = Json.format
}
case class MatcherStatusResponse(status: String, filledAmount: Option[Long])
object MatcherStatusResponse {
implicit val matcherStatusResponseFormat: Format[MatcherStatusResponse] = Json.format
}
case class MessageMatcherResponse(message: String)
object MessageMatcherResponse {
implicit val messageMatcherResponseFormat: Format[MessageMatcherResponse] = Json.format
}
case class OrderbookHistory(id: String, `type`: String, amount: Long, price: Long, timestamp: Long, filled: Int, status: String)
object OrderbookHistory {
implicit val orderbookHistory: Format[OrderbookHistory] = Json.format
}
case class PairResponse(amountAsset: String, priceAsset: String)
object PairResponse {
implicit val pairResponseFormat: Format[PairResponse] = Json.format
}
case class LevelResponse(price: Long, amount: Long)
object LevelResponse {
implicit val levelResponseFormat: Format[LevelResponse] = Json.format
}
case class OrderBookResponse(timestamp: Long, pair: PairResponse, bids: List[LevelResponse], asks: List[LevelResponse])
object OrderBookResponse {
implicit val orderBookResponseFormat: Format[OrderBookResponse] = Json.format
}
case class MarketStatusResponse(lastPrice: Option[Long],
lastSide: Option[String],
bid: Option[Long],
bidAmount: Option[Long],
ask: Option[Long],
askAmount: Option[Long])
object MarketStatusResponse {
implicit val marketResponseFormat: Format[MarketStatusResponse] = Json.format
}
case class DebugInfo(stateHeight: Long, stateHash: Long)
object DebugInfo {
implicit val debugInfoFormat: Format[DebugInfo] = Json.format
}
case class BlacklistedPeer(hostname: String, timestamp: Long, reason: String)
object BlacklistedPeer {
implicit val blacklistedPeerFormat: Format[BlacklistedPeer] = Json.format
}
case class State(address: String, miningBalance: Long, timestamp: Long)
object State {
implicit val StateFormat: Format[State] = Json.format
}
case class FeeInfo(feeAssetId: Option[String], feeAmount: Long)
object FeeInfo {
implicit val format: Format[FeeInfo] = Json.format
}
// Obsolete payment request
case class PaymentRequest(amount: Long, fee: Long, sender: String, recipient: String)
object PaymentRequest {
implicit val paymentFormat: Format[PaymentRequest] = Json.format
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/assets/SponsorFeeTransaction.scala
|
package com.amurplatform.transaction.assets
import com.google.common.primitives.{Bytes, Longs}
import com.amurplatform.crypto
import com.amurplatform.state._
import monix.eval.Coeval
import play.api.libs.json.{JsObject, Json}
import com.amurplatform.account.{PrivateKeyAccount, PublicKeyAccount}
import com.amurplatform.transaction._
import com.amurplatform.crypto._
import scala.util.{Failure, Success, Try}
case class SponsorFeeTransaction private (version: Byte,
sender: PublicKeyAccount,
assetId: ByteStr,
minSponsoredAssetFee: Option[Long],
fee: Long,
timestamp: Long,
proofs: Proofs)
extends ProvenTransaction
with VersionedTransaction
with FastHashId {
override val builder: SponsorFeeTransaction.type = SponsorFeeTransaction
val bodyBytes: Coeval[Array[Byte]] = Coeval.evalOnce(
Bytes.concat(
Array(builder.typeId),
Array(version),
sender.publicKey,
assetId.arr,
Longs.toByteArray(minSponsoredAssetFee.getOrElse(0)),
Longs.toByteArray(fee),
Longs.toByteArray(timestamp)
))
override val json: Coeval[JsObject] = Coeval.evalOnce(
jsonBase() ++ Json.obj(
"version" -> version,
"assetId" -> assetId.base58,
"minSponsoredAssetFee" -> minSponsoredAssetFee
))
override val assetFee: (Option[AssetId], Long) = (None, fee)
override val bytes: Coeval[Array[Byte]] = Coeval.evalOnce(Bytes.concat(Array(0: Byte, builder.typeId, version), bodyBytes(), proofs.bytes()))
}
object SponsorFeeTransaction extends TransactionParserFor[SponsorFeeTransaction] with TransactionParser.MultipleVersions {
override val typeId: Byte = 14
override val supportedVersions: Set[Byte] = Set(1)
override protected def parseTail(version: Byte, bytes: Array[Byte]): Try[TransactionT] =
Try {
val txId = bytes(0)
      require(txId == typeId, "Signed tx type id does not match")
      val bodyVersion = bytes(1)
      require(version == bodyVersion, s"versions do not match ($version, $bodyVersion)")
val sender = PublicKeyAccount(bytes.slice(2, KeyLength + 2))
val assetId = ByteStr(bytes.slice(KeyLength + 2, KeyLength + AssetIdLength + 2))
val minFeeStart = KeyLength + AssetIdLength + 2
val minFee = Longs.fromByteArray(bytes.slice(minFeeStart, minFeeStart + 8))
val fee = Longs.fromByteArray(bytes.slice(minFeeStart + 8, minFeeStart + 16))
val timestamp = Longs.fromByteArray(bytes.slice(minFeeStart + 16, minFeeStart + 24))
val tx = for {
proofs <- Proofs.fromBytes(bytes.drop(minFeeStart + 24))
tx <- SponsorFeeTransaction.create(version, sender, assetId, Some(minFee).filter(_ != 0), fee, timestamp, proofs)
} yield {
tx
}
tx.fold(left => Failure(new Exception(left.toString)), right => Success(right))
}.flatten
def create(version: Byte,
sender: PublicKeyAccount,
assetId: ByteStr,
minSponsoredAssetFee: Option[Long],
fee: Long,
timestamp: Long,
proofs: Proofs): Either[ValidationError, TransactionT] =
if (!supportedVersions.contains(version)) {
Left(ValidationError.UnsupportedVersion(version))
} else if (minSponsoredAssetFee.exists(_ <= 0)) {
Left(ValidationError.NegativeMinFee(minSponsoredAssetFee.get, "asset"))
} else if (fee <= 0) {
Left(ValidationError.InsufficientFee())
} else {
Right(SponsorFeeTransaction(version, sender, assetId, minSponsoredAssetFee, fee, timestamp, proofs))
}
def signed(version: Byte,
sender: PublicKeyAccount,
assetId: ByteStr,
minSponsoredAssetFee: Option[Long],
fee: Long,
timestamp: Long,
signer: PrivateKeyAccount): Either[ValidationError, TransactionT] =
create(version, sender, assetId, minSponsoredAssetFee, fee, timestamp, Proofs.empty).right.map { unsigned =>
unsigned.copy(proofs = Proofs.create(Seq(ByteStr(crypto.sign(signer, unsigned.bodyBytes())))).explicitGet())
}
def selfSigned(version: Byte,
sender: PrivateKeyAccount,
assetId: ByteStr,
minSponsoredAssetFee: Option[Long],
fee: Long,
timestamp: Long): Either[ValidationError, TransactionT] =
signed(version, sender, assetId, minSponsoredAssetFee, fee, timestamp, sender)
}
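// Editor's sketch (not part of the original file): enabling and cancelling sponsorship.
// The seed bytes and asset id are hypothetical placeholders; minSponsoredAssetFee = None
// (serialized as 0 above) removes sponsorship.
object SponsorFeeUsageSketch {
  import com.amurplatform.account.PrivateKeyAccount
  import com.amurplatform.state.ByteStr

  val issuer: PrivateKeyAccount = PrivateKeyAccount(Array.fill(32)(1: Byte)) // hypothetical seed
  val assetId: ByteStr          = ByteStr(Array.fill(32)(2: Byte))           // hypothetical asset id

  val enable = SponsorFeeTransaction.selfSigned(1, issuer, assetId, Some(100000L), 100000000L, System.currentTimeMillis())
  val cancel = SponsorFeeTransaction.selfSigned(1, issuer, assetId, None, 100000000L, System.currentTimeMillis())
}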
|
amur-host/node
|
src/main/scala/com/amurplatform/utils/ApplicationStopReason.scala
|
package com.amurplatform.utils
sealed abstract class ApplicationStopReason(val code: Int)
case object Default extends ApplicationStopReason(1)
case object UnsupportedFeature extends ApplicationStopReason(38)
|
amur-host/node
|
src/test/scala/com/amurplatform/network/InMemoryInvalidBlockStorage.scala
|
package com.amurplatform.network
import com.amurplatform.state.ByteStr
import com.amurplatform.transaction.ValidationError
import com.amurplatform.transaction.ValidationError.GenericError
class InMemoryInvalidBlockStorage extends InvalidBlockStorage {
var s: Set[ByteStr] = Set.empty[ByteStr]
override def add(blockId: ByteStr, validationError: ValidationError): Unit = s += blockId
override def find(blockId: ByteStr): Option[ValidationError] = {
if (s.contains(blockId)) Some(GenericError("Unknown")) else None
}
}
|
amur-host/node
|
src/main/scala/com/amurplatform/state/diffs/BlockDiffer.scala
|
package com.amurplatform.state.diffs
import cats.Monoid
import cats.implicits._
import cats.syntax.either.catsSyntaxEitherId
import com.amurplatform.features.BlockchainFeatures
import com.amurplatform.features.FeatureProvider._
import com.amurplatform.metrics.Instrumented
import com.amurplatform.mining.MiningConstraint
import com.amurplatform.settings.FunctionalitySettings
import com.amurplatform.state._
import com.amurplatform.state.patch.{CancelAllLeases, CancelInvalidLeaseIn, CancelLeaseOverflow}
import com.amurplatform.state.reader.CompositeBlockchain.composite
import com.amurplatform.account.Address
import com.amurplatform.utils.ScorexLogging
import com.amurplatform.block.{Block, MicroBlock}
import com.amurplatform.transaction.ValidationError.ActivationError
import com.amurplatform.transaction.{Transaction, ValidationError}
object BlockDiffer extends ScorexLogging with Instrumented {
private def clearSponsorship(blockchain: Blockchain, portfolio: Portfolio, height: Int, fs: FunctionalitySettings): Portfolio = {
if (height >= Sponsorship.sponsoredFeesSwitchHeight(blockchain, fs)) {
val sponsoredAssets = portfolio.assets
.map {
case (assetId, totalFee) =>
(assetId, totalFee, blockchain.assetDescription(assetId))
}
.collect {
case (assetId, totalFee, Some(desc)) if desc.sponsorship > 0 =>
(assetId, totalFee, desc.sponsorship)
}
val unsponsoredPf = portfolio.copy(assets = portfolio.assets -- sponsoredAssets.map(_._1))
val sponsoredAmur = sponsoredAssets.map {
case (_, totalFee, baseFee) => Sponsorship.toAmur(totalFee, baseFee)
}.sum
unsponsoredPf.copy(balance = unsponsoredPf.balance + sponsoredAmur)
} else portfolio
}
def fromBlock[Constraint <: MiningConstraint](settings: FunctionalitySettings,
blockchain: Blockchain,
maybePrevBlock: Option[Block],
block: Block,
constraint: Constraint): Either[ValidationError, (Diff, Constraint)] = {
val blockSigner = block.signerData.generator.toAddress
val stateHeight = blockchain.height
    // the NG fee distribution takes effect starting one block after the feature activation height
val ng4060switchHeight = blockchain.featureActivationHeight(BlockchainFeatures.NG.id).getOrElse(Int.MaxValue)
lazy val prevBlockFeeDistr: Option[Diff] =
if (stateHeight > ng4060switchHeight)
maybePrevBlock.map(
prevBlock =>
Diff.empty.copy(portfolios = Map(blockSigner ->
clearSponsorship(blockchain, prevBlock.prevBlockFeePart(), stateHeight, settings))))
else None
lazy val currentBlockFeeDistr =
if (stateHeight < ng4060switchHeight)
Some(Diff.empty.copy(portfolios = Map(blockSigner -> block.feesPortfolio())))
else
None
val prevBlockTimestamp = maybePrevBlock.map(_.timestamp)
for {
_ <- block.signaturesValid()
r <- apply(
settings,
blockchain,
constraint,
prevBlockTimestamp,
block.signerData.generator,
prevBlockFeeDistr,
currentBlockFeeDistr,
block.timestamp,
block.transactionData,
1
)
} yield r
}
def fromMicroBlock[Constraint <: MiningConstraint](settings: FunctionalitySettings,
blockchain: Blockchain,
prevBlockTimestamp: Option[Long],
micro: MicroBlock,
timestamp: Long,
constraint: Constraint): Either[ValidationError, (Diff, Constraint)] = {
for {
      // microblocks are only valid once the NG feature has been activated
      _ <- Either.cond(blockchain.activatedFeatures.contains(BlockchainFeatures.NG.id), (), ActivationError("MicroBlocks are not yet activated"))
_ <- micro.signaturesValid()
r <- apply(
settings,
blockchain,
constraint,
prevBlockTimestamp,
micro.sender,
None,
None,
timestamp,
micro.transactionData,
0
)
} yield r
}
private def apply[Constraint <: MiningConstraint](settings: FunctionalitySettings,
blockchain: Blockchain,
initConstraint: Constraint,
prevBlockTimestamp: Option[Long],
blockGenerator: Address,
prevBlockFeeDistr: Option[Diff],
currentBlockFeeDistr: Option[Diff],
timestamp: Long,
txs: Seq[Transaction],
heightDiff: Int): Either[ValidationError, (Diff, Constraint)] = {
def updateConstraint(constraint: Constraint, blockchain: Blockchain, tx: Transaction): Constraint =
constraint.put(blockchain, tx).asInstanceOf[Constraint]
val currentBlockHeight = blockchain.height + heightDiff
val txDiffer = TransactionDiffer(settings, prevBlockTimestamp, timestamp, currentBlockHeight) _
val txsDiffEi = currentBlockFeeDistr match {
case Some(feedistr) =>
val initDiff = Monoid.combine(prevBlockFeeDistr.orEmpty, feedistr)
txs.foldLeft((initDiff, initConstraint).asRight[ValidationError]) {
case (r @ Left(_), _) => r
case (Right((currDiff, currConstraint)), tx) =>
val updatedBlockchain = composite(blockchain, currDiff)
val updatedConstraint = updateConstraint(currConstraint, updatedBlockchain, tx)
if (updatedConstraint.isOverfilled) Left(ValidationError.GenericError(s"Limit of txs was reached: $initConstraint -> $updatedConstraint"))
else
txDiffer(updatedBlockchain, tx).map { newDiff =>
(currDiff.combine(newDiff), updatedConstraint)
}
}
case None =>
txs.foldLeft((prevBlockFeeDistr.orEmpty, initConstraint).asRight[ValidationError]) {
case (r @ Left(_), _) => r
case (Right((currDiff, currConstraint)), tx) =>
val updatedBlockchain = composite(blockchain, currDiff)
val updatedConstraint = updateConstraint(currConstraint, updatedBlockchain, tx)
if (updatedConstraint.isOverfilled) Left(ValidationError.GenericError(s"Limit of txs was reached: $initConstraint -> $updatedConstraint"))
else
txDiffer(updatedBlockchain, tx).map { newDiff =>
val updatedPortfolios = newDiff.portfolios.combine(
Map(blockGenerator -> clearSponsorship(blockchain, tx.feeDiff().multiply(Block.CurrentBlockFeePart), currentBlockHeight, settings))
)
(currDiff.combine(newDiff.copy(portfolios = updatedPortfolios)), updatedConstraint)
}
}
}
txsDiffEi.map {
case (d, constraint) =>
val diffWithCancelledLeases =
if (currentBlockHeight == settings.resetEffectiveBalancesAtHeight)
Monoid.combine(d, CancelAllLeases(composite(blockchain, d)))
else d
val diffWithLeasePatches =
if (currentBlockHeight == settings.blockVersion3AfterHeight)
Monoid.combine(diffWithCancelledLeases, CancelLeaseOverflow(composite(blockchain, diffWithCancelledLeases)))
else diffWithCancelledLeases
val diffWithCancelledLeaseIns =
if (blockchain.featureActivationHeight(BlockchainFeatures.DataTransaction.id).contains(currentBlockHeight))
Monoid.combine(diffWithLeasePatches, CancelInvalidLeaseIn(composite(blockchain, diffWithLeasePatches)))
else diffWithLeasePatches
(diffWithCancelledLeaseIns, constraint)
}
}
}
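// Editor's note (not part of the original file): fee handling above, in short. Below the
// NG switch height the forger is credited with the full fees of the current block
// (currentBlockFeeDistr); above it the forger receives the carried-over part of the
// previous block's fees (prevBlockFeeDistr) plus Block.CurrentBlockFeePart of each
// included transaction, with sponsored asset fees converted to amur via clearSponsorship.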
|
amur-host/node
|
src/main/scala/com/amurplatform/settings/Constants.scala
|
package com.amurplatform.settings
import com.amurplatform.Version
import com.amurplatform.utils.ScorexLogging
/**
* System constants here.
*/
object Constants extends ScorexLogging {
val ApplicationName = "amur"
val AgentName = s"Amur v${Version.VersionString}"
val UnitsInWave = 100000000L
val TotalAmur = 100000000L
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/FairPoSTestSuite.scala
|
package com.amurplatform.it.sync
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{CancelAfterFailure, FunSuite}
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.it.transactions.NodesFromDocker
import com.amurplatform.it.util._
import scala.concurrent.duration._
class FairPoSTestSuite extends FunSuite with CancelAfterFailure with NodesFromDocker {
import FairPoSTestSuite._
override protected def nodeConfigs: Seq[Config] = Configs
private val transferFee = 0.001.amur
private val transferAmount = 1000.amur
test("blockchain grows with FairPoS activated") {
nodes.head.waitForHeight(10, 3.minutes)
val txId = nodes.head.transfer(nodes.head.address, nodes.last.address, transferAmount, transferFee).id
nodes.last.waitForTransaction(txId)
val heightAfterTransfer = nodes.head.height
nodes.head.waitForHeight(heightAfterTransfer + 20, 10.minutes)
}
}
object FairPoSTestSuite {
import com.amurplatform.it.NodeConfigs._
private val microblockActivationHeight = 0
private val fairPoSActivationHeight = 10
private val config =
ConfigFactory.parseString(s"""
|amur {
| blockchain.custom {
| functionality {
| pre-activated-features {1 = $microblockActivationHeight, 8 = $fairPoSActivationHeight}
| generation-balance-depth-from-50-to-1000-after-height = 1000
| }
| }
| miner.quorum = 1
|}""".stripMargin)
val Configs: Seq[Config] = Default.map(config.withFallback(_)).take(4)
}
|
amur-host/node
|
lang/shared/src/main/scala/com/amurplatform/lang/v1/hacks/package.scala
|
package com.amurplatform.lang
import com.amurplatform.lang.v1.BaseGlobal
package object hacks {
private[lang] val Global: BaseGlobal = com.amurplatform.lang.Global // Hack for IDEA
}
|
amur-host/node
|
generator/src/main/scala/com.amurplatform.generator/DynamicWideTransactionGenerator.scala
|
package com.amurplatform.generator
import java.util.concurrent.atomic.AtomicReference
import cats.Show
import com.amurplatform.generator.DynamicWideTransactionGenerator.Settings
import com.amurplatform.generator.utils.Gen
import com.amurplatform.account.PrivateKeyAccount
import com.amurplatform.transaction.Transaction
class DynamicWideTransactionGenerator(settings: Settings, accounts: Seq[PrivateKeyAccount]) extends TransactionGenerator {
require(accounts.nonEmpty)
private val nextTxsNumber = new AtomicReference[Double](settings.start)
private val limitedRecipientGen = Gen.address(settings.limitDestAccounts)
override def next(): Iterator[Transaction] = {
val currTxsNumber = nextTxsNumber.getAndUpdate { x =>
val newValue = x + settings.growAdder
settings.maxTxsPerRequest.foldLeft(newValue)(Math.min(_, _))
}.toInt
Gen.txs(settings.minFee, settings.maxFee, accounts, limitedRecipientGen).take(currTxsNumber)
}
}
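// A quick trace of the growth logic above (a sketch with made-up settings, not from the source):
// with start = 10, growAdder = 2.5 and maxTxsPerRequest = Some(14), successive next() calls emit
// 10, 12, 14, 14, ... transactions - getAndUpdate returns the old value, truncated to Int, while
// the stored counter grows until it is capped at 14.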
object DynamicWideTransactionGenerator {
case class Settings(start: Int, growAdder: Double, maxTxsPerRequest: Option[Int], limitDestAccounts: Option[Int], minFee: Long, maxFee: Long) {
require(start >= 1)
}
object Settings {
implicit val toPrintable: Show[Settings] = { x =>
import x._
s"""txs at start: $start
|grow adder: $growAdder
|max txs: $maxTxsPerRequest
|limit destination accounts: $limitDestAccounts
|min fee: $minFee
|max fee: $maxFee""".stripMargin
}
}
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/matcher/OrderExclusionTestSuite.scala
|
package com.amurplatform.it.sync.matcher
import com.typesafe.config.{Config, ConfigFactory}
import com.amurplatform.it._
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.it.api.SyncMatcherHttpApi._
import com.amurplatform.it.transactions.NodesFromDocker
import com.amurplatform.it.util._
import com.amurplatform.state.ByteStr
import com.amurplatform.transaction.assets.exchange.{AssetPair, Order, OrderType}
import org.scalatest.{BeforeAndAfterAll, CancelAfterFailure, FreeSpec, Matchers}
import scala.concurrent.duration._
import scala.util.Random
class OrderExclusionTestSuite
extends FreeSpec
with Matchers
with BeforeAndAfterAll
with CancelAfterFailure
with ReportingTestName
with NodesFromDocker {
import OrderExclusionTestSuite._
override protected def nodeConfigs: Seq[Config] = Configs
private def matcherNode = nodes.head
private def aliceNode = nodes(1)
"check order execution" - {
// Alice issues new asset
val aliceAsset =
aliceNode.issue(aliceNode.address, "AliceCoin", "AliceCoin for matcher's tests", AssetQuantity, 0, reissuable = false, 100000000L).id
nodes.waitForHeightAriseAndTxPresent(aliceAsset)
val aliceAmurPair = AssetPair(ByteStr.decodeBase58(aliceAsset).toOption, None)
// check asset balances
aliceNode.assertAssetBalance(aliceNode.address, aliceAsset, AssetQuantity)
aliceNode.assertAssetBalance(matcherNode.address, aliceAsset, 0)
"sell order could be placed and status it's correct" in {
// Alice places sell order
val aliceOrder = matcherNode
.placeOrder(aliceNode, aliceAmurPair, OrderType.SELL, 2.amur * Order.PriceConstant, 500, 2: Byte, 70.seconds)
aliceOrder.status shouldBe "OrderAccepted"
val orderId = aliceOrder.message.id
// Alice checks that the order is in the order book
matcherNode.orderStatus(orderId, aliceAmurPair).status shouldBe "Accepted"
matcherNode.fullOrderHistory(aliceNode).head.status shouldBe "Accepted"
// Alice checks that the order is correct
val orders = matcherNode.orderBook(aliceAmurPair)
orders.asks.head.amount shouldBe 500
orders.asks.head.price shouldBe 2.amur * Order.PriceConstant
// sell order should be in the aliceNode orderbook
matcherNode.fullOrderHistory(aliceNode).head.status shouldBe "Accepted"
// wait for the order to expire
matcherNode.waitOrderStatus(aliceAmurPair, orderId, "Cancelled", 2.minutes)
matcherNode.fullOrderHistory(aliceNode).head.status shouldBe "Cancelled"
}
}
}
object OrderExclusionTestSuite {
val ForbiddenAssetId = "FdbnAsset"
import NodeConfigs.Default
private val matcherConfig = ConfigFactory.parseString(s"""
|amur {
| matcher {
| enable = yes
| account = <KEY>
| bind-address = "0.0.0.0"
| order-match-tx-fee = 300000
| blacklisted-assets = [$ForbiddenAssetId]
| order-cleanup-interval = 20s
| }
| rest-api {
| enable = yes
| api-key-hash = <KEY>
| }
| miner.enable=no
|}""".stripMargin)
private val nonGeneratingPeersConfig = ConfigFactory.parseString(
"""amur {
| matcher.order-cleanup-interval = 30s
| miner.enable=no
|}""".stripMargin
)
val AssetQuantity: Long = 1000
val MatcherFee: Long = 300000
val TransactionFee: Long = 300000
// val Amur: Long = 100000000L
private val Configs: Seq[Config] = {
val notMatchingNodes = Random.shuffle(Default.init).take(3)
Seq(matcherConfig.withFallback(Default.last), notMatchingNodes.head) ++
notMatchingNodes.tail.map(nonGeneratingPeersConfig.withFallback)
}
}
|
amur-host/node
|
generator/src/main/scala/com.amurplatform.generator/TransactionGenerator.scala
|
package com.amurplatform.generator
import com.amurplatform.transaction.Transaction
trait TransactionGenerator extends Iterator[Iterator[Transaction]] {
override val hasNext = true
}
|
amur-host/node
|
src/main/scala/com/amurplatform/transaction/smart/BlockchainContext.scala
|
package com.amurplatform.transaction.smart
import cats.kernel.Monoid
import com.amurplatform.lang.Global
import com.amurplatform.lang.v1.evaluator.ctx.EvaluationContext
import com.amurplatform.lang.v1.evaluator.ctx.impl.amur.AmurContext
import com.amurplatform.lang.v1.evaluator.ctx.impl.{CryptoContext, PureContext}
import com.amurplatform.state._
import com.amurplatform.transaction._
import com.amurplatform.transaction.assets.exchange.Order
import monix.eval.Coeval
import shapeless._
object BlockchainContext {
private val baseContext = Monoid.combine(PureContext.ctx, CryptoContext.build(Global)).evaluationContext
def build(nByte: Byte, in: Coeval[Transaction :+: Order :+: CNil], h: Coeval[Int], blockchain: Blockchain): EvaluationContext =
Monoid.combine(baseContext, AmurContext.build(new AmurEnvironment(nByte, in, h, blockchain)).evaluationContext)
}
|
amur-host/node
|
lang/shared/src/main/scala/com/amurplatform/lang/v1/task/CoevalRef.scala
|
package com.amurplatform.lang.v1.task
import monix.eval.Coeval
import monix.execution.atomic.{Atomic, _}
sealed trait CoevalRef[A] {
def read: Coeval[A]
def write(a: A): Coeval[Unit]
}
object CoevalRef {
def of[A, R <: Atomic[A]](a: A)(implicit ab: AtomicBuilder[A, R]): CoevalRef[A] = {
new CoevalRef[A] {
private val atom: Atomic[A] = Atomic(a)
override def read: Coeval[A] = Coeval.delay(atom.get)
override def write(a: A): Coeval[Unit] = Coeval.delay(atom.set(a))
}
}
}
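// Example usage (a minimal sketch, assuming monix's default AtomicBuilder instances resolve):
// reads and writes are suspended in Coeval and only run when the program is evaluated.
//
//   val ref = CoevalRef.of(0)
//   val program = for { _ <- ref.write(42); v <- ref.read } yield v
//   program() // 42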
|
amur-host/node
|
src/main/scala/com/amurplatform/matcher/smart/MatcherScriptRunner.scala
|
package com.amurplatform.matcher.smart
import com.amurplatform.account.AddressScheme
import com.amurplatform.lang.v1.evaluator.EvaluatorV1
import com.amurplatform.lang.v1.evaluator.ctx.EvaluationContext
import com.amurplatform.transaction.assets.exchange.Order
import com.amurplatform.transaction.smart.script.Script
import monix.eval.Coeval
import cats.implicits._
object MatcherScriptRunner {
def apply[A](script: Script, order: Order): (EvaluationContext, Either[String, A]) = script match {
case Script.Expr(expr) =>
val ctx = MatcherContext.build(AddressScheme.current.chainId, Coeval.evalOnce(order))
EvaluatorV1[A](ctx, expr)
case _ => (EvaluationContext.empty, "Unsupported script version".asLeft[A])
}
}
|
amur-host/node
|
it/src/main/scala/com/amurplatform/it/ExternalNode.scala
|
package com.amurplatform.it
import java.net.{InetSocketAddress, URL}
import com.typesafe.config.Config
class ExternalNode(config: Config) extends Node(config) {
override def nodeApiEndpoint = new URL(config.getString("node-api-endpoint"))
override def matcherApiEndpoint = new URL(config.getString("matcher-api-endpoint"))
override def apiKey = config.getString("api-key")
override def networkAddress = {
val hostAndPort = "([^:]+)\\:([\\d+])+".r
val hostAndPort(host, port) = config.getString("network-address")
new InetSocketAddress(host, port.toInt)
}
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/smartcontract/ScriptExecutionErrorSuite.scala
|
package com.amurplatform.it.sync.smartcontract
import com.amurplatform.it.api.SyncHttpApi.assertBadRequestAndResponse
import com.amurplatform.it.sync.minFee
import com.amurplatform.it.transactions.BaseTransactionSuite
import com.amurplatform.lang.v1.FunctionHeader
import com.amurplatform.lang.v1.compiler.Terms
import com.amurplatform.transaction.smart.SetScriptTransaction
import com.amurplatform.transaction.smart.script.v1.ScriptV1
import org.scalatest.CancelAfterFailure
import play.api.libs.json.JsNumber
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.state._
class ScriptExecutionErrorSuite extends BaseTransactionSuite with CancelAfterFailure {
private val acc0 = pkByAddress(firstAddress)
private val acc1 = pkByAddress(secondAddress)
test("wrong type of script return value") {
val script = ScriptV1(
Terms
.FUNCTION_CALL(
FunctionHeader.Native(100),
List(
Terms.CONST_LONG(3),
Terms.CONST_LONG(2)
)
)
).explicitGet()
val tx = sender
.signAndBroadcast(
SetScriptTransaction
.selfSigned(SetScriptTransaction.supportedVersions.head, acc0, Some(script), minFee, System.currentTimeMillis())
.explicitGet()
.json() + ("type" -> JsNumber(SetScriptTransaction.typeId.toInt)))
.id
nodes.waitForHeightAriseAndTxPresent(tx)
assertBadRequestAndResponse(
sender.transfer(acc0.address, acc1.address, 1000, minFee, None, None),
"Probably script does not return boolean"
)
}
}
|
amur-host/node
|
src/test/scala/com/amurplatform/TestHelpers.scala
|
package com.amurplatform
import java.io.IOException
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor}
import com.amurplatform.settings.{GenesisSettings, GenesisTransactionSettings}
import com.amurplatform.account.Address
import scala.concurrent.duration._
object TestHelpers {
def genesisSettings(balances: Map[Address, Long], blockTimestamp: Long = System.currentTimeMillis()): GenesisSettings = {
val totalAmount = balances.values.sum
val transactions = balances.map {
case (account, amount) =>
GenesisTransactionSettings(account.address, amount)
}.toSeq
GenesisSettings(blockTimestamp, blockTimestamp, totalAmount, None, transactions, 1000, 60.seconds)
}
def deleteRecursively(path: Path): Unit = Files.walkFileTree(
path,
new SimpleFileVisitor[Path] {
override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
Option(exc).fold {
Files.delete(dir)
FileVisitResult.CONTINUE
}(throw _)
}
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
Files.delete(file)
FileVisitResult.CONTINUE
}
}
)
}
|
amur-host/node
|
it/src/test/scala/com/amurplatform/it/sync/matcher/TradersTestSuite.scala
|
package com.amurplatform.it.sync.matcher
import com.typesafe.config.{Config, ConfigFactory}
import com.amurplatform.it.ReportingTestName
import com.amurplatform.it.api.SyncHttpApi._
import com.amurplatform.it.api.SyncMatcherHttpApi._
import com.amurplatform.it.transactions.NodesFromDocker
import com.amurplatform.it.util._
import com.amurplatform.matcher.market.MatcherActor
import com.amurplatform.matcher.model.MatcherModel.Price
import com.amurplatform.state.ByteStr
import com.amurplatform.transaction.assets.exchange.{AssetPair, Order, OrderType}
import org.scalatest.{BeforeAndAfterAll, CancelAfterFailure, FreeSpec, Matchers}
import scala.util.Random
class TradersTestSuite extends FreeSpec with Matchers with BeforeAndAfterAll with CancelAfterFailure with NodesFromDocker with ReportingTestName {
import TradersTestSuite._
override protected def nodeConfigs: Seq[Config] = Configs
private def matcherNode = nodes.head
private def aliceNode = nodes(1)
private def bobNode = nodes(2)
private def orderVersion = (Random.nextInt(2) + 1).toByte
"Verifications of tricky ordering cases" - {
// Alice issues new asset
val aliceAsset =
aliceNode.issue(aliceNode.address, "AliceCoin", "AliceCoin for matcher's tests", AssetQuantity, 0, reissuable = false, 100000000L).id
nodes.waitForHeightAriseAndTxPresent(aliceAsset)
// val aliceAmurPair = AssetPair(ByteStr.decodeBase58(aliceAsset).toOption, None)
// Wait for balance on Alice's account
aliceNode.assertAssetBalance(aliceNode.address, aliceAsset, AssetQuantity)
matcherNode.assertAssetBalance(matcherNode.address, aliceAsset, 0)
bobNode.assertAssetBalance(bobNode.address, aliceAsset, 0)
// Bob issues a new asset
val bobAssetQuantity = 10000
val bobNewAsset = bobNode.issue(bobNode.address, "BobCoin3", "Bob's asset", bobAssetQuantity, 0, false, 100000000L).id
nodes.waitForHeightAriseAndTxPresent(bobNewAsset)
val bobAssetId = ByteStr.decodeBase58(bobNewAsset).get
val aliceAssetId = ByteStr.decodeBase58(aliceAsset).get
val bobAmurPair = AssetPair(
amountAsset = Some(bobAssetId),
priceAsset = None
)
val twoAssetsPair =
if (MatcherActor.compare(Some(bobAssetId.arr), Some(aliceAssetId.arr)) < 0)
AssetPair(
amountAsset = Some(aliceAssetId),
priceAsset = Some(bobAssetId)
)
else
AssetPair(
amountAsset = Some(bobAssetId),
priceAsset = Some(aliceAssetId)
)
nodes.waitForHeightArise()
bobNode.assertAssetBalance(bobNode.address, bobNewAsset, bobAssetQuantity)
"matcher should respond with Public key" in {
matcherNode.matcherGet("/matcher").getResponseBody.stripPrefix("\"").stripSuffix("\"") shouldBe matcherNode.publicKeyStr
}
"owner moves assets/amur to another account and order become an invalid" ignore {
// todo: reactivate after balance watcher is reimplemented
// Could not work sometimes because of NODE-546
"order with assets" - {
"moved assets, insufficient assets" in {
val oldestOrderId = bobPlacesAssetOrder(8000, twoAssetsPair, bobNewAsset)
val newestOrderId = bobPlacesAssetOrder(1000, twoAssetsPair, bobNewAsset)
val transferId = bobNode.transfer(bobNode.address, aliceNode.address, 3050, TransactionFee, Some(bobNewAsset), None).id
nodes.waitForHeightAriseAndTxPresent(transferId)
withClue(s"The oldest order '$oldestOrderId' was cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, oldestOrderId, "Cancelled")
}
withClue(s"The newest order '$newestOrderId' is still active") {
matcherNode.orderStatus(newestOrderId, bobAmurPair).status shouldBe "Accepted"
}
// Cleanup
nodes.waitForHeightArise()
matcherNode.cancelOrder(bobNode, twoAssetsPair, Some(newestOrderId)).status should be("OrderCanceled")
val transferBackId = aliceNode.transfer(aliceNode.address, bobNode.address, 3050, TransactionFee, Some(bobNewAsset), None).id
nodes.waitForHeightAriseAndTxPresent(transferBackId)
}
"leased amur, insufficient fee" in {
val bobBalance = bobNode.accountBalances(bobNode.address)._1
val oldestOrderId = bobPlacesAssetOrder(1000, twoAssetsPair, bobNewAsset)
val newestOrderId = bobPlacesAssetOrder(1000, twoAssetsPair, bobNewAsset)
// TransactionFee for leasing, MatcherFee for one order
val leaseAmount = bobBalance - TransactionFee - MatcherFee
val leaseId = bobNode.lease(bobNode.address, aliceNode.address, leaseAmount, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(leaseId)
withClue(s"The oldest order '$oldestOrderId' was cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, oldestOrderId, "Cancelled")
}
withClue(s"The newest order '$newestOrderId' is still active") {
matcherNode.orderStatus(newestOrderId, bobAmurPair).status shouldBe "Accepted"
}
// Cleanup
nodes.waitForHeightArise()
matcherNode.cancelOrder(bobNode, twoAssetsPair, Some(newestOrderId)).status should be("OrderCanceled")
val cancelLeaseId = bobNode.cancelLease(bobNode.address, leaseId, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(cancelLeaseId)
}
"moved amur, insufficient fee" in {
val bobBalance = matcherNode.accountBalances(bobNode.address)._1
val oldestOrderId = bobPlacesAssetOrder(1000, twoAssetsPair, bobNewAsset)
val newestOrderId = bobPlacesAssetOrder(1000, twoAssetsPair, bobNewAsset)
// TransactionFee for leasing, MatcherFee for one order
val transferAmount = bobBalance - TransactionFee - MatcherFee
val transferId = bobNode.transfer(bobNode.address, aliceNode.address, transferAmount, TransactionFee, None, None).id
nodes.waitForHeightAriseAndTxPresent(transferId)
withClue(s"The oldest order '$oldestOrderId' was cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, oldestOrderId, "Cancelled")
}
withClue(s"The newest order '$newestOrderId' is still active") {
matcherNode.orderStatus(newestOrderId, bobAmurPair).status shouldBe "Accepted"
}
// Cleanup
nodes.waitForHeightArise()
matcherNode.cancelOrder(bobNode, twoAssetsPair, Some(newestOrderId)).status should be("OrderCanceled")
val transferBackId = aliceNode.transfer(aliceNode.address, bobNode.address, transferAmount, TransactionFee, None, None).id
nodes.waitForHeightAriseAndTxPresent(transferBackId)
}
}
"order with amur" - {
"leased amur, insufficient fee for one ExchangeTransaction" in {
// Amount of amur in order is smaller than fee
val bobBalance = bobNode.accountBalances(bobNode.address)._1
val oldestOrderId = bobPlacesWaveOrder(bobAmurPair, 10.amur * Order.PriceConstant, 1)
val newestOrderId = bobPlacesWaveOrder(bobAmurPair, 10.amur * Order.PriceConstant, 1)
// waitForOrderStatus(matcherNode, bobAssetIdRaw, id, "Accepted")
val leaseAmount = bobBalance - TransactionFee - 10.amur - MatcherFee
val leaseId = bobNode.lease(bobNode.address, aliceNode.address, leaseAmount, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(leaseId)
withClue(s"The newest order '$oldestOrderId' is Cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, oldestOrderId, "Cancelled")
}
withClue(s"The newest order '$newestOrderId' is still active") {
matcherNode.orderStatus(newestOrderId, bobAmurPair).status shouldBe "Accepted"
}
// Cleanup
nodes.waitForHeightArise()
matcherNode.cancelOrder(bobNode, bobAmurPair, Some(newestOrderId)).status should be("OrderCanceled")
val cancelLeaseId = bobNode.cancelLease(bobNode.address, leaseId, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(cancelLeaseId)
}
"leased amur, insufficient amur" in {
val bobBalance = bobNode.accountBalances(bobNode.address)._1
val price = 1.amur
val order2 = bobPlacesWaveOrder(bobAmurPair, price * Order.PriceConstant, 1)
val leaseAmount = bobBalance - TransactionFee - price / 2
val leaseId = bobNode.lease(bobNode.address, aliceNode.address, leaseAmount, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(leaseId)
withClue(s"The order '$order2' was cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, order2, "Cancelled")
}
// Cleanup
nodes.waitForHeightArise()
val cancelLeaseId = bobNode.cancelLease(bobNode.address, leaseId, TransactionFee).id
nodes.waitForHeightAriseAndTxPresent(cancelLeaseId)
}
"moved amur, insufficient fee" in {
// Amount of amur in order is smaller than fee
val bobBalance = bobNode.accountBalances(bobNode.address)._1
val price = TransactionFee / 2
val order3 = bobPlacesWaveOrder(bobAmurPair, price * Order.PriceConstant, 1)
val transferAmount = bobBalance - TransactionFee - price
val txId = bobNode.transfer(bobNode.address, aliceNode.address, transferAmount, TransactionFee, None, None).id
nodes.waitForHeightAriseAndTxPresent(txId)
withClue(s"The order '$order3' was cancelled") {
matcherNode.waitOrderStatus(bobAmurPair, order3, "Cancelled")
}
// Cleanup
nodes.waitForHeightArise()
val transferBackId = aliceNode.transfer(aliceNode.address, bobNode.address, transferAmount, TransactionFee, None, None).id
nodes.waitForHeightAriseAndTxPresent(transferBackId)
}
}
}
}
def bobPlacesWaveOrder(assetPair: AssetPair, price: Price, amount: Long): String = {
val bobOrder = matcherNode.prepareOrder(bobNode, assetPair, OrderType.BUY, price, amount, orderVersion)
val order = matcherNode.placeOrder(bobOrder).message.id
matcherNode.waitOrderStatus(assetPair, order, "Accepted")
order
}
def bobPlacesAssetOrder(bobCoinAmount: Int, twoAssetsPair: AssetPair, assetId: String): String = {
val decodedAsset = ByteStr.decodeBase58(assetId).get
val bobOrder = if (twoAssetsPair.amountAsset.contains(decodedAsset)) {
matcherNode.prepareOrder(bobNode, twoAssetsPair, OrderType.SELL, 1 * Order.PriceConstant, bobCoinAmount, orderVersion)
} else {
matcherNode.prepareOrder(bobNode, twoAssetsPair, OrderType.BUY, bobCoinAmount * Order.PriceConstant, 1, orderVersion)
}
val order = matcherNode.placeOrder(bobOrder)
matcherNode.waitOrderStatus(twoAssetsPair, order.message.id, "Accepted")
order.message.id
}
}
object TradersTestSuite {
import ConfigFactory._
import com.amurplatform.it.NodeConfigs._
private val ForbiddenAssetId = "FdbnAsset"
private val AssetQuantity = 1000
private val MatcherFee = 300000
private val TransactionFee = 300000
private val minerDisabled = parseString("amur.miner.enable = no")
private val matcherConfig = parseString(s"""
|amur.matcher {
| enable = yes
| account = <KEY>
| bind-address = "0.0.0.0"
| order-match-tx-fee = 300000
| blacklisted-assets = ["$ForbiddenAssetId"]
| balance-watching.enable = yes
|}""".stripMargin)
private val Configs: Seq[Config] = (Default.last +: Random.shuffle(Default.init).take(3))
.zip(Seq(matcherConfig, minerDisabled, minerDisabled, empty()))
.map { case (n, o) => o.withFallback(n) }
}
|
cwensel/parboiled
|
parboiled-scala/src/test/scala/org/parboiled/scala/LoopedReductionRuleTest.scala
|
/*
* Copyright (C) 2009-2011 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled.scala
import org.testng.annotations.Test
import org.scalatest.testng.TestNGSuiteLike
import org.testng.Assert.assertEquals
import testing.ParboiledTest
class LoopedReductionRuleTest extends ParboiledTest with TestNGSuiteLike {
// def fail(message: String): Nothing = super.fail(message)(null)
type Result = Expression
trait Expression {
def value: Double
}
case class IntExpression(intValue: Int) extends Expression {
def value = intValue.toDouble
}
case class DoubleExpression(value: Double) extends Expression
class TestParser extends Parser {
def IntsAndDoubles: Rule1[DoubleExpression] = rule {
"1" ~> (s => IntExpression(s.toInt)) ~ oneOrMore(" + " ~ AddDouble)
}
def IntsAndMaybeDoubles: Rule1[Expression] = rule {
"1" ~> (s => IntExpression(s.toInt)) ~ zeroOrMore(" + " ~ AddDouble)
}
def AddDouble: ReductionRule1[Expression, DoubleExpression] = rule {
"0.3" ~> (s => DoubleExpression(s.toDouble)) ~~> ((l: Expression, r: DoubleExpression) => DoubleExpression(l.value + r.value))
}
}
val parser = new TestParser() {
override val buildParseTree = true
}
@Test
def testLoopedReductionRule() {
parse(ReportingParseRunner(parser.IntsAndDoubles), "1 + 0.3 + 0.3 + 0.3") {
assertEquals(parseTree,
"""[IntsAndDoubles, {DoubleExpression(1.9000000000000001)}] '1 + 0.3 + 0.3 + 0.3'
['1'] '1'
[OneOrMore, {DoubleExpression(1.9000000000000001)}] ' + 0.3 + 0.3 + 0.3'
[Sequence, {DoubleExpression(1.3)}] ' + 0.3'
[" + ", {IntExpression(1)}] ' + '
[AddDouble, {DoubleExpression(1.3)}] '0.3'
["0.3", {IntExpression(1)}] '0.3'
[Sequence, {DoubleExpression(1.6)}] ' + 0.3'
[" + ", {DoubleExpression(1.3)}] ' + '
[AddDouble, {DoubleExpression(1.6)}] '0.3'
["0.3", {DoubleExpression(1.3)}] '0.3'
[Sequence, {DoubleExpression(1.9000000000000001)}] ' + 0.3'
[" + ", {DoubleExpression(1.6)}] ' + '
[AddDouble, {DoubleExpression(1.9000000000000001)}] '0.3'
["0.3", {DoubleExpression(1.6)}] '0.3'
""".stripMargin)
}
}
}
|
cwensel/parboiled
|
parboiled-scala/src/test/scala/org/parboiled/scala/BugIn101Test.scala
|
/*
* Copyright (C) 2009-2011 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled.scala
import org.testng.annotations.Test
import org.scalatest.testng.TestNGSuiteLike
import org.testng.Assert.assertEquals
import testing.ParboiledTest
class BugIn101Test extends ParboiledTest with TestNGSuiteLike {
// def fail(message: String): Nothing = super.fail(message)(null)
type Result = Int
class MyParser extends Parser {
def A = rule { nTimes(1, "a") }
}
val parser = new MyParser() {
override val buildParseTree = true
}
@Test
def testNTimes() {
parse(ReportingParseRunner(parser.A), "any") {
assertEquals(parseTree,"[A] 'a'\n")
}
}
}
|
ryan-williams/hdf5-cloud
|
10x_hdf5_to_parquet.scala
|
// ~/sw/spark-2.2.1-bin-hadoop2.7/bin/spark-shell --master local[2] --jars ~/.m2/repository/com/tom_e_white/hdf5-java-cloud/0.0.1-SNAPSHOT/hdf5-java-cloud-0.0.1-SNAPSHOT.jar
import hammerlab.path._
import com.tom_e_white.hdf5_java_cloud.ArrayUtils
import com.tom_e_white.hdf5_java_cloud.NioReadOnlyRandomAccessFile
import org.apache.spark.mllib.linalg.{SparseVector, Vector, Vectors}
import org.apache.spark.sql.{Row, SaveMode}
import org.apache.spark.sql.types._
import ucar.nc2.NetcdfFile
val t0 = System.currentTimeMillis()
val file = "files/1M_neurons_filtered_gene_bc_matrices_h5.h5" // change to "gs://..." for GCS
val output = "10x_parquet"
val totalShards = 320
// Read the k'th shard of the HDF5 file and return a sequence of barcode-vector tuples. Each shard must fit in memory.
def readShard(k: Int): Seq[(String, Vector)] = {
val location = file
val path = Path(location)
val raf = new NioReadOnlyRandomAccessFile(path)
val ncfile = NetcdfFile.open(raf, location, null, null)
val indptr = ncfile.findVariable("/mm10/indptr")
val indices = ncfile.findVariable("/mm10/indices")
val data = ncfile.findVariable("/mm10/data")
val barcodes = ncfile.findVariable("/mm10/barcodes")
val shape = ncfile.findVariable("/mm10/shape")
val numFeatures = shape.read.getInt(0)
val numRows = barcodes.getShape(0)
val start = k * numRows / (totalShards - 1)
val end = if (k == totalShards - 1) numRows else (k + 1) * numRows / (totalShards - 1)
val barcodeData: Array[String] = ArrayUtils.index(barcodes, start, end + 1)
.copyToNDJavaArray().asInstanceOf[Array[Array[Char]]]
.map(x => x.mkString)
val indptrData: Array[Long] = ArrayUtils.index(indptr, start, end + 1).getStorage.asInstanceOf[Array[Long]]
val firstIndptr: Long = indptrData(0)
val lastIndptr: Long = indptrData.last
if (firstIndptr == lastIndptr) {
return Seq()
}
val indicesData: Array[Long] = ArrayUtils.index(indices, firstIndptr, lastIndptr).getStorage.asInstanceOf[Array[Long]]
val dataData: Array[Int] = ArrayUtils.index(data, firstIndptr, lastIndptr).getStorage.asInstanceOf[Array[Int]]
(0 until end - start).map(i => {
val barcode = barcodeData(i)
val indicesSlice = indicesData.slice((indptrData(i) - firstIndptr).toInt, (indptrData(i + 1) - firstIndptr).toInt)
val dataSlice = dataData.slice((indptrData(i) - firstIndptr).toInt, (indptrData(i + 1) - firstIndptr).toInt)
val indexDataPairs = indicesSlice.zip(dataSlice)
.map {case (k: Long, v: Int) => (k.toInt, v.toDouble)} // Vector is (Int, Double)
val vec = Vectors.sparse(numFeatures, indexDataPairs)
(barcode, vec)
})
}
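// A quick trace of the boundary formula above with made-up numbers (not from the source):
// for totalShards = 4 and numRows = 12 the (start, end) pairs are (0, 4), (4, 8), (8, 12)
// and (12, 12) - the divisor totalShards - 1 leaves the final shard empty, so the split
// arithmetic may deserve a second look before changing shard counts.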
val actualShards = totalShards // change this to test on a subset
val shardIndexes = sc.parallelize(0 until actualShards, totalShards)
val rows = shardIndexes.flatMap(readShard(_)).map {case (id, vec) => Row(id, vec.asInstanceOf[SparseVector].indices, vec.asInstanceOf[SparseVector].values)}
val schema = StructType(
StructField("id", StringType, false) ::
StructField("idx", ArrayType(IntegerType, false), false) ::
StructField("quant", ArrayType(DoubleType, false), false) :: Nil)
val df = spark.createDataFrame(rows, schema)
df.write.mode(SaveMode.Overwrite).parquet(output)
val t1 = System.currentTimeMillis()
println("Elapsed time: " + ((t1 - t0) / 1000) + "s")
|
Essexwwz/heart-health-indicator
|
heart-health-indicator/src/main/scala/csvtolibsvmlogicalD.scala
|
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructField}
import org.apache.spark.sql.{Row, SparkSession}
import scala.collection.immutable.ListMap
import scala.collection.mutable.ArrayBuffer
object csvtolibsvmlogicalD {
def main(args:Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("csvreader")
.master("local")
.getOrCreate()
val sc = spark.sparkContext
val prim = spark.read.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat")
.option("header","true")
.option("inferSchema","true")
.load("data/heart_disease_health_indicators_BRFSS2015.csv")
prim.createOrReplaceTempView("resultsql")
val result = spark.sql("select HeartDiseaseorAttack, Stroke,Diabetes from resultsql where CholCheck =1")
//For cholesterol data, we need to eliminate samples that have not been tested for cholesterol
val colNames = result.columns
val cols = colNames.map(f => col(f).cast(DoubleType))
val resultTyped = result.select(cols:_*)
val resultFinal = resultTyped.withColumn("HeartDiseaseorAttack",col("HeartDiseaseorAttack").cast(IntegerType))
val fieldSeq: scala.collection.Seq[StructField] = resultFinal.schema.fields.toSeq.filter(f => f.dataType == DoubleType)
val fieldNameSeq: Seq[String] = fieldSeq.map(f => f.name)
val positionsArray: ArrayBuffer[LabeledPoint] = ArrayBuffer[LabeledPoint]()
resultFinal.collect().foreach{
row => positionsArray += convertRowToLabeledPoint(row, fieldNameSeq, row.getAs("HeartDiseaseorAttack"))
}
val mRdd:RDD[LabeledPoint]= sc.parallelize(positionsArray)
MLUtils.saveAsLibSVMFile(mRdd, "libsvmDiseases")
val data = MLUtils.loadLibSVMFile(sc, "libsvmDiseases/part-00000")
val splits = data.randomSplit(Array(0.3, 0.7), seed = 2L)
val training = splits(1)
val test = splits(0)
// Run training algorithm to build the model
val model = new LogisticRegressionWithLBFGS()
.setNumClasses(2)
.run(training)
// Compute raw scores on the test set.
val predictionAndLabels = test.map { case LabeledPoint(label, features) =>
val prediction = model.predict(features)
(prediction, label)
}
// Get evaluation metrics.
val metrics = new MulticlassMetrics(predictionAndLabels)
val accuracy = metrics.accuracy
println(s"Accuracy = $accuracy")
// Save the model
model.save(sc,"logicRegressionmodelD")
println(model.weights)
spark.close()
}
@throws(classOf[Exception])
private def convertRowToLabeledPoint(rowIn: Row, fieldNameSeq: Seq[String], label:Int): LabeledPoint =
{
try
{
val values: Map[String, Double] = rowIn.getValuesMap(fieldNameSeq)
val sortedValuesMap = ListMap(values.toSeq.sortBy(_._1): _*)
val rowValuesItr: Iterable[Double] = sortedValuesMap.values
val positionsArray: ArrayBuffer[Int] = ArrayBuffer[Int]()
val valuesArray: ArrayBuffer[Double] = ArrayBuffer[Double]()
var currentPosition: Int = 0
rowValuesItr.foreach
{
kv =>
if (kv > 0)
{
valuesArray += kv
positionsArray += currentPosition
}
currentPosition += 1
}
// The sparse vector size must be the total number of fields, not the non-zero count;
// otherwise a non-zero value in a trailing position produces an out-of-bounds index.
val lp: LabeledPoint = new LabeledPoint(label, org.apache.spark.mllib.linalg.Vectors.sparse(fieldNameSeq.size, positionsArray.toArray, valuesArray.toArray))
lp
}
catch
{
case ex: Exception =>
throw new Exception(ex)
}
}
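// Worked example (hypothetical field values, not from the dataset): for fieldNameSeq =
// Seq("a", "b", "c") and a row with a = 0.0, b = 2.0, c = 1.0, the fields are sorted by
// name, zeros are skipped, and the result is
// LabeledPoint(label, Vectors.sparse(3, Array(1, 2), Array(2.0, 1.0))).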
}
|
Essexwwz/heart-health-indicator
|
heart-health-indicator/src/main/scala/logicalDiseases.scala
|
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.SparkSession
object logicalDiseases {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("csvreader")
.master("local")
.getOrCreate()
val sc = spark.sparkContext
val data = MLUtils.loadLibSVMFile(sc, "libsvmDiseases/part-00000")
val splits = data.randomSplit(Array(0.3, 0.7), seed = 2L)
val training = splits(1)
val test = splits(0)
// Run training algorithm to build the model
val model = new LogisticRegressionWithLBFGS()
.setNumClasses(2)
.run(training)
// Compute raw scores on the test set.
val predictionAndLabels = test.map { case LabeledPoint(label, features) =>
val prediction = model.predict(features)
(prediction, label)
}
// Get evaluation metrics.
val metrics = new MulticlassMetrics(predictionAndLabels)
val accuracy = metrics.accuracy
println(s"Accuracy = $accuracy")
// Save the model
model.save(sc,"logicRegressionmodelD")
println(model.weights)
spark.close()
}
}
|
Essexwwz/heart-health-indicator
|
heart-health-indicator/src/main/scala/lrDiseasesandBMI.scala
|
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.FeatureHasher
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.sql.{Row, SparkSession}
object lrDiseasesandBMI {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("csvreader")
.master("local")
.getOrCreate()
val prim = spark.read.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat")
.option("header","true")
.option("inferSchema","true")
.load("data/heart_disease_health_indicators_BRFSS2015.csv")
prim.createOrReplaceTempView("resultsql")
//val result = spark.sql("select * from resultsql where CholCheck = 1")//
val data = spark.sql("select HeartDiseaseorAttack,BMI,Smoker,Stroke,Diabetes from resultsql where CholCheck =1")
val splited = data.randomSplit(Array(0.6,0.4),11L)
val train_index = splited(0)
val test_index = splited(1)
//feature hasher
val hasher = new FeatureHasher()
.setInputCols("BMI","Smoker","Stroke","Diabetes")
.setOutputCol("feature")
val train_hs = hasher.transform(train_index)
val test_hs = hasher.transform(test_index)
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(0.3)
.setElasticNetParam(0)
.setFeaturesCol("feature")
.setLabelCol("HeartDiseaseorAttack")
.setPredictionCol("HeartDiseaseorAttack_predict")
val model_lr = lr.fit(train_hs)
println(s"(Diabetes,BMI,Smoker,Stroke): ${model_lr.coefficients} intercept: ${model_lr.intercept}")
val predictions = model_lr.transform(test_hs)
val prdd = predictions.select("HeartDiseaseorAttack_predict", "HeartDiseaseorAttack").rdd.map {
// MulticlassMetrics expects (prediction, label) pairs; the original selected the columns in reverse order
row => (row.getDouble(0), row.getAs[Number](1).doubleValue)
}
val metrics = new MulticlassMetrics(prdd)
val accuracy = metrics.accuracy
val weightedPrecision = metrics.weightedPrecision
val weightedRecall = metrics.weightedRecall
val f1 = metrics.weightedFMeasure
println(s"LR评估结果:\n分类正确率:$accuracy\n加权正确率:$weightedPrecision\n加权召回率:$weightedRecall\nF1值:$f1")
}
}
|
Essexwwz/heart-health-indicator
|
heart-health-indicator/src/main/scala/corr.scala
|
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DataTypes, DoubleType, IntegerType, StructField}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import scala.collection.immutable.ListMap
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
object corr {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("csvreader")
.master("local")
.getOrCreate()
import spark.implicits._
val sc = spark.sparkContext
val prim = spark.read.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat")
.option("header", "true")
.option("inferSchema", "true")
.load("data/heart_disease_health_indicators_BRFSS2015.csv")
prim.createOrReplaceTempView("resultsql")
val result = spark.sql("select * from resultsql where CholCheck = 1")
val ans1 = result.select("HeartDiseaseorAttack", "HighBP").rdd.map(x => (x(0).toString.toDouble, x(1).toString.toDouble))
//For cholesterol data, we need to eliminate samples that have not been tested for cholesterol
val Chol = result.select("HeartDiseaseorAttack", "HighChol").rdd.map(x => x(1).toString.toDouble)
val Attack: RDD[Double] = ans1.map(x => x._1)
val Bp: RDD[Double] = ans1.map(x => x._2)
val c1 = Statistics.corr(Bp, Attack, "spearman")
val c2 = Statistics.corr(Chol, Attack, "spearman")
println("Spearman coefficient HighBP " + c1)
println("Spearman coefficient HignChol" + c2)
println("It can be seen that heart disease is slightly positively correlated with hypertension and high cholesterol")
println("***********************************************")
val Smoker = result.select("HeartDiseaseorAttack", "Smoker").rdd.map(x => x(1).toString.toDouble)
val BMI = result.select("HeartDiseaseorAttack", "BMI").rdd.map(x => x(1).toString.toDouble)
val Diabetes =result.select("HeartDiseaseorAttack", "Diabetes").rdd.map(x => x(1).toString.toDouble)
val Phys = result.select("HeartDiseaseorAttack", "PhysActivity").rdd.map(x => x(1).toString.toDouble)
val c3 = Statistics.corr(BMI,Attack,"spearman")
val c4 = Statistics.corr(Smoker,Attack,"spearman")
val c5 = Statistics.corr(Diabetes,Attack,"spearman")
val c6 = Statistics.corr(Phys,Attack,"spearman")
val c7 = Statistics.corr(result.select("HeartDiseaseorAttack", "Fruits").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c8 = Statistics.corr(result.select("HeartDiseaseorAttack", "Veggies").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c9= Statistics.corr(result.select("HeartDiseaseorAttack", "HvyAlcoholConsump").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c10 = Statistics.corr(result.select("HeartDiseaseorAttack", "AnyHealthcare").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c11 = Statistics.corr(result.select("HeartDiseaseorAttack", "NoDocbcCost").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c12 = Statistics.corr(result.select("HeartDiseaseorAttack", "GenHlth").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c13 = Statistics.corr(result.select("HeartDiseaseorAttack", "MentHlth").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c14 = Statistics.corr(result.select("HeartDiseaseorAttack", "PhysHlth").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c15 = Statistics.corr(result.select("HeartDiseaseorAttack", "DiffWalk").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c16 = Statistics.corr(result.select("HeartDiseaseorAttack", "Sex").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c17 = Statistics.corr(result.select("HeartDiseaseorAttack", "Age").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c18 = Statistics.corr(result.select("HeartDiseaseorAttack", "Education").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c19 = Statistics.corr(result.select("HeartDiseaseorAttack", "Income").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
val c20 = Statistics.corr(result.select("HeartDiseaseorAttack", "Stroke").rdd.map(x => x(1).toString.toDouble),Attack,"spearman")
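// The repetition above could be collapsed with a small helper (a hypothetical refactor,
// shown only as a sketch):
//
//   def spearmanWith(name: String): Double =
//     Statistics.corr(result.select(name).rdd.map(_.get(0).toString.toDouble), Attack, "spearman")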
println("Spearman coefficient BMI " + c3)
println("Spearman coefficient Smoker " + c4)
println("Spearman coefficient Stroke " + c20)
println("Spearman coefficient Diabetes " + c5)
println("***********************************************")
println("Spearman coefficient PhysActivity " + c6)
println("Spearman coefficient Fruits " + c7)
println("Spearman coefficient Veggies " + c8)
println("Spearman coefficient HvyAlcoholConsump " + c9)
println("***********************************************")
println("Spearman coefficient AnyHealthCare " + c10)
println("Spearman coefficient NoDocbcCost " + c11)
println("Spearman coefficient GenHealth " + c12)
println("Spearman coefficient MentHlth " + c13)
println("Spearman coefficient PhysHlth " + c14)
println("Spearman coefficient Diffwalk " + c15)
println("***********************************************")
println("Spearman coefficient Sex " + c16)
println("Spearman coefficient Age " + c17)
println("Spearman coefficient Education " + c18)
println("Spearman coefficient Income " + c19)
println("***********************************************")
spark.close()
}
}
|
Essexwwz/heart-health-indicator
|
heart-health-indicator/src/main/scala/modelshow.scala
|
import org.apache.spark.sql.SparkSession
object modelshow {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("csvreader")
.master("local")
.getOrCreate()
val model = spark.read.format("parquet").load("data/lrmodelD.snappy.parquet")
model.show(false)
}
}
|
Aerotic/mChiselTemplate
|
src/test/scala/gcd/DECODERUnitTest.scala
|
package gcd
import java.io.File
import chisel3.util._
import chisel3.iotesters
import chisel3.iotesters.{ChiselFlatSpec, Driver, PeekPokeTester}
class DECODERUnitTester(c: DECODER) extends PeekPokeTester(c) {
private val mDecoder = c
poke(c.io.in,0)
step(1)
println()
println()
println("Out is " + peek(c.io.out).toString)
poke(c.io.in,7)
step(1)
println("Out is " + peek(c.io.out).toString)
// poke(c.io.in,"b010".U(3.W))
// step(1)
// println("Out is" + peek(c.io.out).toString)
// poke(c.io.in,"b011".U(3.W))
// step(1)
// println("Out is" + peek(c.io.out).toString)
// poke(c.io.in,"b100".U(3.W))
// poke(c.io.in,"b101".U(3.W))
// poke(c.io.in,"b110".U(3.W))
// poke(c.io.in,"b111".U(3.W))
// for(i <- 0 to 7){
// }
}
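// A fuller sweep (a sketch along the lines of the commented-out loop above): check that
// every 3-bit input decodes to the matching one-hot output.
//
//   for (i <- 0 to 7) {
//     poke(c.io.in, i)
//     step(1)
//     expect(c.io.out, BigInt(1) << i)
//   }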
class DECODERTester extends ChiselFlatSpec {
// Disable this until we fix isCommandAvailable to swallow stderr along with stdout
private val backendNames = if(firrtl.FileUtils.isCommandAvailable(Seq("verilator", "--version"))) {
Array("firrtl", "verilator")
}
else {
Array("firrtl")
}
for ( backendName <- backendNames ) {
"DECODER" should s"calculate proper greatest common denominator (with $backendName)" in {
Driver(() => new DECODER, backendName) {
c => new DECODERUnitTester(c)
} should be (true)
}
}
"Basic test using Driver.execute" should "be used as an alternative way to run specification" in {
iotesters.Driver.execute(Array(), () => new DECODER) {
c => new DECODERUnitTester(c)
} should be (true)
}
if(backendNames.contains("verilator")) {
"using --backend-name verilator" should "be an alternative way to run using verilator" in {
iotesters.Driver.execute(Array("--backend-name", "verilator"), () => new DECODER) {
c => new DECODERUnitTester(c)
} should be(true)
}
}
"running with --is-verbose" should "show more about what's going on in your tester" in {
iotesters.Driver.execute(Array("--is-verbose"), () => new DECODER) {
c => new DECODERUnitTester(c)
} should be(true)
}
/**
* By default verilator backend produces vcd file, and firrtl and treadle backends do not.
* Following examples show you how to turn on vcd for firrtl and treadle and how to turn it off for verilator
*/
"running with --generate-vcd-output on" should "create a vcd file from your test" in {
iotesters.Driver.execute(
Array("--generate-vcd-output", "on", "--target-dir", "test_run_dir/make_a_vcd", "--top-name", "make_a_vcd"),
() => new DECODER
) {
c => new DECODERUnitTester(c)
} should be(true)
new File("test_run_dir/make_a_vcd/make_a_vcd.vcd").exists should be (true)
}
"running with --generate-vcd-output off" should "not create a vcd file from your test" in {
iotesters.Driver.execute(
Array("--generate-vcd-output", "off", "--target-dir", "test_run_dir/make_no_vcd", "--top-name", "make_no_vcd",
"--backend-name", "verilator"),
() => new DECODER
) {
c => new DECODERUnitTester(c)
} should be(true)
new File("test_run_dir/make_no_vcd/make_a_vcd.vcd").exists should be (false)
}
}
|
Aerotic/mChiselTemplate
|
src/main/scala/gcd/DECODER.scala
|
package gcd
import chisel3._
import chisel3.util._
import chisel3.experimental._ // To enable experimental features
// class MVER_IO extends Bundle {
// val ver_in = Input(UInt(3.W))
// val ver_out = Output(UInt(8.W))
// val ver_clk = Input(Clock())
// // ver_out.setName("ver_out")
// // ver_in.setName("ver_in")
// }
// class MVER_MODULE extends BlackBox with HasBlackBoxResource {
// val io = new MVER_IO()
// // renameClock(Driver.implictClock,"ver_clk")
// setResource("/mver.v")
// }
// The Verilog module name must match the class name exactly (case-sensitive)
class M_BlackBoxInline extends BlackBox with HasBlackBoxInline{
val io = IO(new Bundle() {
val ver_in = Input(UInt(3.W))
// val in2 = Input(UInt(64.W))
val ver_out = Output(UInt(8.W))
})
// addResource("mver.v")
setInline("M_BlackBoxInline.v", // 运行过程中会在输出文件夹中生成以此命名的verilog文件
s"""
|module M_BlackBoxInline(
| input [2:0] ver_in,
| output reg [7:0] ver_out
|);
|always @(*) begin
| case (ver_in)
| 3'b000: ver_out = 8'b0000_0001;
| 3'b001: ver_out = 8'b0000_0010;
| 3'b010: ver_out = 8'b0000_0100;
| 3'b011: ver_out = 8'b0000_1000;
| 3'b100: ver_out = 8'b0001_0000;
| 3'b101: ver_out = 8'b0010_0000;
| 3'b110: ver_out = 8'b0100_0000;
| 3'b111: ver_out = 8'b1000_0000;
| default: ver_out = 8'b0000_0001;
| endcase
|end
|endmodule // M_BlackBoxInline
""".stripMargin)
}
// The Verilog module name must match the class name exactly (case-sensitive)
class TriOctDecoder_Verilog extends BlackBox with HasBlackBoxResource{
val io = IO(new Bundle() {
val ver_in = Input(UInt(3.W)) // this name must match the port name in the Verilog source
val ver_out = Output(UInt(8.W))
})
addResource("/TriOctDecoder_Verilog.v") // 添加源文件,路径root在test/resources内
// setResource("/MVER1.v") 上下两种皆可,但在chisel3.2之后只能用上边那种
}
class TriOctDecoder extends Module {
val io = IO(new Bundle {
val in = Input(UInt(3.W))
val out = Output(UInt(8.W))
})
io.out := "b0000_0000".U(8.W) // io.out初始化赋值,无此则报错
switch (io.in){
is ("b000".U(3.W)){
io.out := "b0000_0001".U(8.W)
}
is ("b001".U(3.W)){
io.out := "b0000_0010".U(8.W)
}
is ("b010".U(3.W)){
io.out := "b0000_0100".U(8.W)
}
is ("b011".U(3.W)){
io.out := "b0000_1000".U(8.W)
}
is ("b100".U(3.W)){
io.out := "b0001_0000".U(8.W)
}
is ("b101".U(3.W)){
io.out := "b0010_0000".U(8.W)
}
is ("b110".U(3.W)){
io.out := "b0100_0000".U(8.W)
}
is ("b111".U(3.W)){
io.out := "b1000_0000".U(8.W)
}
}
}
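// The switch above is equivalent to shifting a one-hot seed (a sketch; the bit extraction
// keeps the result width at 8 bits for the connection):
//
//   io.out := ("b0000_0001".U(8.W) << io.in)(7, 0)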
class DECODER extends Module {
val io = IO(new Bundle {
val in = Input(UInt(3.W))
val out = Output(UInt(8.W))
})
val mverm = Module(new TriOctDecoder_Verilog())
io.out := mverm.io.ver_out
mverm.io.ver_in := io.in
}
|
Aerotic/mChiselTemplate
|
src/main/scala/gcd/GCD.scala
|
// See README.md for license details.
package gcd
import chisel3._
/**
* Compute GCD using subtraction method.
* Subtracts the smaller from the larger until register y is zero.
* The value in register x is then the GCD.
*/
class GCD extends Module {
val io = IO(new Bundle {
val value1 = Input(UInt(16.W))
val value2 = Input(UInt(16.W))
val loadingValues = Input(Bool())
val outputGCD = Output(UInt(16.W))
val outputValid = Output(Bool())
})
val x = Reg(UInt())
val y = Reg(UInt())
when(x > y) { x := x - y }
.otherwise { y := y - x }
when(io.loadingValues) {
x := io.value1
y := io.value2
}
io.outputGCD := x
io.outputValid := y === 0.U
}
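// A software model of the same subtraction loop (a hypothetical helper, not part of the
// generated hardware), useful as a reference when checking outputGCD:
//
//   @annotation.tailrec
//   def gcd(x: Int, y: Int): Int =
//     if (y == 0) x else if (x > y) gcd(x - y, y) else gcd(x, y - x)
//
//   gcd(48, 18) // 6, the value left in register x once outputValid is asserted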
|
Aerotic/mChiselTemplate
|
src/test/scala/gcd/DECODERMain.scala
|
package gcd
import chisel3._
object DECODERMain extends App {
iotesters.Driver.execute(args, () => new DECODER) {
c => new DECODERUnitTester(c)
}
}
object DECODERRepl extends App {
iotesters.Driver.executeFirrtlRepl(args, () => new DECODER)
}
|
jaceklaskowski/spark-knn
|
spark-knn-core/src/test/scala/org/apache/spark/ml/knn/KNNSuite.scala
|
package org.apache.spark.ml.knn
import org.apache.spark.ml.PredictionModel
import org.apache.spark.ml.classification.KNNClassifier
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.knn.KNN.VectorWithNorm
import org.apache.spark.ml.regression.KNNRegression
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.log4j
import org.scalatest.{FunSuite, Matchers}
import scala.collection.mutable
class KNNSuite extends FunSuite with Matchers {
val logger = log4j.Logger.getLogger(getClass)
val spark = SparkSession.builder()
.master("local")
.getOrCreate()
val sc = spark.sparkContext
private[this] val data = (-10 to 10).flatMap(i => (-10 to 10).map(j => Vectors.dense(i, j)))
private[this] val leafSize = 5
test("KNN can be fitted") {
val knn = new KNN()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setAuxCols(Array("features"))
val df = createDataFrame()
val model = knn.fit(df).setK(1)
val results = model.transform(df).collect()
results.length shouldBe data.size
results.foreach {
row =>
val vector = row.getAs[Vector](3)
val neighbors = row.getAs[mutable.WrappedArray[Row]](4)
if (neighbors.isEmpty) {
logger.error(vector.toString)
}
neighbors.length shouldBe 1
val neighbor = neighbors.head.getAs[Vector](0)
new VectorWithNorm(vector).fastSquaredDistance(new VectorWithNorm(neighbor)) shouldBe 0.0
}
}
test("KNN fits correctly with maxDistance") {
val knn = new KNN()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setAuxCols(Array("features"))
val df = createDataFrame()
val model = knn.fit(df).setK(6).setMaxDistance(1)
val results = model.transform(df).collect()
results.length shouldBe data.size
results.foreach {
row =>
val vector = row.getAs[Vector](3)
val neighbors = row.getAs[mutable.WrappedArray[Row]](4)
if (neighbors.isEmpty) {
logger.error(vector.toString)
}
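// Points on the grid boundary (a coordinate equal to +-10) have fewer unit-distance
// neighbours, so the expected count drops by one per boundary axis.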
val numEdges = vector.toArray.map(math.abs).count(_ == 10)
if (neighbors.length > 5 - numEdges) {
logger.error(vector.toString)
logger.error(neighbors.toList.toString)
}
neighbors.length should be <= 5 - numEdges
val closest = neighbors.head.getAs[Vector](0)
new VectorWithNorm(vector).fastSquaredDistance(new VectorWithNorm(closest)) shouldBe 0.0
val rest = neighbors.tail.map(_.getAs[Vector](0))
rest.foreach { neighbor =>
val sqDist = new VectorWithNorm(vector).fastSquaredDistance(new VectorWithNorm(neighbor))
sqDist shouldEqual 1.0 +- 1e-6
}
}
}
test("KNNClassifier can be fitted with/without weight column") {
val knn = new KNNClassifier()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setK(1)
checkKNN(knn.fit)
checkKNN(knn.setWeightCol("z").fit)
}
test("KNNRegressor can be fitted with/without weight column") {
val knn = new KNNRegression()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setK(1)
checkKNN(knn.fit)
checkKNN(knn.setWeightCol("z").fit)
}
test("KNNParmas are copied correctly") {
val knn = new KNNClassifier()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setK(2)
val model = knn.fit(createDataFrame().withColumn("label", lit(1.0)))
// check pre-set parameters are correctly copied
model.getK shouldBe 2
// check auto generated buffer size is correctly transferred
model.getBufferSize should be > 0.0
}
test("BufferSize is not estimated if rho = 0") {
val knn = new KNNClassifier()
.setTopTreeSize(data.size / 10)
.setTopTreeLeafSize(leafSize)
.setSubTreeLeafSize(leafSize)
.setBalanceThreshold(0)
val model = knn.fit(createDataFrame().withColumn("label", lit(1.0)))
model.getBufferSize shouldBe 0.0
}
private[this] def checkKNN(fit: DataFrame => PredictionModel[_, _]): Unit = {
val df = createDataFrame()
df.sqlContext.udf.register("label", { v: Vector => math.abs(v(0)) })
val training = df.selectExpr("*", "label(features) as label")
val model = fit(training)
val results = model.transform(training).select("label", "prediction").collect()
results.length shouldBe data.size
results foreach {
row => row.getDouble(0) shouldBe row.getDouble(1)
}
}
private[this] def createDataFrame(): DataFrame = {
val rdd = sc.parallelize(data.map(v => Row(v.toArray: _*)))
val assembler = new VectorAssembler()
.setInputCols(Array("x", "y"))
.setOutputCol("features")
assembler.transform(
spark.createDataFrame(rdd,
StructType(
Seq(
StructField("x", DoubleType),
StructField("y", DoubleType)
)
)
).withColumn("z", lit(1.0))
)
}
}
|
jaceklaskowski/spark-knn
|
spark-knn-core/src/main/scala/org/apache/spark/ml/knn/MetricTree.scala
|
package org.apache.spark.ml.knn
import breeze.linalg._
import org.apache.spark.ml.knn.KNN._
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.util.random.XORShiftRandom
import scala.collection.mutable
/**
* A [[Tree]] is used to store data points used in k-NN search. It represents
* a binary tree node. It keeps track of the pivot vector, which closely approximates
* the center of all vectors within the node. All vectors are within `radius` distance
* of the pivot vector. Finally, it knows the number of leaves, which helps
* determine the partition index.
*/
private[ml] abstract class Tree extends Serializable {
val leftChild: Tree
val rightChild: Tree
val size: Int
val leafCount: Int
val pivot: VectorWithNorm
val radius: Double
def iterator: Iterator[RowWithVector]
/**
* k-NN query using pre-built [[Tree]]
* @param v vector to query
* @param k number of nearest neighbor
* @return a list of neighbor that is nearest to the query vector
*/
def query(v: Vector, k: Int = 1): Iterable[(RowWithVector, Double)] = query(new VectorWithNorm(v), k)
def query(v: VectorWithNorm, k: Int): Iterable[(RowWithVector, Double)] = query(new KNNCandidates(v, k)).toIterable
/**
* Refine k-NN candidates using data in this [[Tree]]
*/
private[knn] def query(candidates: KNNCandidates): KNNCandidates
/**
* Compute QueryCost defined as || v.center - q || - r
* when this is >= v.r the node can be pruned
* for MetricNode this can be used to determine which child the queryVector falls into
*/
private[knn] def distance(candidates: KNNCandidates): Double = distance(candidates.queryVector)
private[knn] def distance(v: VectorWithNorm): Double =
if(pivot.vector.size > 0) pivot.fastDistance(v) else 0.0
}
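// Example usage (a minimal sketch): build a metric tree over some rows and query it.
//
//   val tree = MetricTree.build(rows, leafSize = 5)
//   val nearest = tree.query(Vectors.dense(1.0, 2.0), k = 3) // Iterable[(RowWithVector, Double)]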
private[knn]
case object Empty extends Tree {
override val leftChild = this
override val rightChild = this
override val size = 0
override val leafCount = 0
override val pivot = new VectorWithNorm(Vectors.dense(Array.empty[Double]))
override val radius = 0.0
override def iterator: Iterator[RowWithVector] = Iterator.empty
override def query(candidates: KNNCandidates): KNNCandidates = candidates
}
private[knn]
case class Leaf (data: IndexedSeq[RowWithVector],
pivot: VectorWithNorm,
radius: Double) extends Tree {
override val leftChild = Empty
override val rightChild = Empty
override val size = data.size
override val leafCount = 1
override def iterator: Iterator[RowWithVector] = data.iterator
// brute force k-NN search at the leaf
override def query(candidates: KNNCandidates): KNNCandidates = {
val sorted = data
.map{ v => (v, candidates.queryVector.fastDistance(v.vector)) }
.sortBy(_._2)
for((v, d) <- sorted if candidates.notFull || d < candidates.maxDistance)
candidates.insert(v, d)
candidates
}
}
private[knn]
object Leaf {
def apply(data: IndexedSeq[RowWithVector]): Leaf = {
val vectors = data.map(_.vector.vector.asBreeze)
val (minV, maxV) = vectors.foldLeft((vectors.head, vectors.head)) {
case ((accMin, accMax), bv) =>
(min(accMin, bv), max(accMax, bv))
}
val pivot = new VectorWithNorm((minV + maxV) / 2.0)
val radius = math.sqrt(squaredDistance(minV, maxV)) / 2.0
Leaf(data, pivot, radius)
}
}
/**
 * A [[MetricTree]] represents a MetricNode where data are split into two partitions: left and right.
 * There exist two pivot vectors, leftPivot and rightPivot, that determine the partitioning.
 * The node's pivot vector is the midpoint of the leftPivot and rightPivot vectors.
 * Points that are closer to leftPivot than to rightPivot belong to leftChild; the rest belong to rightChild.
 *
 * During search, because we have information about each child's pivot and radius, we can check whether
 * a child's hyper-sphere intersects with the current candidates' sphere. If so, we search the child that has the
 * most potential (i.e. the child with the closest pivot).
 * Once that child has been fully searched, we backtrack to the remaining child and search it if necessary.
 *
 * This is much more efficient than a naive brute force search. However, backtracking can take a lot of time
 * when the number of dimensions is high (due to the longer time needed to compute distances, and the volume
 * growing much faster than the radius).
 */
private[knn]
case class MetricTree(leftChild: Tree,
leftPivot: VectorWithNorm,
rightChild: Tree,
rightPivot: VectorWithNorm,
pivot: VectorWithNorm,
radius: Double
) extends Tree {
override val size = leftChild.size + rightChild.size
override val leafCount = leftChild.leafCount + rightChild.leafCount
override def iterator: Iterator[RowWithVector] = leftChild.iterator ++ rightChild.iterator
override def query(candidates: KNNCandidates): KNNCandidates = {
lazy val leftQueryCost = leftChild.distance(candidates)
lazy val rightQueryCost = rightChild.distance(candidates)
    // only query if at least one of the children is worth looking into
if(candidates.notFull ||
leftQueryCost - candidates.maxDistance < leftChild.radius ||
rightQueryCost - candidates.maxDistance < rightChild.radius ){
val remainingChild = {
if (leftQueryCost <= rightQueryCost) {
leftChild.query(candidates)
rightChild
} else {
rightChild.query(candidates)
leftChild
}
}
      // check again to see if the remaining child is still worth looking into
if (candidates.notFull ||
remainingChild.distance(candidates) - candidates.maxDistance < remainingChild.radius) {
remainingChild.query(candidates)
}
}
candidates
}
}
object MetricTree {
  /**
   * Build a (metric) [[Tree]] that facilitates k-NN queries
   *
   * @param data vectors that contain all training data
   * @param leafSize maximum number of data points stored in a single leaf
   * @param seed random number generator seed used in pivot point selection
   * @return a [[Tree]] that can be used for k-NN queries
   */
def build(data: IndexedSeq[RowWithVector], leafSize: Int = 1, seed: Long = 0L): Tree = {
val size = data.size
if(size == 0) {
Empty
} else if(size <= leafSize) {
Leaf(data)
} else {
val rand = new XORShiftRandom(seed)
val randomPivot = data(rand.nextInt(size)).vector
val leftPivot = data.maxBy(v => randomPivot.fastSquaredDistance(v.vector)).vector
if(leftPivot == randomPivot) {
// all points are identical (or only one point left)
Leaf(data, randomPivot, 0.0)
} else {
val rightPivot = data.maxBy(v => leftPivot.fastSquaredDistance(v.vector)).vector
val pivot = new VectorWithNorm(Vectors.fromBreeze((leftPivot.vector.asBreeze + rightPivot.vector.asBreeze) / 2.0))
val radius = math.sqrt(data.map(v => pivot.fastSquaredDistance(v.vector)).max)
val (leftPartition, rightPartition) = data.partition{
v => leftPivot.fastSquaredDistance(v.vector) < rightPivot.fastSquaredDistance(v.vector)
}
MetricTree(
build(leftPartition, leafSize, rand.nextLong()),
leftPivot,
build(rightPartition, leafSize, rand.nextLong()),
rightPivot,
pivot,
radius
)
}
}
}
}
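// Illustrative walk-through of the construction above: with 1-D points {0.0, 1.0, 10.0},
// the two farthest-apart pivots found are 0.0 and 10.0, the midpoint pivot is 5.0, and
// partitioning by nearest pivot separates {0.0, 1.0} from {10.0} (which side becomes
// "left" depends on the random pivot draw) before recursing into each partition.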
/**
 * A [[SpillTree]] represents a SpillNode. Just like [[MetricTree]], it splits data into two partitions.
 * However, instead of partitioning the data into exactly two halves, it keeps a buffer zone of size tau.
 * The left child contains all data to the left of the center plane + tau (in the leftPivot -> rightPivot direction).
 * The right child contains all data to the right of the center plane - tau.
 *
 * Search doesn't backtrack but rather adopts a defeatist strategy: it searches the most promising
 * child, and that child only. The buffer ensures this strategy doesn't produce a poor outcome.
 */
private[knn]
case class SpillTree(leftChild: Tree,
leftPivot: VectorWithNorm,
rightChild: Tree,
rightPivot: VectorWithNorm,
pivot: VectorWithNorm,
radius: Double,
tau: Double,
bufferSize: Int
) extends Tree {
override val size = leftChild.size + rightChild.size - bufferSize
override val leafCount = leftChild.leafCount + rightChild.leafCount
override def iterator: Iterator[RowWithVector] =
leftChild.iterator ++ rightChild.iterator.filter(childFilter(leftPivot, rightPivot))
override def query(candidates: KNNCandidates): KNNCandidates = {
if (size <= candidates.k - candidates.candidates.size) {
iterator.foreach(candidates.insert)
} else {
val leftQueryCost = candidates.queryVector.fastSquaredDistance(leftPivot)
val rightQueryCost = candidates.queryVector.fastSquaredDistance(rightPivot)
(if (leftQueryCost <= rightQueryCost) leftChild else rightChild).query(candidates)
      // fill candidates with points from the other child, excluding the buffer so we don't double count.
      // depending on k and how high we are in the tree, this can be very expensive and undesirable
      // TODO: revisit this idea when we do large scale testing
if(candidates.notFull) {
(if (leftQueryCost <= rightQueryCost) {
rightChild.iterator.filter(childFilter(leftPivot, rightPivot))
} else {
leftChild.iterator.filter(childFilter(rightPivot, leftPivot))
}).foreach(candidates.tryInsert)
}
}
candidates
}
private[this] val childFilter: (VectorWithNorm, VectorWithNorm) => RowWithVector => Boolean =
(p1, p2) => p => p.vector.fastDistance(p1) - p.vector.fastDistance(p2) > tau
}
object SpillTree {
  /**
   * Build a (spill) [[Tree]] that facilitates k-NN queries
   *
   * @param data vectors that contain all training data
   * @param leafSize maximum number of data points stored in a single leaf
   * @param tau overlapping (buffer) size
   * @param seed random number generator seed used in pivot point selection
   * @return a [[Tree]] that can be used for k-NN queries
   */
def build(data: IndexedSeq[RowWithVector], leafSize: Int = 1, tau: Double, seed: Long = 0L): Tree = {
val size = data.size
if (size == 0) {
Empty
} else if (size <= leafSize) {
Leaf(data)
} else {
val rand = new XORShiftRandom(seed)
val randomPivot = data(rand.nextInt(size)).vector
val leftPivot = data.maxBy(v => randomPivot.fastSquaredDistance(v.vector)).vector
if (leftPivot == randomPivot) {
// all points are identical (or only one point left)
Leaf(data, randomPivot, 0.0)
} else {
val rightPivot = data.maxBy(v => leftPivot.fastSquaredDistance(v.vector)).vector
val pivot = new VectorWithNorm(Vectors.fromBreeze((leftPivot.vector.asBreeze + rightPivot.vector.asBreeze) / 2.0))
val radius = math.sqrt(data.map(v => pivot.fastSquaredDistance(v.vector)).max)
val dataWithDistance = data.map(v =>
(v, leftPivot.fastDistance(v.vector), rightPivot.fastDistance(v.vector))
)
val leftPartition = dataWithDistance.filter { case (_, left, right) => left - right <= tau }.map(_._1)
val rightPartition = dataWithDistance.filter { case (_, left, right) => right - left <= tau }.map(_._1)
SpillTree(
build(leftPartition, leafSize, tau, rand.nextLong()),
leftPivot,
build(rightPartition, leafSize, tau, rand.nextLong()),
rightPivot,
pivot,
radius,
tau,
leftPartition.size + rightPartition.size - size
)
}
}
}
}
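// Illustrative sketch of the tau buffer above: with 1-D points {0.0, 1.0, 10.0} and pivots
// 0.0 and 10.0, a point p joins the left partition when dist(p, leftPivot) - dist(p, rightPivot) <= tau,
// and the right partition symmetrically. With tau = 8.0 the point 1.0 satisfies both tests
// (1 - 9 <= 8 and 9 - 1 <= 8), so it lands in *both* partitions, which is exactly the overlap
// recorded as bufferSize = leftPartition.size + rightPartition.size - size (here 4 - 3 = 1).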
object HybridTree {
  /**
   * Build a (hybrid-spill) [[Tree]] that facilitates k-NN queries
   *
   * @param data vectors that contain all training data
   * @param leafSize maximum number of data points stored in a single leaf
   * @param tau overlapping (buffer) size
   * @param rho balance threshold
   * @param seed random number generator seed used in pivot point selection
   * @return a [[Tree]] that can be used for k-NN queries
   */
//noinspection ScalaStyle
def build(data: IndexedSeq[RowWithVector],
leafSize: Int = 1,
tau: Double,
rho: Double = 0.7,
seed: Long = 0L): Tree = {
val size = data.size
if (size == 0) {
Empty
} else if (size <= leafSize) {
Leaf(data)
} else {
val rand = new XORShiftRandom(seed)
val randomPivot = data(rand.nextInt(size)).vector
val leftPivot = data.maxBy(v => randomPivot.fastSquaredDistance(v.vector)).vector
if (leftPivot == randomPivot) {
// all points are identical (or only one point left)
Leaf(data, randomPivot, 0.0)
} else {
val rightPivot = data.maxBy(v => leftPivot.fastSquaredDistance(v.vector)).vector
val pivot = new VectorWithNorm(Vectors.fromBreeze((leftPivot.vector.asBreeze + rightPivot.vector.asBreeze) / 2.0))
val radius = math.sqrt(data.map(v => pivot.fastSquaredDistance(v.vector)).max)
lazy val dataWithDistance = data.map(v =>
(v, leftPivot.fastDistance(v.vector), rightPivot.fastDistance(v.vector))
)
        // the implemented boundary is a hyperbola (constant difference of distances to the two pivots),
        // rather than the perpendicular plane described in the paper
lazy val leftPartition = dataWithDistance.filter { case (_, left, right) => left - right <= tau }.map(_._1)
lazy val rightPartition = dataWithDistance.filter { case (_, left, right) => right - left <= tau }.map(_._1)
if(rho <= 0.0 || leftPartition.size > size * rho || rightPartition.size > size * rho) {
          // fall back to a metric node
val (leftPartition, rightPartition) = data.partition{
v => leftPivot.fastSquaredDistance(v.vector) < rightPivot.fastSquaredDistance(v.vector)
}
MetricTree(
build(leftPartition, leafSize, tau, rho, rand.nextLong()),
leftPivot,
build(rightPartition, leafSize, tau, rho, rand.nextLong()),
rightPivot,
pivot,
radius
)
} else {
SpillTree(
build(leftPartition, leafSize, tau, rho, rand.nextLong()),
leftPivot,
build(rightPartition, leafSize, tau, rho, rand.nextLong()),
rightPivot,
pivot,
radius,
tau,
leftPartition.size + rightPartition.size - size
)
}
}
}
}
}
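// Sketch of the rho fallback above: with size = 100 and rho = 0.7, a spill split is rejected
// whenever either overlapping partition would exceed 70 points (and rho <= 0 disables spill
// nodes entirely); in that case a plain MetricTree node is built from a clean partition instead.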
/**
* Structure to maintain search progress/results for a single query vector.
* Internally uses a PriorityQueue to maintain a max-heap to keep track of the
* next neighbor to evict.
*
* @param queryVector vector being searched
* @param k number of neighbors to return
*/
private[knn]
class KNNCandidates(val queryVector: VectorWithNorm, val k: Int) extends Serializable {
private[knn] val candidates = mutable.PriorityQueue.empty[(RowWithVector, Double)] {
Ordering.by(_._2)
}
  // returns the current maximum distance from a candidate neighbor to the query vector
def maxDistance: Double = if(candidates.isEmpty) 0.0 else candidates.head._2
  // insert, evicting a neighbor if required. Note this doesn't ensure the insert improves
  // the search results; it is the caller's responsibility to make sure either the candidate
  // list is not full or the inserted neighbor brings maxDistance down
def insert(v: RowWithVector, d: Double): Unit = {
while(candidates.size >= k) candidates.dequeue()
candidates.enqueue((v, d))
}
def insert(v: RowWithVector): Unit = insert(v, v.vector.fastDistance(queryVector))
def tryInsert(v: RowWithVector): Unit = {
val distance = v.vector.fastDistance(queryVector)
if(notFull || distance < maxDistance) insert(v, distance)
}
def toIterable: Iterable[(RowWithVector, Double)] = candidates
def notFull: Boolean = candidates.size < k
}
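// A small behavioral sketch (hedged: values are hypothetical; only the heap mechanics
// visible above are exercised):
//
//   val cand = new KNNCandidates(new VectorWithNorm(Vectors.dense(0.0)), k = 2)
//   cand.notFull       // true: no candidates collected yet
//   cand.maxDistance   // 0.0 by convention while the queue is empty
//
// Once k candidates are held, the max-heap keeps the *farthest* one at its head, so
// maxDistance is the distance a new neighbor must beat to be worth inserting.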
|
jaceklaskowski/spark-knn
|
spark-knn-examples/src/main/scala/com/github/saurfang/spark/ml/knn/examples/MNIST.scala
|
package com.github.saurfang.spark.ml.knn.examples
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.KNNClassifier
import org.apache.spark.ml.feature.PCA
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.log4j
object MNIST {
val logger = log4j.Logger.getLogger(getClass)
def main(args: Array[String]) {
val spark = SparkSession.builder().getOrCreate()
val sc = spark.sparkContext
import spark.implicits._
    //read in raw labels and features
val dataset = MLUtils.loadLibSVMFile(sc, "data/mnist/mnist.bz2")
.toDF()
    //split training and testing
val Array(train, test) = dataset
.randomSplit(Array(0.7, 0.3), seed = 1234L)
.map(_.cache())
//create PCA matrix to reduce feature dimensions
val pca = new PCA()
.setInputCol("features")
.setK(50)
.setOutputCol("pcaFeatures")
val knn = new KNNClassifier()
.setTopTreeSize(dataset.count().toInt / 500)
.setFeaturesCol("pcaFeatures")
.setPredictionCol("predicted")
.setK(1)
val pipeline = new Pipeline()
.setStages(Array(pca, knn))
.fit(train)
val insample = validate(pipeline.transform(train))
val outofsample = validate(pipeline.transform(test))
//reference accuracy: in-sample 95% out-of-sample 94%
logger.info(s"In-sample: $insample, Out-of-sample: $outofsample")
}
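  // computes classification accuracy: the fraction of rows whose predicted label matches
  // the true label, evaluated in a single SQL aggregate over the transformed DataFrame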
private[this] def validate(results: DataFrame): Double = {
results
.selectExpr("SUM(CASE WHEN label = predicted THEN 1.0 ELSE 0.0 END) / COUNT(1)")
.collect()
.head
.getDecimal(0)
.doubleValue()
}
}
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/DynamoDBClient.scala
|
<filename>integration/src/it/scala/dynamodb/DynamoDBClient.scala
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package dynamodb
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.scalatest.{Suite, BeforeAndAfterAll}
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.dynamodbv2._
import com.amazonaws.services.dynamodbv2.model._
import org.slf4j.Logger
import org.slf4j.LoggerFactory
trait DynamoDBClient
extends BeforeAndAfterAll
with AwaitHelper
{ self: Suite =>
private val logger: Logger = LoggerFactory.getLogger(self.getClass)
val client = {
val jClient = new AmazonDynamoDBAsyncClient(new BasicAWSCredentials("FAKE_ACCESS_KEY", "FAKE_SECRET_KEY"))
jClient.setEndpoint("http://localhost:8000")
new AmazonDynamoDBScalaClient(jClient)
}
val tableNames: Seq[String]
override def beforeAll(): Unit = {
deleteAllSpecifiedTables()
super.beforeAll()
}
override def afterAll(): Unit = {
try {
super.afterAll()
} finally {
deleteAllSpecifiedTables()
}
}
private def deleteAllSpecifiedTables(): Unit = {
tableNames foreach tryDeleteTable
tableNames foreach awaitTableDeletion
}
def tryDeleteTable(tableName: String): Unit = {
logger.info(s"Deleting $tableName table")
await {
client.deleteTable(tableName) recover { case e: ResourceNotFoundException => () }
}
()
}
def awaitTableDeletion(tableName: String): Unit = {
logger.info(s"Waiting for $tableName table to be deleted.")
val deadline = 10.minutes.fromNow
while (deadline.hasTimeLeft) {
try {
val result = await {
client.describeTable(tableName)
}
if (result.getTable.getTableStatus == TableStatus.ACTIVE.toString) return ()
Thread.sleep(20 * 1000)
} catch {
case e: ResourceNotFoundException =>
return ()
}
}
throw new RuntimeException(s"Timed out waiting for $tableName table to be deleted.")
}
def tryCreateTable(createTableRequest: CreateTableRequest): Unit = {
logger.info(s"Creating ${createTableRequest.getTableName()} table")
await {
client.createTable(createTableRequest)
}
()
}
def awaitTableCreation(tableName: String): TableDescription = {
logger.info(s"Waiting for $tableName table to become active.")
val deadline = 10.minutes.fromNow
while (deadline.hasTimeLeft) {
val result = await {
client.describeTable(tableName)
}
val description = result.getTable
if (description.getTableStatus == TableStatus.ACTIVE.toString)
return description
Thread.sleep(20 * 1000)
}
throw new RuntimeException(s"Timed out waiting for $tableName table to become active.")
}
}
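// Usage sketch (hedged: the request value is illustrative): a typical integration spec
// mixes in this trait, declares `tableNames`, and in beforeAll() runs
//   tryCreateTable(someTableRequest)
//   awaitTableCreation(someTableName)
// exactly as the DynamoDB specs later in this document do.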
|
sunilake/aws-wrap
|
integration/src/it/scala/s3/FutureTransferSpec.scala
|
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package s3
import scala.concurrent.ExecutionContext.Implicits.global
import java.io.File
import org.scalatest.{ FlatSpec, Matchers }
import com.amazonaws.AmazonClientException
class FutureTransferSpec
extends FlatSpec
with Matchers
with S3ClientHelper
{
val bucketName = "my-s3-bucket-98bfdf06-9475-4d1a-a235-e0eddd5859f9"
override val bucketNames = Seq(bucketName)
val objectKey = "test"
override def afterAll(): Unit = {
try {
client.client.deleteObject(bucketName, objectKey)
} finally {
super.afterAll()
}
}
"FutureTransfer" should "upload a file" in {
val file = new File(
this.getClass()
.getClassLoader()
.getResource("logback-test.xml")
.toURI())
val upload = transferManager.upload(bucketName, objectKey, file)
await {
FutureTransfer.listenFor(upload)
}
upload.waitForUploadResult()
()
}
it should "download a file" in {
val file = File.createTempFile("logback-test", ".xml")
try {
val download = transferManager.download(bucketName, objectKey, file)
await {
FutureTransfer.listenFor(download)
}
download.waitForCompletion()
()
} finally {
file.delete()
()
}
}
}
|
sunilake/aws-wrap
|
project/CompilerOptions.scala
|
<filename>project/CompilerOptions.scala
import sbt._, Keys._
object CompilerOptions extends AutoPlugin {
override def trigger = allRequirements
override lazy val projectSettings = Seq(
scalacOptions := Seq(
"-deprecation",
"-encoding", "UTF-8",
"-feature",
"-unchecked",
"-Xfatal-warnings",
"-Xfuture",
//"-Xlint",
//"-Yno-adapted-args",
"-Ywarn-dead-code",
"-Ywarn-numeric-widen",
"-Ywarn-value-discard"
) ++ (
if (scalaVersion.value.startsWith("2.10")) Nil
else List("-Ywarn-unused-import")
),
scalacOptions in (Compile, console) := Seq(
),
scalacOptions in (Compile, doc) := Seq(
)
)
}
|
sunilake/aws-wrap
|
build.sbt
|
organization in ThisBuild := "com.github.dwhjames"
licenses in ThisBuild += ("Apache-2.0", url("http://www.apache.org/licenses/LICENSE-2.0"))
scalaVersion in ThisBuild := "2.11.7"
crossScalaVersions in ThisBuild := Seq("2.10.5", "2.11.7")
shellPrompt in ThisBuild := CustomShellPrompt.customPrompt
resolvers in ThisBuild ++= Seq(
"typesafe" at "http://repo.typesafe.com/typesafe/releases",
"sonatype" at "http://oss.sonatype.org/content/repositories/releases"
)
lazy val awsWrap = project in file(".")
name := "aws-wrap"
libraryDependencies ++= Seq(
Dependencies.Compile.awsJavaSDK_cloudwatch % "provided",
Dependencies.Compile.awsJavaSDK_dynamodb % "provided",
Dependencies.Compile.awsJavaSDK_s3 % "provided",
Dependencies.Compile.awsJavaSDK_ses % "provided",
Dependencies.Compile.awsJavaSDK_simpledb % "provided",
Dependencies.Compile.awsJavaSDK_sns % "provided",
Dependencies.Compile.awsJavaSDK_sqs % "provided",
Dependencies.Compile.slf4j
)
bintrayPackageLabels := Seq("aws", "dynamodb", "s3", "ses", "simpledb", "sns", "sqs", "async", "future")
lazy val awsWrapTest = project.
in(file("integration")).
configs(IntegrationTest).
dependsOn(awsWrap)
lazy val scratch = project.
in(file("scratch")).
dependsOn(awsWrap)
|
sunilake/aws-wrap
|
project/Dependencies.scala
|
<gh_stars>0
import sbt._
object Dependencies {
object V {
val awsJavaSDK = "1.10.32"
val jodaTime = "2.9"
val jodaConvert = "1.8"
val slf4j = "1.7.12"
val logback = "1.1.3"
}
object Compile {
val awsJavaSDK_cloudwatch = "com.amazonaws" % "aws-java-sdk-cloudwatch" % V.awsJavaSDK
val awsJavaSDK_dynamodb = "com.amazonaws" % "aws-java-sdk-dynamodb" % V.awsJavaSDK
val awsJavaSDK_s3 = "com.amazonaws" % "aws-java-sdk-s3" % V.awsJavaSDK
val awsJavaSDK_ses = "com.amazonaws" % "aws-java-sdk-ses" % V.awsJavaSDK
val awsJavaSDK_simpledb = "com.amazonaws" % "aws-java-sdk-simpledb" % V.awsJavaSDK
val awsJavaSDK_sns = "com.amazonaws" % "aws-java-sdk-sns" % V.awsJavaSDK
val awsJavaSDK_sqs = "com.amazonaws" % "aws-java-sdk-sqs" % V.awsJavaSDK
// val awsJavaSDK = "com.amazonaws" % "aws-java-sdk" % V.awsJavaSDK exclude("joda-time", "joda-time")
val jodaTime = "joda-time" % "joda-time" % V.jodaTime
val jodaConvert = "org.joda" % "joda-convert" % V.jodaConvert
val slf4j = "org.slf4j" % "slf4j-api" % V.slf4j
val logback = "ch.qos.logback" % "logback-classic" % V.logback
}
object IntegrationTest {
val scalaTest = "org.scalatest" %% "scalatest" % "2.2.4" % "it"
}
}
|
sunilake/aws-wrap
|
src/main/scala/ses/ses.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package ses
import scala.concurrent.{Future, ExecutionContext}
import scala.collection.JavaConverters._
import java.util.concurrent.ExecutorService
import com.amazonaws.services.simpleemail._
import com.amazonaws.services.simpleemail.model._
class AmazonSimpleEmailServiceScalaClient(val client: AmazonSimpleEmailServiceAsyncClient) {
def deleteIdentity(
deleteIdentityRequest: DeleteIdentityRequest
): Future[DeleteIdentityResult] =
wrapAsyncMethod(client.deleteIdentityAsync, deleteIdentityRequest)
def deleteIdentity(
identity: String
): Future[DeleteIdentityResult] =
deleteIdentity(
new DeleteIdentityRequest()
.withIdentity(identity)
)
def deleteVerifiedEmailAddress(
deleteVerifiedEmailAddressRequest: DeleteVerifiedEmailAddressRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.deleteVerifiedEmailAddressAsync, deleteVerifiedEmailAddressRequest)
def deleteVerifiedEmailAddress(
emailAddress: String
): Future[Unit] =
deleteVerifiedEmailAddress(
new DeleteVerifiedEmailAddressRequest()
.withEmailAddress(emailAddress)
)
def getExecutorService(): ExecutorService =
client.getExecutorService()
def getExecutionContext(): ExecutionContext =
ExecutionContext.fromExecutorService(client.getExecutorService())
def getIdentityDkimAttributes(
getIdentityDkimAttributesRequest: GetIdentityDkimAttributesRequest
): Future[GetIdentityDkimAttributesResult] =
wrapAsyncMethod(client.getIdentityDkimAttributesAsync, getIdentityDkimAttributesRequest)
def getIdentityDkimAttributes(
identities: Iterable[String]
): Future[GetIdentityDkimAttributesResult] =
getIdentityDkimAttributes(
new GetIdentityDkimAttributesRequest()
.withIdentities(identities.asJavaCollection)
)
def getIdentityNotificationAttributes(
getIdentityNotificationAttributesRequest: GetIdentityNotificationAttributesRequest
): Future[GetIdentityNotificationAttributesResult] =
wrapAsyncMethod(client.getIdentityNotificationAttributesAsync, getIdentityNotificationAttributesRequest)
def getIdentityNotificationAttributes(
identities: Iterable[String]
): Future[GetIdentityNotificationAttributesResult] =
getIdentityNotificationAttributes(
new GetIdentityNotificationAttributesRequest()
.withIdentities(identities.asJavaCollection)
)
def getIdentityVerificationAttributes(
getIdentityVerificationAttributesRequest: GetIdentityVerificationAttributesRequest
): Future[GetIdentityVerificationAttributesResult] =
wrapAsyncMethod(client.getIdentityVerificationAttributesAsync, getIdentityVerificationAttributesRequest)
def getIdentityVerificationAttributes(
identities: Iterable[String]
): Future[GetIdentityVerificationAttributesResult] =
getIdentityVerificationAttributes(
new GetIdentityVerificationAttributesRequest()
.withIdentities(identities.asJavaCollection)
)
def getSendQuota(
getSendQuotaRequest: GetSendQuotaRequest
): Future[GetSendQuotaResult] =
wrapAsyncMethod(client.getSendQuotaAsync, getSendQuotaRequest)
def getSendQuota(): Future[GetSendQuotaResult] =
getSendQuota(new GetSendQuotaRequest)
def getSendStatistics(
getSendStatisticsRequest: GetSendStatisticsRequest
): Future[GetSendStatisticsResult] =
wrapAsyncMethod(client.getSendStatisticsAsync, getSendStatisticsRequest)
def getSendStatistics(): Future[GetSendStatisticsResult] =
getSendStatistics(new GetSendStatisticsRequest())
def listIdentities(
listIdentitiesRequest: ListIdentitiesRequest
): Future[ListIdentitiesResult] =
wrapAsyncMethod(client.listIdentitiesAsync, listIdentitiesRequest)
def listIdentities(): Future[ListIdentitiesResult] =
listIdentities(new ListIdentitiesRequest)
@deprecated("Use listIdentities", "May 15, 2012")
def listVerifiedEmailAddresses(
listVerifiedEmailAddressesRequest: ListVerifiedEmailAddressesRequest
): Future[ListVerifiedEmailAddressesResult] =
wrapAsyncMethod(client.listVerifiedEmailAddressesAsync, listVerifiedEmailAddressesRequest)
def sendEmail(
sendEmailRequest: SendEmailRequest
): Future[SendEmailResult] =
wrapAsyncMethod(client.sendEmailAsync, sendEmailRequest)
def sendEmail(
source: String,
destination: Destination,
message: Message
): Future[SendEmailResult] =
sendEmail(new SendEmailRequest(source, destination, message))
def sendRawEmail(
sendRawEmailRequest: SendRawEmailRequest
): Future[SendRawEmailResult] =
wrapAsyncMethod(client.sendRawEmailAsync, sendRawEmailRequest)
def sendRawEmail(
rawMessage: RawMessage
): Future[SendRawEmailResult] =
sendRawEmail(new SendRawEmailRequest(rawMessage))
def setIdentityDkimEnabled(
setIdentityDkimEnabledRequest: SetIdentityDkimEnabledRequest
): Future[SetIdentityDkimEnabledResult] =
wrapAsyncMethod(client.setIdentityDkimEnabledAsync, setIdentityDkimEnabledRequest)
def setIdentityDkimEnabled(
dkimEnabled: Boolean,
identity: String
): Future[SetIdentityDkimEnabledResult] =
setIdentityDkimEnabled(
new SetIdentityDkimEnabledRequest()
.withDkimEnabled(dkimEnabled)
.withIdentity(identity)
)
def setIdentityFeedbackForwardingEnabled(
setIdentityFeedbackForwardingEnabledRequest: SetIdentityFeedbackForwardingEnabledRequest
): Future[SetIdentityFeedbackForwardingEnabledResult] =
wrapAsyncMethod(client.setIdentityFeedbackForwardingEnabledAsync, setIdentityFeedbackForwardingEnabledRequest)
def setIdentityFeedbackForwardingEnabled(
forwardingEnabled: Boolean,
identity: String
): Future[SetIdentityFeedbackForwardingEnabledResult] =
setIdentityFeedbackForwardingEnabled(
new SetIdentityFeedbackForwardingEnabledRequest()
.withForwardingEnabled(forwardingEnabled)
.withIdentity(identity)
)
def setIdentityNotificationTopic(
setIdentityNotificationTopicRequest: SetIdentityNotificationTopicRequest
): Future[SetIdentityNotificationTopicResult] =
wrapAsyncMethod(client.setIdentityNotificationTopicAsync, setIdentityNotificationTopicRequest)
def shutdown(): Unit =
client.shutdown()
def verifyDomainDkim(
verifyDomainDkimRequest: VerifyDomainDkimRequest
): Future[VerifyDomainDkimResult] =
wrapAsyncMethod(client.verifyDomainDkimAsync, verifyDomainDkimRequest)
def verifyDomainDkim(
domain: String
): Future[VerifyDomainDkimResult] =
verifyDomainDkim(
new VerifyDomainDkimRequest()
.withDomain(domain)
)
def verifyDomainIdentity(
verifyDomainIdentityRequest: VerifyDomainIdentityRequest
): Future[VerifyDomainIdentityResult] =
wrapAsyncMethod(client.verifyDomainIdentityAsync, verifyDomainIdentityRequest)
def verifyDomainIdentity(
domain: String
): Future[VerifyDomainIdentityResult] =
verifyDomainIdentity(
new VerifyDomainIdentityRequest()
.withDomain(domain)
)
@deprecated("Use verifyEmailIdentity", "May 15, 2012")
def verifyEmailAddress(
verifyEmailAddressRequest: VerifyEmailAddressRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.verifyEmailAddressAsync, verifyEmailAddressRequest)
def verifyEmailIdentity(
verifyEmailIdentityRequest: VerifyEmailIdentityRequest
): Future[VerifyEmailIdentityResult] =
wrapAsyncMethod(client.verifyEmailIdentityAsync, verifyEmailIdentityRequest)
def verifyEmailIdentity(
emailAddress: String
): Future[VerifyEmailIdentityResult] =
verifyEmailIdentity(
new VerifyEmailIdentityRequest()
.withEmailAddress(emailAddress)
)
}
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/GlobalSecondaryIndexSpec.scala
|
<reponame>sunilake/aws-wrap
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.scalatest.{ FlatSpec, BeforeAndAfterAll, Matchers }
import com.amazonaws.AmazonClientException
class QueryGlobalSecondaryIndexSpec
extends FlatSpec
with Matchers
with DynamoDBClient
{
import SampleData.sampleGameScores
override val tableNames = Seq(GameScore.tableName)
val mapper = AmazonDynamoDBScalaMapper(client)
override def beforeAll(): Unit = {
super.beforeAll()
tryCreateTable(GameScore.tableRequest)
awaitTableCreation(GameScore.tableName)
await(30.seconds) {
mapper.batchDump(sampleGameScores)
}
}
"DynamoDB" should s"contain the '${GameScore.tableName}' table" in {
val result = await(1.minutes) {
client.listTables()
}
result.getTableNames().asScala should contain (GameScore.tableName)
}
it should s"contain ${sampleGameScores.size} game score items" in {
await {
mapper.countScan[GameScore]()
} should equal (sampleGameScores.size)
}
it should s"return top ten high scores using global secondary index" in {
val result = await {
mapper.query[GameScore](
GameScore.globalSecondaryIndexName,
GameScore.Attributes.gameTitle,
"Galaxy Invaders",
Some(GameScore.Attributes.topScore -> QueryCondition.greaterThan(0)),
false,
10 // top ten high scores
)
}
result should have size (2)
result(0).userId should equal ("101")
result(1).userId should equal ("103")
val result2 = await {
mapper.queryOnce[GameScore](
GameScore.globalSecondaryIndexName,
GameScore.Attributes.gameTitle,
"Galaxy Invaders",
Some(GameScore.Attributes.topScore -> QueryCondition.greaterThan(0)),
false,
10 // top ten high scores
)
}
result2 should have size (2)
result2(0).userId should equal ("101")
result2(1).userId should equal ("103")
}
}
|
sunilake/aws-wrap
|
src/main/scala/package.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames
import scala.concurrent.{Future, Promise}
import java.util.concurrent.{Future => JFuture}
import com.amazonaws.AmazonWebServiceRequest
import com.amazonaws.handlers.AsyncHandler
package object awswrap {
private def promiseToAsyncHandler[Request <: AmazonWebServiceRequest, Result](p: Promise[Result]) =
new AsyncHandler[Request, Result] {
override def onError(exception: Exception): Unit = { p.failure(exception); () }
override def onSuccess(request: Request, result: Result): Unit = { p.success(result); () }
}
private def promiseToVoidAsyncHandler[Request <: AmazonWebServiceRequest](p: Promise[Unit]) =
new AsyncHandler[Request, Void] {
override def onError(exception: Exception): Unit = { p.failure(exception); () }
override def onSuccess(request: Request, result: Void): Unit = { p.success(()); () }
}
@inline
private[awswrap] def wrapAsyncMethod[Request <: AmazonWebServiceRequest, Result](
f: (Request, AsyncHandler[Request, Result]) => JFuture[Result],
request: Request
): Future[Result] = {
val p = Promise[Result]
f(request, promiseToAsyncHandler(p))
p.future
}
@inline
private[awswrap] def wrapVoidAsyncMethod[Request <: AmazonWebServiceRequest](
f: (Request, AsyncHandler[Request, Void]) => JFuture[Void],
request: Request
): Future[Unit] = {
val p = Promise[Unit]
f(request, promiseToVoidAsyncHandler(p))
p.future
}
}
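// A minimal sketch of how these wrappers are used within the library (hedged: the request
// value is illustrative; both helpers are private[awswrap]):
//
//   def createTable(req: CreateTableRequest): Future[CreateTableResult] =
//     wrapAsyncMethod(client.createTableAsync, req)
//
// The Java SDK invokes the AsyncHandler on its own executor, and the returned Future
// completes (or fails) when that callback fires.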
|
sunilake/aws-wrap
|
src/main/scala/dynamodb/conditions.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import scala.collection.JavaConverters._
import com.amazonaws.services.dynamodbv2.model.{ComparisonOperator, Condition}
private[dynamodb] trait AttributeConditions {
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def equalTo[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.EQ)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def lessThan[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.LT)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def lessThanOrEqual[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.LE)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def greaterThan[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.GT)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def greaterThanOrEqual[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.GE)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def beginsWith(attrVal: String): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.BEGINS_WITH)
.withAttributeValueList(new AttributeValue().withS(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def beginsWith(attrVal: Array[Byte]): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.BEGINS_WITH)
.withAttributeValueList(new AttributeValue().withB(java.nio.ByteBuffer.wrap(attrVal)))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def between[K](lower: K, upper: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.BETWEEN)
.withAttributeValueList(conv(lower), conv(upper))
}
/**
* A factory of Conditions for queries.
*
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
object QueryCondition extends AttributeConditions
/**
* A factory of Conditions for scans.
*
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
object ScanCondition extends AttributeConditions {
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def notEqualTo[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.NE)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
val isNull: Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.NULL)
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
val notNull: Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.NOT_NULL)
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def contains[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.CONTAINS)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def doesNotContain[K](attrVal: K)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.NOT_CONTAINS)
.withAttributeValueList(conv(attrVal))
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def in[K](attrVals: K*)(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.IN)
.withAttributeValueList(attrVals.map(conv).asJavaCollection)
/**
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html DynamoDB API Reference]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/Condition.html AWS Java SDK]]
*/
def in[K](attrVals: Set[K])(implicit conv: K => AttributeValue): Condition =
new Condition()
.withComparisonOperator(ComparisonOperator.IN)
.withAttributeValueList(attrVals.map(conv).asJavaCollection)
}
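// Usage sketch (hedged: assumes the implicit K => AttributeValue conversions this package
// provides for common key types are in scope):
//
//   val topScores = QueryCondition.greaterThan(0)       // range-key condition for a query
//   val inRange   = QueryCondition.between(10, 20)      // inclusive BETWEEN condition
//   val prefixed  = ScanCondition.beginsWith("Galaxy")  // string prefix condition for a scan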
|
sunilake/aws-wrap
|
src/main/scala/dynamodb/schema.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model.{
AttributeDefinition, KeySchemaElement, KeyType,
ProvisionedThroughput, ScalarAttributeType
}
/**
* Convenience methods for constructing DynamoDB table schemas.
*
* @see [[http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html DynamoDB API Reference]]
*/
object Schema {
/**
* Constructs a
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/ProvisionedThroughput.html ProvisionedThroughput]]
* model.
*
* @param readCapacityUnits
* the number of read capacity units to provision.
* @param writeCapacityUnits
* the number of write capacity units to provision.
* @return a ProvisionedThroughput model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AWS Java SDK]]
*/
def provisionedThroughput(readCapacityUnits: Long, writeCapacityUnits: Long): ProvisionedThroughput =
new ProvisionedThroughput()
.withReadCapacityUnits(readCapacityUnits)
.withWriteCapacityUnits(writeCapacityUnits)
/**
* Constructs an
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AttributeDefinition]]
* model for a string attribute.
*
* @param attributeName
* the name of the attribute.
* @return an AttributeDefinition model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AWS Java SDK]]
*/
def stringAttribute(attributeName: String): AttributeDefinition =
new AttributeDefinition()
.withAttributeName(attributeName)
.withAttributeType(ScalarAttributeType.S)
/**
* Constructs an
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AttributeDefinition]]
* model for a number attribute.
*
* @param attributeName
* the name of the attribute.
* @return an AttributeDefinition model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AWS Java SDK]]
*/
def numberAttribute(attributeName: String): AttributeDefinition =
new AttributeDefinition()
.withAttributeName(attributeName)
.withAttributeType(ScalarAttributeType.N)
/**
* Constructs an
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AttributeDefinition]]
   * model for a binary attribute.
*
* @param attributeName
* the name of the attribute.
* @return an AttributeDefinition model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/AttributeDefinition.html AWS Java SDK]]
*/
def binaryAttribute(attributeName: String): AttributeDefinition =
new AttributeDefinition()
.withAttributeName(attributeName)
.withAttributeType(ScalarAttributeType.B)
/**
* Constructs a
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/KeySchemaElement.html KeySchemaElement]]
* model for a hash key.
*
* @param attributeName
* the name of the attribute.
* @return a KeySchemaElement model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/KeySchemaElement.html AWS Java SDK]]
*/
def hashKey(attributeName: String): KeySchemaElement =
new KeySchemaElement()
.withAttributeName(attributeName)
.withKeyType(KeyType.HASH)
/**
* Constructs a
* [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/KeySchemaElement.html KeySchemaElement]]
* model for a range key.
*
* @param attributeName
* the name of the attribute.
* @return a KeySchemaElement model.
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/KeySchemaElement.html AWS Java SDK]]
*/
def rangeKey(attributeName: String): KeySchemaElement =
new KeySchemaElement()
.withAttributeName(attributeName)
.withKeyType(KeyType.RANGE)
}
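// Usage sketch, mirroring the integration specs later in this document (table and
// attribute names are illustrative):
//
//   new CreateTableRequest()
//     .withTableName("Scores")
//     .withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
//     .withAttributeDefinitions(Schema.stringAttribute("UserId"), Schema.stringAttribute("GameTitle"))
//     .withKeySchema(Schema.hashKey("UserId"), Schema.rangeKey("GameTitle"))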
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/Forum.scala
|
<filename>integration/src/it/scala/dynamodb/Forum.scala<gh_stars>0
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model._
case class Forum(
name: String,
category: String,
threads: Long,
messages: Long,
views: Long
)
object Forum {
val tableName = "Forum"
val tableRequest =
new CreateTableRequest()
.withTableName(Forum.tableName)
.withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
.withAttributeDefinitions(Schema.stringAttribute(Attributes.name))
.withKeySchema(Schema.hashKey(Attributes.name))
object Attributes {
val name = "Name"
val category = "Category"
val threads = "Threads"
val messages = "Messages"
val views = "Views"
}
implicit object forumSerializer extends DynamoDBSerializer[Forum] {
override val tableName = Forum.tableName
override val hashAttributeName = Attributes.name
override def primaryKeyOf(forum: Forum) =
Map(Attributes.name -> forum.name)
override def toAttributeMap(forum: Forum) =
Map(
Attributes.name -> forum.name,
Attributes.category -> forum.category,
Attributes.threads -> forum.threads,
Attributes.messages -> forum.messages,
Attributes.views -> forum.views
)
override def fromAttributeMap(item: collection.mutable.Map[String, AttributeValue]) =
Forum(
name = item(Attributes.name),
category = item(Attributes.category),
threads = item(Attributes.threads),
messages = item(Attributes.messages),
views = item(Attributes.views)
)
}
}
|
sunilake/aws-wrap
|
integration/src/it/scala/s3/S3ClientHelper.scala
|
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package s3
import org.scalatest.{Suite, BeforeAndAfterAll}
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3._
import com.amazonaws.services.s3.transfer._
import org.slf4j.Logger
import org.slf4j.LoggerFactory
trait S3ClientHelper
extends BeforeAndAfterAll
with AwaitHelper
{ self: Suite =>
private val logger: Logger = LoggerFactory.getLogger(self.getClass)
val client = {
val c = new AmazonS3ScalaClient(new BasicAWSCredentials("FAKE_ACCESS_KEY", "FAKE_SECRET_KEY"))
c.client.setEndpoint("http://localhost:4000")
c.client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true))
c
}
val transferManager = new TransferManager(client.client)
val bucketNames: Seq[String]
override def beforeAll(): Unit = {
bucketNames foreach { name =>
logger.info(s"Creating bucket $name")
client.client.createBucket(name)
}
super.beforeAll()
}
override def afterAll(): Unit = {
try {
super.afterAll()
bucketNames foreach { name =>
logger.info(s"Deleting bucket $name")
client.client.deleteBucket(name)
}
} finally {
transferManager.shutdownNow()
client.shutdown()
}
}
}
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/Reply.scala
|
<gh_stars>0
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
case class Reply(
id: String,
replyDateTime: DateTime,
message: String,
postedBy: String
)
object Reply {
val tableName = "Reply"
val secondaryIndexName = "PostedByIndex"
val tableRequest =
new CreateTableRequest()
.withTableName(Reply.tableName)
.withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
.withAttributeDefinitions(
Schema.stringAttribute(Attributes.id),
Schema.stringAttribute(Attributes.replyDateTime),
Schema.stringAttribute(Attributes.postedBy)
)
.withKeySchema(
Schema.hashKey(Attributes.id),
Schema.rangeKey(Attributes.replyDateTime)
)
.withLocalSecondaryIndexes(
new LocalSecondaryIndex()
.withIndexName(Reply.secondaryIndexName)
.withKeySchema(
Schema.hashKey(Attributes.id),
Schema.rangeKey(Attributes.postedBy)
)
.withProjection(
new Projection()
.withProjectionType(ProjectionType.KEYS_ONLY)
)
)
object Attributes {
val id = "Id"
val replyDateTime = "ReplyDateTime"
val message = "Message"
val postedBy = "PostedBy"
}
implicit object replySerializer extends DynamoDBSerializer[Reply] {
private val fmt = ISODateTimeFormat.dateTime
override val tableName = Reply.tableName
override val hashAttributeName = Attributes.id
override val rangeAttributeName = Some(Attributes.replyDateTime)
override def primaryKeyOf(reply: Reply) =
Map(
Attributes.id -> reply.id,
Attributes.replyDateTime -> fmt.print(reply.replyDateTime)
)
override def toAttributeMap(reply: Reply) =
Map(
Attributes.id -> reply.id,
Attributes.replyDateTime -> fmt.print(reply.replyDateTime),
Attributes.message -> reply.message,
Attributes.postedBy -> reply.postedBy
)
override def fromAttributeMap(item: collection.mutable.Map[String, AttributeValue]) =
Reply(
id = item(Attributes.id),
replyDateTime = fmt.parseDateTime(item(Attributes.replyDateTime)),
message = item(Attributes.message),
postedBy = item(Attributes.postedBy)
)
}
}
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/GameScore.scala
|
<gh_stars>0
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
case class GameScore(
userId: String,
gameTitle: String,
topScore: Long,
topScoreDateTime: DateTime,
wins: Long,
losses: Long
)
object GameScore {
val tableName = "GameScores"
val globalSecondaryIndexName = "GameTitleIndex"
val tableRequest =
new CreateTableRequest()
.withTableName(GameScore.tableName)
.withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
.withAttributeDefinitions(
Schema.stringAttribute(Attributes.userId),
Schema.stringAttribute(Attributes.gameTitle),
Schema.numberAttribute(Attributes.topScore)
)
.withKeySchema(
Schema.hashKey(Attributes.userId),
Schema.rangeKey(Attributes.gameTitle)
)
.withGlobalSecondaryIndexes(
new GlobalSecondaryIndex()
.withIndexName(GameScore.globalSecondaryIndexName)
.withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
.withKeySchema(
Schema.hashKey(Attributes.gameTitle),
Schema.rangeKey(Attributes.topScore)
)
.withProjection(
new Projection()
.withProjectionType(ProjectionType.KEYS_ONLY)
)
)
object Attributes {
val userId = "UserId"
val gameTitle = "GameTitle"
val topScore = "TopScore"
val topScoreDateTime = "TopScoreDateTime"
val wins = "Wins"
val losses = "Losses"
}
  implicit object gameScoreSerializer extends DynamoDBSerializer[GameScore] {
private val fmt = ISODateTimeFormat.dateTime
override val tableName = GameScore.tableName
override val hashAttributeName = Attributes.userId
override val rangeAttributeName = Some(Attributes.gameTitle)
override def primaryKeyOf(score: GameScore) =
Map(
Attributes.userId -> score.userId,
Attributes.gameTitle -> score.gameTitle
)
override def toAttributeMap(score: GameScore) =
Map(
Attributes.userId -> score.userId,
Attributes.gameTitle -> score.gameTitle,
Attributes.topScore -> score.topScore,
Attributes.topScoreDateTime -> fmt.print(score.topScoreDateTime),
Attributes.wins -> score.wins,
Attributes.losses -> score.losses
)
override def fromAttributeMap(item: collection.mutable.Map[String, AttributeValue]) =
GameScore(
userId = item(Attributes.userId),
gameTitle = item(Attributes.gameTitle),
topScore = item(Attributes.topScore),
topScoreDateTime = fmt.parseDateTime(item(Attributes.topScoreDateTime)),
wins = item(Attributes.wins),
losses = item(Attributes.losses)
)
}
}
|
sunilake/aws-wrap
|
scratch/build.sbt
|
<filename>scratch/build.sbt
libraryDependencies ++= Seq(
Dependencies.Compile.awsJavaSDK_dynamodb,
Dependencies.Compile.awsJavaSDK_s3,
Dependencies.Compile.jodaTime,
Dependencies.Compile.jodaConvert,
Dependencies.Compile.logback
)
publish := ()
publishLocal := ()
|
sunilake/aws-wrap
|
src/main/scala/dynamodb/dynamodb.scala
|
<reponame>sunilake/aws-wrap<filename>src/main/scala/dynamodb/dynamodb.scala
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package dynamodb
import scala.concurrent.{Future, ExecutionContext}
import scala.collection.JavaConverters._
import java.util.concurrent.ExecutorService
import com.amazonaws.services.dynamodbv2._
import com.amazonaws.services.dynamodbv2.model._
/**
* A lightweight wrapper for [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBAsyncClient.html AmazonDynamoDBAsyncClient]].
*
* @constructor construct a wrapper client from an Amazon async client.
* @param client
* the underlying [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBAsyncClient.html AmazonDynamoDBAsyncClient]].
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBAsyncClient.html AmazonDynamoDBAsyncClient]]
*/
class AmazonDynamoDBScalaClient(val client: AmazonDynamoDBAsyncClient) {
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#batchGetItem(com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest) AWS Java SDK]]
*/
def batchGetItem(
batchGetItemRequest: BatchGetItemRequest
): Future[BatchGetItemResult] =
wrapAsyncMethod[BatchGetItemRequest, BatchGetItemResult](client.batchGetItemAsync, batchGetItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#batchGetItem(com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest) AWS Java SDK]]
*/
def batchGetItem(
requestItems: Map[String, KeysAndAttributes]
): Future[BatchGetItemResult] =
batchGetItem(
new BatchGetItemRequest()
.withRequestItems(requestItems.asJava)
)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#batchWriteItem(com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest) AWS Java SDK]]
*/
def batchWriteItem(
batchWriteItemRequest: BatchWriteItemRequest
): Future[BatchWriteItemResult] =
wrapAsyncMethod[BatchWriteItemRequest, BatchWriteItemResult](client.batchWriteItemAsync, batchWriteItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#batchWriteItem(com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest) AWS Java SDK]]
*/
def batchWriteItem(
requestItems: Map[String, Seq[WriteRequest]]
): Future[BatchWriteItemResult] =
batchWriteItem(
new BatchWriteItemRequest()
.withRequestItems(requestItems.map(p => (p._1, p._2.asJava)).asJava)
)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#createTable(com.amazonaws.services.dynamodbv2.model.CreateTableRequest) AWS Java SDK]]
*/
def createTable(
createTableRequest: CreateTableRequest
): Future[CreateTableResult] =
wrapAsyncMethod(client.createTableAsync, createTableRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#createTable(com.amazonaws.services.dynamodbv2.model.CreateTableRequest) AWS Java SDK]]
*/
def createTable(
tableName: String,
provisionedThroughput: ProvisionedThroughput,
attributeDefinitions: Seq[AttributeDefinition],
keySchema: Seq[KeySchemaElement],
localSecondaryIndexes: Seq[LocalSecondaryIndex] = Seq.empty
): Future[CreateTableResult] =
createTable(
new CreateTableRequest()
.withTableName(tableName)
.withProvisionedThroughput(provisionedThroughput)
.withAttributeDefinitions(attributeDefinitions.asJavaCollection)
.withKeySchema(keySchema.asJavaCollection)
.withLocalSecondaryIndexes(localSecondaryIndexes.asJavaCollection)
)
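/* Example (sketch): the convenience overload combined with the Schema helpers
 * used elsewhere in this repository. The table and attribute names are
 * illustrative; a table with local secondary indexes would pass them explicitly.
 *
 * {{{
 * client.createTable(
 *   tableName             = "Forum",
 *   provisionedThroughput = Schema.provisionedThroughput(10L, 5L),
 *   attributeDefinitions  = Seq(Schema.stringAttribute("Name")),
 *   keySchema             = Seq(Schema.hashKey("Name"))
 * )
 * }}}
 */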
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#deleteItem(com.amazonaws.services.dynamodbv2.model.DeleteItemRequest) AWS Java SDK]]
*/
def deleteItem(
deleteItemRequest: DeleteItemRequest
): Future[DeleteItemResult] =
wrapAsyncMethod(client.deleteItemAsync, deleteItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#deleteItem(com.amazonaws.services.dynamodbv2.model.DeleteItemRequest) AWS Java SDK]]
*/
def deleteItem(
tableName: String,
key: Map[String, AttributeValue]
): Future[DeleteItemResult] =
deleteItem(new DeleteItemRequest(tableName, key.asJava))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#deleteTable(com.amazonaws.services.dynamodbv2.model.DeleteTableRequest) AWS Java SDK]]
*/
def deleteTable(
deleteTableRequest: DeleteTableRequest
): Future[DeleteTableResult] =
wrapAsyncMethod[DeleteTableRequest, DeleteTableResult](client.deleteTableAsync, deleteTableRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#deleteTable(com.amazonaws.services.dynamodbv2.model.DeleteTableRequest) AWS Java SDK]]
*/
def deleteTable(
tableName: String
): Future[DeleteTableResult] =
deleteTable(new DeleteTableRequest(tableName))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#describeTable(com.amazonaws.services.dynamodbv2.model.DescribeTableRequest) AWS Java SDK]]
*/
def describeTable(
describeTableRequest: DescribeTableRequest
): Future[DescribeTableResult] =
wrapAsyncMethod[DescribeTableRequest, DescribeTableResult](client.describeTableAsync, describeTableRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#describeTable(com.amazonaws.services.dynamodbv2.model.DescribeTableRequest) AWS Java SDK]]
*/
def describeTable(
tableName: String
): Future[DescribeTableResult] =
describeTable(
new DescribeTableRequest()
.withTableName(tableName)
)
def getExecutorService(): ExecutorService =
client.getExecutorService()
def getExecutionContext(): ExecutionContext =
ExecutionContext.fromExecutorService(client.getExecutorService())
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#getItem(com.amazonaws.services.dynamodbv2.model.GetItemRequest) AWS Java SDK]]
*/
def getItem(
getItemRequest: GetItemRequest
): Future[GetItemResult] =
wrapAsyncMethod(client.getItemAsync, getItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#getItem(com.amazonaws.services.dynamodbv2.model.GetItemRequest) AWS Java SDK]]
*/
def getItem(
tableName: String,
key: Map[String, AttributeValue],
attributesToGet: Iterable[String] = Iterable.empty,
consistentRead: Boolean = false
): Future[GetItemResult] =
getItem(
new GetItemRequest(tableName, key.asJava)
.withAttributesToGet(attributesToGet.asJavaCollection)
.withConsistentRead(consistentRead)
)
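/* Example (sketch): a strongly consistent point read using the convenience
 * overload; the table name and key are illustrative.
 *
 * {{{
 * val result: Future[GetItemResult] =
 *   client.getItem(
 *     tableName      = "Forum",
 *     key            = Map("Name" -> new AttributeValue().withS("Amazon DynamoDB")),
 *     consistentRead = true
 *   )
 * }}}
 */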
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#listTables(com.amazonaws.services.dynamodbv2.model.ListTablesRequest) AWS Java SDK]]
*/
def listTables(
listTablesRequest: ListTablesRequest
): Future[ListTablesResult] =
wrapAsyncMethod[ListTablesRequest, ListTablesResult](client.listTablesAsync, listTablesRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#listTables(com.amazonaws.services.dynamodbv2.model.ListTablesRequest) AWS Java SDK]]
*/
def listTables(): Future[ListTablesResult] =
listTables(new ListTablesRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#putItem(com.amazonaws.services.dynamodbv2.model.PutItemRequest) AWS Java SDK]]
*/
def putItem(
putItemRequest: PutItemRequest
): Future[PutItemResult] =
wrapAsyncMethod(client.putItemAsync, putItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#putItem(com.amazonaws.services.dynamodbv2.model.PutItemRequest) AWS Java SDK]]
*/
def putItem(
tableName: String,
item: Map[String, AttributeValue]
): Future[PutItemResult] =
putItem(new PutItemRequest(tableName, item.asJava))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#query(com.amazonaws.services.dynamodbv2.model.QueryRequest) AWS Java SDK]]
*/
def query(
queryRequest: QueryRequest
): Future[QueryResult] =
wrapAsyncMethod(client.queryAsync, queryRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#query(com.amazonaws.services.dynamodbv2.model.QueryRequest) AWS Java SDK]]
*/
def query(
tableName: String,
keyConditions: Map[String, Condition]
): Future[QueryResult] =
query(
new QueryRequest()
.withTableName(tableName)
.withKeyConditions(keyConditions.asJava)
)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#scan(com.amazonaws.services.dynamodbv2.model.ScanRequest) AWS Java SDK]]
*/
def scan(
scanRequest: ScanRequest
): Future[ScanResult] =
wrapAsyncMethod(client.scanAsync, scanRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#scan(com.amazonaws.services.dynamodbv2.model.ScanRequest) AWS Java SDK]]
*/
def scan(
tableName: String,
scanFilter: Map[String, Condition] = Map.empty
): Future[ScanResult] =
scan(
new ScanRequest(tableName)
.withScanFilter(scanFilter.asJava)
)
def shutdown(): Unit =
client.shutdown()
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#updateItem(com.amazonaws.services.dynamodbv2.model.UpdateItemRequest) AWS Java SDK]]
*/
def updateItem(
updateItemRequest: UpdateItemRequest
): Future[UpdateItemResult] =
wrapAsyncMethod(client.updateItemAsync, updateItemRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#updateItem(com.amazonaws.services.dynamodbv2.model.UpdateItemRequest) AWS Java SDK]]
*/
def updateItem(
tableName: String,
key: Map[String, AttributeValue],
attributeUpdates: Map[String, AttributeValueUpdate]
): Future[UpdateItemResult] =
updateItem(new UpdateItemRequest(tableName, key.asJava, attributeUpdates.asJava))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#updateTable(com.amazonaws.services.dynamodbv2.model.UpdateTableRequest) AWS Java SDK]]
*/
def updateTable(
updateTableRequest: UpdateTableRequest
): Future[UpdateTableResult] =
wrapAsyncMethod(client.updateTableAsync, updateTableRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDB.html#updateTable(com.amazonaws.services.dynamodbv2.model.UpdateTableRequest) AWS Java SDK]]
*/
def updateTable(
tableName: String,
provisionedThroughput: ProvisionedThroughput
): Future[UpdateTableResult] =
updateTable(
new UpdateTableRequest()
.withTableName(tableName)
.withProvisionedThroughput(provisionedThroughput)
)
}
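/* Example (sketch): constructing the wrapper around the Amazon async client
 * and reusing its thread pool as an ExecutionContext. Credential and region
 * configuration are elided for brevity.
 *
 * {{{
 * import com.amazonaws.services.dynamodbv2.AmazonDynamoDBAsyncClient
 *
 * val client = new AmazonDynamoDBScalaClient(new AmazonDynamoDBAsyncClient())
 * implicit val ec: ExecutionContext = client.getExecutionContext()
 * }}}
 */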
|
sunilake/aws-wrap
|
src/main/scala/cloudwatch/cloudwatch.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package cloudwatch
import scala.collection.JavaConverters._
import scala.concurrent.Future
import com.amazonaws.services.cloudwatch.AmazonCloudWatchAsyncClient
import com.amazonaws.services.cloudwatch.model._
/**
 * A lightweight wrapper for [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatchAsyncClient.html AmazonCloudWatchAsyncClient]].
 *
 * @constructor construct a wrapper client from an Amazon async client.
 * @param client
 *     the underlying [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatchAsyncClient.html AmazonCloudWatchAsyncClient]].
 * @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatchAsyncClient.html AmazonCloudWatchAsyncClient]]
*/
class AmazonCloudWatchScalaClient(val client: AmazonCloudWatchAsyncClient) {
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#deleteAlarms(com.amazonaws.services.cloudwatch.model.DeleteAlarmsRequest) AWS Java SDK]]
*/
def deleteAlarms(
deleteAlarmsRequest: DeleteAlarmsRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.deleteAlarmsAsync, deleteAlarmsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#deleteAlarms(com.amazonaws.services.cloudwatch.model.DeleteAlarmsRequest) AWS Java SDK]]
*/
def deleteAlarms(alarmNames: String*): Future[Unit] =
deleteAlarms(new DeleteAlarmsRequest().withAlarmNames(alarmNames: _*))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#describeAlarmHistory(com.amazonaws.services.cloudwatch.model.DescribeAlarmHistoryRequest) AWS Java SDK]]
*/
def describeAlarmHistory(
describeAlarmHistoryRequest: DescribeAlarmHistoryRequest
): Future[DescribeAlarmHistoryResult] =
wrapAsyncMethod(client.describeAlarmHistoryAsync, describeAlarmHistoryRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#describeAlarmHistory() AWS Java SDK]]
*/
def describeAlarmHistory(): Future[DescribeAlarmHistoryResult] =
describeAlarmHistory(new DescribeAlarmHistoryRequest())
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#describeAlarms(com.amazonaws.services.cloudwatch.model.DescribeAlarmsRequest) AWS Java SDK]]
*/
def describeAlarms(
describeAlarmRequest: DescribeAlarmsRequest
): Future[DescribeAlarmsResult] =
wrapAsyncMethod(client.describeAlarmsAsync, describeAlarmRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#describeAlarms() AWS Java SDK]]
*/
def describeAlarms(): Future[DescribeAlarmsResult] =
describeAlarms(new DescribeAlarmsRequest())
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#describeAlarmsForMetric(com.amazonaws.services.cloudwatch.model.DescribeAlarmsForMetricRequest) AWS Java SDK]]
*/
def describeAlarmsForMetric(
describeAlarmsForMetricRequest: DescribeAlarmsForMetricRequest
): Future[DescribeAlarmsForMetricResult] =
wrapAsyncMethod(client.describeAlarmsForMetricAsync, describeAlarmsForMetricRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#disableAlarmActions(com.amazonaws.services.cloudwatch.model.DisableAlarmActionsRequest) AWS Java SDK]]
*/
def disableAlarmActions(
disableAlarmActionsRequest: DisableAlarmActionsRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.disableAlarmActionsAsync, disableAlarmActionsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#disableAlarmActions(com.amazonaws.services.cloudwatch.model.DisableAlarmActionsRequest) AWS Java SDK]]
*/
def disableAlarmActions(alarmNames: String*): Future[Unit] =
disableAlarmActions(new DisableAlarmActionsRequest().withAlarmNames(alarmNames: _*))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#enableAlarmActions(com.amazonaws.services.cloudwatch.model.EnableAlarmActionsRequest) AWS Java SDK]]
*/
def enableAlarmActions(
enableAlarmActionsRequest: EnableAlarmActionsRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.enableAlarmActionsAsync, enableAlarmActionsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#enableAlarmActions(com.amazonaws.services.cloudwatch.model.EnableAlarmActionsRequest) AWS Java SDK]]
*/
def enableAlarmActions(alarmNames: String*): Future[Unit] =
enableAlarmActions(new EnableAlarmActionsRequest().withAlarmNames(alarmNames: _*))
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#getMetricStatistics(com.amazonaws.services.cloudwatch.model.GetMetricStatisticsRequest) AWS Java SDK]]
*/
def getMetricStatistics(
getMetricStatisticsRequest: GetMetricStatisticsRequest
): Future[GetMetricStatisticsResult] =
wrapAsyncMethod(client.getMetricStatisticsAsync, getMetricStatisticsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#listMetrics(com.amazonaws.services.cloudwatch.model.ListMetricsRequest) AWS Java SDK]]
*/
def listMetrics(
listMetricsRequest: ListMetricsRequest
): Future[ListMetricsResult] =
wrapAsyncMethod(client.listMetricsAsync, listMetricsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#listMetrics() AWS Java SDK]]
*/
def listMetrics(): Future[ListMetricsResult] =
listMetrics(new ListMetricsRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#putMetricAlarm(com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest) AWS Java SDK]]
*/
def putMetricAlarm(
putMetricAlarmRequest: PutMetricAlarmRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.putMetricAlarmAsync, putMetricAlarmRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#putMetricData(com.amazonaws.services.cloudwatch.model.PutMetricDataRequest) AWS Java SDK]]
*/
def putMetricData(
putMetricDataRequest: PutMetricDataRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.putMetricDataAsync, putMetricDataRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#putMetricData(com.amazonaws.services.cloudwatch.model.PutMetricDataRequest) AWS Java SDK]]
*/
def putMetricData(
namespace: String,
metricData: Iterable[MetricDatum]
): Future[Unit] =
putMetricData(
new PutMetricDataRequest()
.withNamespace(namespace)
.withMetricData(metricData.asJavaCollection)
)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#setAlarmState(com.amazonaws.services.cloudwatch.model.SetAlarmStateRequest) AWS Java SDK]]
*/
def setAlarmState(
setAlarmStateRequest: SetAlarmStateRequest
): Future[Unit] =
wrapVoidAsyncMethod(client.setAlarmStateAsync, setAlarmStateRequest)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#setAlarmState(com.amazonaws.services.cloudwatch.model.SetAlarmStateRequest) AWS Java SDK]]
*/
def setAlarmState(
alarmName: String,
stateReason: String,
stateValue: StateValue,
stateReasonData: String = ""
): Future[Unit] =
setAlarmState(
new SetAlarmStateRequest()
.withAlarmName(alarmName)
.withStateReason(stateReason)
.withStateValue(stateValue)
.withStateReasonData(stateReasonData)
)
/**
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/cloudwatch/AmazonCloudWatch.html#shutdown() AWS Java SDK]]
*/
def shutdown(): Unit =
client.shutdown()
}
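/* Example (sketch): publishing a single datum through the putMetricData
 * convenience overload; the namespace, metric name, and value are illustrative,
 * and `cloudWatch` is assumed to be an AmazonCloudWatchScalaClient.
 *
 * {{{
 * val datum =
 *   new MetricDatum()
 *     .withMetricName("RequestLatency")
 *     .withUnit(StandardUnit.Milliseconds)
 *     .withValue(42.0)
 *
 * cloudWatch.putMetricData("MyApp", Seq(datum))
 * }}}
 */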
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/SampleData.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import org.joda.time.DateTime
object SampleData {
val t0 = DateTime.now.minusDays(1)
val t1 = DateTime.now.minusDays(7)
val t2 = DateTime.now.minusDays(14)
val t3 = DateTime.now.minusDays(21)
val sampleForums: Seq[Forum] = Seq(
Forum(
name = "Amazon DynamoDB",
category = "Amazon Web Services",
threads = 2,
messages = 3,
views = 1000
),
Forum(
name = "Amazon S3",
category = "Amazon Web Services",
threads = 1,
messages = 1,
views = 500
)
)
val sampleThreads: Seq[ForumThread] = Seq(
ForumThread(
forumName = "Amazon DynamoDB",
subject = "DynamoDB Thread 1",
message = "DynamoDB thread 1 message",
lastPostedBy = "User A",
lastPostedDateTime = t2,
views = 0,
replies = 0,
answered = 0,
tags = Set("index", "primarykey", "table")
),
ForumThread(
forumName = "Amazon DynamoDB",
subject = "DynamoDB Thread 2",
message = "DynamoDB thread 2 message",
lastPostedBy = "User A",
lastPostedDateTime = t3,
views = 0,
replies = 0,
answered = 0,
tags = Set("index", "primarykey", "rangekey")
),
ForumThread(
forumName = "Amazon S3",
subject = "S3 Thread 1",
message = "S3 thread 1 message",
lastPostedBy = "User A",
lastPostedDateTime = t1,
views = 0,
replies = 0,
answered = 0,
tags = Set("largeobjects", "multipart upload")
)
)
val sampleReplies: Seq[Reply] = Seq(
Reply(
id = "Amazon DynamoDB#DynamoDB Thread 1",
replyDateTime = t3,
message = "DynamoDB Thread 1 Reply 1 text",
postedBy = "User A"
),
Reply(
id = "Amazon DynamoDB#DynamoDB Thread 1",
replyDateTime = t2,
message = "DynamoDB Thread 1 Reply 2 text",
postedBy = "User B"
),
Reply(
id = "Amazon DynamoDB#DynamoDB Thread 2",
replyDateTime = t1,
message = "DynamoDB Thread 2 Reply 1 text",
postedBy = "User A"
),
Reply(
id = "Amazon DynamoDB#DynamoDB Thread 2",
replyDateTime = t0,
message = "DynamoDB Thread 2 Reply 2 text",
postedBy = "User A"
)
)
val sampleGameScores: Seq[GameScore] = Seq(
GameScore(
userId = "101",
gameTitle = "Galaxy Invaders",
topScore = 5842,
topScoreDateTime = DateTime.now.minusDays(1),
wins = 21,
losses = 72
),
GameScore(
userId = "101",
gameTitle = "Meteor Blasters",
topScore = 1000,
topScoreDateTime = DateTime.now.minusDays(2),
wins = 12,
losses = 3
),
GameScore(
userId = "101",
gameTitle = "Starship X",
topScore = 24,
topScoreDateTime = DateTime.now.minusDays(3),
wins = 4,
losses = 9
),
GameScore(
userId = "102",
gameTitle = "Alien Adventure",
topScore = 192,
topScoreDateTime = DateTime.now.minusDays(4),
wins = 32,
losses = 192
),
GameScore(
userId = "102",
gameTitle = "Galaxy Invaders",
topScore = 0,
topScoreDateTime = DateTime.now.minusDays(5),
wins = 0,
losses = 5
),
GameScore(
userId = "103",
gameTitle = "Attack Ships",
topScore = 3,
topScoreDateTime = DateTime.now.minusDays(6),
wins = 1,
losses = 8
),
GameScore(
userId = "103",
gameTitle = "Galaxy Invaders",
topScore = 2317,
topScoreDateTime = DateTime.now.minusDays(7),
wins = 40,
losses = 3
),
GameScore(
userId = "103",
gameTitle = "Meteor Blasters",
topScore = 723,
topScoreDateTime = DateTime.now.minusDays(8),
wins = 22,
losses = 12
),
GameScore(
userId = "103",
gameTitle = "Starship X",
topScore = 42,
topScoreDateTime = DateTime.now.minusDays(9),
wins = 4,
losses = 19
)
)
}
|
sunilake/aws-wrap
|
scaladoc.sbt
|
import scala.util.matching.Regex.Match
scalacOptions in (Compile, doc) :=
Seq(
"-encoding", "UTF-8",
"-sourcepath", baseDirectory.value.getAbsolutePath,
"-doc-source-url", s"https://github.com/dwhjames/aws-wrap/tree/v${version.value}€{FILE_PATH}.scala")
autoAPIMappings := true
apiURL := Some(url("https://dwhjames.github.io/aws-wrap/api/current/"))
apiMappings ++= {
val builder = Map.newBuilder[sbt.File, sbt.URL]
val jarFiles = (managedClasspath in Compile).value.files
jarFiles.filter(file => file.toString.contains("com.amazonaws/aws-java-sdk")).foreach { awsJarFile =>
builder += awsJarFile -> url("http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/")
}
val bootPath = System.getProperty("sun.boot.library.path")
if (bootPath ne null) {
builder += file(bootPath + "/rt.jar") -> url("http://docs.oracle.com/javase/6/docs/api/")
}
builder.result()
}
lazy val transformJavaDocLinksTask = taskKey[Unit](
"Transform JavaDoc links - replace #java.io.File with ?java/io/File.html"
)
transformJavaDocLinksTask := {
val log = streams.value.log
log.info("Transforming JavaDoc links")
val t = (target in (Compile, doc)).value
(t ** "*.html").get.filter(hasJavadocApiLink).foreach { f =>
log.debug("Transforming " + f)
val content1 = javadocApiLink.replaceAllIn(IO.read(f), transformJavaDocLinks)
val content2 = awsJavadocApiLink.replaceAllIn(content1, transformJavaDocLinks)
IO.write(f, content2)
}
}
val transformJavaDocLinks: Match => String = m =>
"href=\"" + m.group(1) + "?" + m.group(2).replace(".", "/") + ".html"
val javadocApiLink = """href=\"(http://docs\.oracle\.com/javase/6/docs/api/index\.html)#([^"]*)""".r
val awsJavadocApiLink = """href=\"(http://docs\.aws\.amazon\.com/AWSJavaSDK/latest/javadoc/index\.html)#([^"]*)""".r
def hasJavadocApiLink(f: File): Boolean = {
val content = IO.read(f)
(javadocApiLink findFirstIn content).nonEmpty ||
(awsJavadocApiLink findFirstIn content).nonEmpty
}
transformJavaDocLinksTask <<= transformJavaDocLinksTask triggeredBy (doc in Compile)
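// Example (illustrative): the two regexes and transformJavaDocLinks rewrite a
// Scaladoc-emitted fragment link such as
//   href="http://docs.oracle.com/javase/6/docs/api/index.html#java.io.File
// into the frameless form that JavaDoc can actually resolve:
//   href="http://docs.oracle.com/javase/6/docs/api/index.html?java/io/File.html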
|
sunilake/aws-wrap
|
src/main/scala/dynamodb/mapper.scala
|
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap
package dynamodb
import scala.language.implicitConversions
import scala.concurrent.{Future, ExecutionContext}
import scala.collection.JavaConverters._
import scala.collection.mutable
import java.util.{Map => JMap}
import com.amazonaws.services.dynamodbv2.model._
import org.slf4j.{Logger, LoggerFactory}
/**
* A trait for serializers that convert Scala objects
* to and from DynamoDB items.
*
* @tparam T the object type of this serializer
*/
trait DynamoDBSerializer[T] {
/**
* The DynamoDB table that this serializer operates on.
*/
def tableName: String
/**
* The name of the attribute that forms the
* primary hash key.
*/
def hashAttributeName: String
/**
* The name of the attribute that forms the
* primary range key.
*
* This is optional, as a table may not have
* a range key.
*/
def rangeAttributeName: Option[String] = None
/**
* Converts a DynamoDB item into a Scala object.
*
* @param item
* A map from attribute names to attribute values.
* @return the deserialized object of type T.
*/
def fromAttributeMap(item: mutable.Map[String, AttributeValue]): T
/**
* Converts a Scala object into a DynamoDB item.
*
* @param obj
* An object of type T.
* @return a map from attribute names to attribute values.
*/
def toAttributeMap(obj: T): Map[String, AttributeValue]
/*
* A helper for implementing toAttributeMap.
*
* {{{
* override def toAttributeMap(obj: Foo): Map[String, AttributeValue] =
* Map(
* mkAtrribute("company", obj.company),
* ...
* )
* }}}
*/
protected def mkAttribute[K](name: String, value: K)(implicit conv: K => AttributeValue): (String, AttributeValue) =
(name, conv(value))
/*
* A helper for implementing toAttributeMap.
*
* {{{
* override def toAttributeMap(obj: Foo): Map[String, AttributeValue] =
* Map(
* mkAtrribute("company" -> obj.company),
* ...
* )
* }}}
*/
protected def mkAttribute[K](pair: (String, K))(implicit conv: K => AttributeValue): (String, AttributeValue) =
(pair._1, conv(pair._2))
/**
* Converts a Scala object into a DynamoDB key.
*
* The key is represented as a map. The concrete implementation
* of a serializer may want to override this method for
* efficiency reasons, as the default implementation uses
* [[toAttributeMap]].
*
* @param obj
* An object of type T.
* @return a map from attribute names to attribute values.
*/
def primaryKeyOf(obj: T): Map[String, AttributeValue] = {
val attributes = toAttributeMap(obj)
val builder = Map.newBuilder[String, AttributeValue]
builder += (hashAttributeName -> attributes(hashAttributeName))
if (rangeAttributeName.isDefined)
builder += (rangeAttributeName.get -> attributes(rangeAttributeName.get))
builder.result
}
/**
* Converts a hash key value into a DynamoDB key.
*
* The key is represented as a map.
*
* @param hashKey
 * A value that is convertible to an [[com.github.dwhjames.awswrap.dynamodb.AttributeValue AttributeValue]].
* @return a map from attribute names to attribute values.
*/
def makeKey[K](hashKey: K)(implicit conv: K => AttributeValue): Map[String, AttributeValue] =
Map(hashAttributeName -> conv(hashKey))
/**
* Converts hash and range key values into a DynamoDB key.
*
* The key is represented as a map.
*
* @param hashKey
 * A value that is convertible to an [[com.github.dwhjames.awswrap.dynamodb.AttributeValue AttributeValue]].
 * @param rangeKey
 * A value that is convertible to an [[com.github.dwhjames.awswrap.dynamodb.AttributeValue AttributeValue]].
* @return a map from attribute names to attribute values.
*/
def makeKey[K1, K2](
hashKey: K1,
rangeKey: K2
)(implicit
conv1: K1 => AttributeValue,
conv2: K2 => AttributeValue
): Map[String, AttributeValue] =
Map(
hashAttributeName -> conv1(hashKey),
(rangeAttributeName getOrElse {
throw new UnsupportedOperationException(s"DynamoDBSerializer.makeKey: table $tableName does not have a range key")
} ) -> conv2(rangeKey)
)
}
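/* Example (sketch): building DynamoDB keys with makeKey, assuming an implicit
 * DynamoDBSerializer[GameScore] and the package's value-to-AttributeValue
 * views are in scope; the key values are illustrative.
 *
 * {{{
 * val serializer = implicitly[DynamoDBSerializer[GameScore]]
 * serializer.makeKey("101")                    // hash key only
 * serializer.makeKey("101", "Galaxy Invaders") // hash and range key
 * }}}
 */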
/**
* A trait for configuring [[AmazonDynamoDBScalaMapper]].
*/
trait AmazonDynamoDBScalaMapperConfig {
/**
* Transform a table name.
*
* Concrete implementations will rewrite
* tables names, given an input table name.
*
* @param tableName
* the table name to transform.
* @return the transformed table name.
*/
def transformTableName(tableName: String): String
/**
* Choose the read consistency behavior.
*
* `true` configures the mapper for consistent reads.
*/
val consistentReads: Boolean
}
/**
* A factory for [[AmazonDynamoDBScalaMapperConfig]].
*/
object AmazonDynamoDBScalaMapperConfig {
/**
* Construct a mapper configuration.
*
* @param nameFunction
* the transformation to apply to the table name.
* @param consistent
* set the consistency of reads.
* @return a new configuration
*/
def apply(
nameFunction: String => String = identity,
consistent: Boolean = false
): AmazonDynamoDBScalaMapperConfig = new AmazonDynamoDBScalaMapperConfig {
override def transformTableName(tableName: String) =
nameFunction(tableName)
override val consistentReads = consistent
}
/**
* A default [[AmazonDynamoDBScalaMapperConfig]].
*
* Provides a default configuration for
* [[AmazonDynamoDBScalaMapper]].
*/
object Default extends AmazonDynamoDBScalaMapperConfig {
/**
* Returns a table name untransformed.
*
* The default transformation on table names is
* the identity transformation.
*
* @param table
* the table name to transform.
* @return the same table name.
*/
override def transformTableName(tableName: String) = tableName
/**
* The default is eventual consistency.
*/
override val consistentReads = false
}
}
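/* Example (sketch): a configuration that prefixes table names for a staging
 * environment and forces consistent reads; the prefix is illustrative.
 *
 * {{{
 * val stagingConfig =
 *   AmazonDynamoDBScalaMapperConfig(
 *     nameFunction = name => "staging-" + name,
 *     consistent   = true
 *   )
 * }}}
 */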
/**
* An object mapping for DynamoDB items.
*
* This trait provides the interface to an object mapper for DynamoDB.
* It depends on a concrete implementation of [[AmazonDynamoDBScalaClient]].
*/
trait AmazonDynamoDBScalaMapper {
private type DynamoDBKey = JMap[String, AttributeValue]
/**
* An abstract [[AmazonDynamoDBScalaClient]].
*/
val client: AmazonDynamoDBScalaClient
/**
* An abstract ExecutionContext.
*/
protected implicit val execCtx: ExecutionContext
/**
* The mapping configuration.
*
* [[AmazonDynamoDBScalaMapperConfig.Default]] is used by default.
*/
protected val config: AmazonDynamoDBScalaMapperConfig =
AmazonDynamoDBScalaMapperConfig.Default
/**
* Returns the table name.
*
* Determines the table name, by transforming the table name
 * of the implicit serializer using the mapper's configuration.
*
* @param serializer
* the object serializer.
* @return the transformed table name.
*/
protected def tableName[T](implicit serializer: DynamoDBSerializer[T]): String =
config.transformTableName(serializer.tableName)
private val logger: Logger = LoggerFactory.getLogger(classOf[AmazonDynamoDBScalaMapper])
/**
* A method overloading container for [[deleteByKey]].
*
* This class contains the overloaded implementations
* of [[deleteByKey]].
*
* @tparam T
* the type of the object to be deleted.
* @see [[deleteByKey]]
*/
trait DeleteByKeyMagnet[T] {
def apply(): Future[Option[T]]
}
object DeleteByKeyMagnet {
/**
* Delete a DynamoDB item by a hash key.
*
 * If an item with the given key was deleted, then the
 * deleted object is returned; otherwise, None.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashKey
* a string, number, or byte array that is the hash key value of the
* item to be deleted.
* @param serializer
* an implicit object serializer.
* @return the deleted object, or None, in a future.
* @see [[deleteByKey]]
*/
implicit def deleteByHashKey
[T, K]
(hashKey: K)
(implicit serializer: DynamoDBSerializer[T], ev: K => AttributeValue)
: DeleteByKeyMagnet[T] = new DeleteByKeyMagnet[T] { def apply() = {
val request =
new DeleteItemRequest()
.withTableName(tableName)
.withKey(serializer.makeKey(hashKey).asJava)
.withReturnValues(ReturnValue.ALL_OLD)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.deleteItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"deleteByKey() ConsumedCapacity = ${result.getConsumedCapacity()}")
Option { result.getAttributes } map { item =>
serializer.fromAttributeMap(item.asScala)
}
}
}}
/**
* Delete a DynamoDB item by a hash key and range key.
*
 * If an item with the given key was deleted, then the
 * deleted object is returned; otherwise, None.
*
* @tparam K1
* a type that is viewable as an [[AttributeValue]].
* @tparam K2
* a type that is viewable as an [[AttributeValue]].
* @param hashKey
* a string, number, or byte array that is the hash key value of the
* item to be deleted.
* @param rangeKey
* a string, number, or byte array that is the range key value of the
* item to be deleted.
* @param serializer
* an implicit object serializer.
* @return the deleted object, or None, in a future.
* @see [[deleteByKey]]
*/
implicit def deleteByHashAndRangeKey
[T, K1, K2]
(tuple: /* hashKey */ (K1,
/* rangeKey */ K2))
(implicit serializer: DynamoDBSerializer[T],
ev1: K1 => AttributeValue, ev2: K2 => AttributeValue)
: DeleteByKeyMagnet[T] = new DeleteByKeyMagnet[T] { def apply() = {
val request =
new DeleteItemRequest()
.withTableName(tableName)
.withKey(serializer.makeKey(tuple._1, tuple._2).asJava)
.withReturnValues(ReturnValue.ALL_OLD)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.deleteItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"deleteByKey() ConsumedCapacity = ${result.getConsumedCapacity()}")
Option { result.getAttributes } map { item =>
serializer.fromAttributeMap(item.asScala)
}
}
}}
}
/**
* Delete a DynamoDB item by a hash key (and range key).
*
 * If an item with the given key was deleted, then the
 * deleted object is returned; otherwise, None.
*
* @tparam T
* the type of the object to be deleted.
* @see [[DeleteByKeyMagnet]]
*/
def deleteByKey[T](magnet: DeleteByKeyMagnet[T]): Future[Option[T]] = magnet()
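/* Example (sketch): both overloads dispatched through the magnet, assuming a
 * `mapper` instance and implicit serializers for Forum and GameScore are in
 * scope; the key values are illustrative.
 *
 * {{{
 * mapper.deleteByKey[Forum]("Amazon DynamoDB")               // hash-only table
 * mapper.deleteByKey[GameScore]("101" -> "Galaxy Invaders")  // hash and range
 * }}}
 */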
/**
* Delete the DynamoDB item that corresponds to the given object
*
* @tparam T
* the type of the object to delete.
* @param obj
* the object to delete.
* @param serializer
* an implicit object serializer.
*/
def delete[T](
obj: T
)(implicit serializer: DynamoDBSerializer[T]): Future[Unit] = {
val request =
new DeleteItemRequest()
.withTableName(tableName)
.withKey(serializer.primaryKeyOf(obj).asJava)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.deleteItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"delete() ConsumedCapacity = ${result.getConsumedCapacity()}")
}
}
/**
* Dumps an object into DynamoDB
*
* If the object is new, then this creates the item in DynamoDB,
 * otherwise it overwrites the existing item.
*
* @tparam T
* the type of the object to put.
* @param obj
* the object to put.
* @param serializer
* an implicit object serializer.
*/
def dump[T](
obj: T
)(implicit serializer: DynamoDBSerializer[T]): Future[Unit] = {
val request =
new PutItemRequest()
.withTableName(tableName)
.withItem(serializer.toAttributeMap(obj).asJava)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.putItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"dump() ConsumedCapacity = ${result.getConsumedCapacity()}")
}
}
/**
* A method overloading container for [[loadByKey]].
*
* This class contains the two overloaded implementations
* of [[loadByKey]].
*
* @tparam T
* the type of the object to be loaded.
* @see [[loadByKey]]
*/
trait LoadByKeyMagnet[T] {
def apply(): Future[Option[T]]
}
object LoadByKeyMagnet {
/**
* Load an object by its hash key.
*
* If the item is not found in the DynamoDB table,
* then None is returned.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashKey
* the hash key of the object to retrieve.
* @param serializer
* an implicit object serializer.
 * @return the retrieved object, or None, in a future.
* @see [[loadByKey]]
*/
implicit def loadByHashKey
[T, K]
(hashKey: K)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: LoadByKeyMagnet[T] = new LoadByKeyMagnet[T] { def apply() = {
val request =
new GetItemRequest()
.withTableName(tableName)
.withKey(serializer.makeKey(hashKey).asJava)
.withConsistentRead(config.consistentReads)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.getItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"loadByKey() ConsumedCapacity = ${result.getConsumedCapacity()}")
Option { result.getItem } map { item =>
serializer.fromAttributeMap(item.asScala)
}
}
}}
/**
* Load an object by its hash key and range key.
*
* If the item is not found in the DynamoDB table,
* then None is returned.
*
* @tparam K1
* a type that is viewable as an [[AttributeValue]].
* @tparam K2
* a type that is viewable as an [[AttributeValue]].
* @param hashKey
* the hash key of the object to retrieve.
* @param rangeKey
* the range key of the object to retrieve.
* @param serializer
* an implicit object serializer.
 * @return the retrieved object, or None, in a future.
* @see [[loadByKey]]
*/
implicit def loadByHashAndRangeKey
[T, K1, K2]
(tuple: /* hashKey */ (K1,
/* rangeKey */ K2))
(implicit serializer: DynamoDBSerializer[T],
ev1: K1 => AttributeValue,
ev2: K2 => AttributeValue)
: LoadByKeyMagnet[T] = new LoadByKeyMagnet[T] { def apply() = {
val request =
new GetItemRequest()
.withTableName(tableName)
.withKey(serializer.makeKey(tuple._1, tuple._2).asJava)
.withConsistentRead(config.consistentReads)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.getItem(request) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"loadByKey() ConsumedCapacity = ${result.getConsumedCapacity()}")
Option { result.getItem } map { item =>
serializer.fromAttributeMap(item.asScala)
}
}
}}
}
/**
* Load an object by its hash key (and range key).
*
* If the item is not found in the DynamoDB table,
* then None is returned.
*
* @tparam T
* the type of the object to be loaded.
* @see [[LoadByKeyMagnet]]
*/
def loadByKey[T](magnet: LoadByKeyMagnet[T]): Future[Option[T]] = magnet()
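/* Example (sketch): loading by a hash key alone or by a (hash, range) pair,
 * assuming a `mapper` instance and implicit serializers are in scope.
 *
 * {{{
 * val forum: Future[Option[Forum]] =
 *   mapper.loadByKey[Forum]("Amazon S3")
 * val score: Future[Option[GameScore]] =
 *   mapper.loadByKey[GameScore]("101" -> "Meteor Blasters")
 * }}}
 */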
/**
* Scan a table.
*
* This method will internally make repeated scan calls
* until the full result of the scan has been retrieved.
*
* @param scanFilter
* the optional filter conditions for the scan.
* @return sequence of scanned objects in a future.
* @see [[countScan]]
*/
def scan[T](
scanFilter: Map[String, Condition] = Map.empty
)(implicit serializer: DynamoDBSerializer[T]): Future[Seq[T]] = {
val builder = Seq.newBuilder[T]
def local(lastKey: Option[DynamoDBKey] = None): Future[Seq[T]] =
scanProgressively(scanFilter, lastEvaluatedKey = lastKey).flatMap {
case (key, result) =>
builder ++= result
key match {
case None => Future.successful(builder.result())
case optKey => local(optKey)
}
}
local()
}
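/* Example (sketch): a filtered full scan, assuming a `mapper` instance and an
 * implicit DynamoDBSerializer[GameScore] in scope; the attribute name and
 * threshold are illustrative.
 *
 * {{{
 * val highScores: Future[Seq[GameScore]] =
 *   mapper.scan[GameScore](
 *     Map(
 *       "TopScore" -> new Condition()
 *         .withComparisonOperator(ComparisonOperator.GT)
 *         .withAttributeValueList(new AttributeValue().withN("1000"))
 *     )
 *   )
 * }}}
 */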
/**
* Scan a table.
*
* This method will issue one scan request, stopping either
* at the supplied limit or at the response size limit.
*
* @param scanFilter
* the optional filter conditions for the scan.
* @param limit
* the optional limit for the number of items to return.
* @return sequence of scanned objects in a future.
* @see [[countScan]]
*/
def scanOnce[T](
scanFilter: Map[String, Condition] = Map.empty,
limit: Int = 0
)(implicit serializer: DynamoDBSerializer[T]): Future[Seq[T]] = {
scanProgressively(scanFilter, limit).map(_._2)
}
/**
* Scan a table.
*
* This method will issue one scan request, stopping either
* at the supplied limit or at the response size limit.
*
* @param scanFilter
* the optional filter conditions for the scan.
* @param lastEvaluatedKey
* the optional starting key.
* @param limit
* the optional limit for the number of items to return.
 * @return a tuple, in a future, of the last evaluated DynamoDB key (or None
 *     if the scan has reached the end) and the sequence of scanned objects.
* @see [[countScan]]
*/
def scanProgressively[T](
scanFilter: Map[String, Condition] = Map.empty,
limit: Int = 0,
lastEvaluatedKey: Option[DynamoDBKey] = None
)(implicit serializer: DynamoDBSerializer[T]): Future[(Option[DynamoDBKey], Seq[T])] = {
val scanRequest =
new ScanRequest()
.withTableName(tableName)
.withScanFilter(scanFilter.asJava)
if (limit > 0)
scanRequest.setLimit(limit)
if (logger.isDebugEnabled)
scanRequest.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.scan(
scanRequest.withExclusiveStartKey(lastEvaluatedKey.orNull)
) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"scanOnce() ConsumedCapacity = ${result.getConsumedCapacity()}")
val r = result.getItems.asScala map { item =>
serializer.fromAttributeMap(item.asScala)
}
val newLastKey = Option {
result.getLastEvaluatedKey
}
(newLastKey, r)
}
}
/**
* Scan a table and return a count.
*
* This method will internally make repeated scan calls
* until the full result of the scan has been retrieved.
*
* @param scanFilter
* the optional filter conditions for the scan
* @return the total number of scanned items in a future
* @see [[scan]]
*/
def countScan[T](
scanFilter: Map[String, Condition] = Map.empty
)(implicit serializer: DynamoDBSerializer[T]): Future[Long] = {
val scanRequest =
new ScanRequest()
.withTableName(tableName)
.withScanFilter(scanFilter.asJava)
.withSelect(Select.COUNT)
if (logger.isDebugEnabled)
scanRequest.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
def local(count: Long = 0L, lastKey: Option[DynamoDBKey] = None): Future[Long] =
client.scan(
scanRequest.withExclusiveStartKey(lastKey.orNull)
) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"countScan() ConsumedCapacity = ${result.getConsumedCapacity()}")
val newCount = count + result.getCount
Option { result.getLastEvaluatedKey } match {
case None => Future.successful(newCount)
case optKey => local(newCount, optKey)
}
}
local()
}
type RangeCondition = (String,Condition)
/**
* A method overloading container for [[query]].
*
* This class contains the overloaded implementations of [[query]].
*
* @tparam T
* the type of the object returned by the query.
* @see [[query]]
*/
trait QueryMagnet[T] {
def apply(): Future[Seq[T]]
}
object QueryMagnet {
/**
* Query a table.
*
 * This is the most primitive overload, which takes a raw
* query request object.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @param queryRequest
* the query request object.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/QueryRequest.html QueryRequest]]
*/
implicit def queryRequest1
[T]
(queryRequest: QueryRequest)
(implicit serializer: DynamoDBSerializer[T])
: QueryMagnet[T] =
queryRaw(queryRequest)
/**
* Query a table, with a limit.
*
 * This is the most primitive overload, which takes a raw
* query request object.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* @param queryRequest
* the query request object.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/QueryRequest.html QueryRequest]]
*/
implicit def queryRequest2
[T]
(tuple: /* queryRequest */ (QueryRequest,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T])
: QueryMagnet[T] =
queryRaw(tuple._1, Some(tuple._2))
private def queryRaw
[T]
(queryRequest: QueryRequest, totalLimit: Option[Int] = None)
(implicit serializer: DynamoDBSerializer[T])
: QueryMagnet[T] = new QueryMagnet[T] { def apply() = {
// note this mutates the query request
queryRequest
.withTableName(tableName)
.withConsistentRead(config.consistentReads)
if (logger.isDebugEnabled)
queryRequest.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
val builder = Seq.newBuilder[T]
def local(lastKey: Option[DynamoDBKey] = None, numberLeftToFetch: Option[Int] = None): Future[Unit] =
client.query(
queryRequest.withExclusiveStartKey(lastKey.orNull)
) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"query() ConsumedCapacity = ${result.getConsumedCapacity}")
val queryResult = result.getItems.asScala map { item =>
serializer.fromAttributeMap(item.asScala)
}
builder ++= (numberLeftToFetch match {
case Some(n) if n <= queryResult.size => queryResult.take(n)
case _ => queryResult
})
val optKey = Option { result.getLastEvaluatedKey }
if (optKey.isEmpty) Future.successful(())
else numberLeftToFetch match {
case Some(n) if n <= queryResult.size => Future.successful(())
case Some(n) => local(optKey, Some(n - queryResult.size))
case None => local(optKey, None)
}
}
local(None, totalLimit) map { _ => builder.result() }
}}
/**
* Query a table by a hash key value.
*
* The result will be all items with the same hash key
* value, but varying range keys.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHash1
[T, K]
(hashValue: K)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHash(hashValue)
/**
* Query a table by a hash key value, with a limit.
*
* The result will be all items with the same hash key
* value, but varying range keys.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHash2
[T, K]
(tuple: /* hashValue */ (K,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHash(tuple._1, Some(tuple._2))
private def queryOnHash
[T, K]
(hashValue: K, totalLimit: Option[Int] = None)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryRaw(mkHashKeyQuery(hashValue), totalLimit)
/**
* Query a table by a hash value and range condition.
*
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHashAndRange1
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHashAndRange(tuple._1, tuple._2, true, None)
/**
* Query a table by a hash value and range condition,
 * ascending or descending.
*
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHashAndRange2
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHashAndRange(tuple._1, tuple._2, tuple._3, None)
/**
* Query a table by a hash value and range condition,
* with a limit.
*
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHashAndRange3
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHashAndRange(tuple._1, tuple._2, true, Some(tuple._3))
/**
* Query a table by a hash value and range condition,
 * ascending or descending, with a limit.
*
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnHashAndRange4
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnHashAndRange(tuple._1, tuple._2, tuple._3, Some(tuple._4))
private def queryOnHashAndRange
[T, K]
(hashValue: K,
rangeCondition: Condition,
scanIndexForward: Boolean,
totalLimit: Option[Int])
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryRaw(mkHashAndRangeKeyQuery(hashValue, rangeCondition).withScanIndexForward(scanIndexForward), totalLimit)
/**
* Query a secondary index by a hash value and range condition.
*
* This query targets a named secondary index. The index
 * being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnSecondaryIndex1
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, true, None)
/**
* Query a secondary index by a hash value and range condition,
 * ascending or descending.
*
* This query targets a named secondary index. The index
 * being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnSecondaryIndex2
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, None)
/**
* Query a secondary index by a hash value and range condition,
* with a limit.
*
* This query targets a named secondary index. The index
 * being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnSecondaryIndex3
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, true, Some(tuple._5))
/**
* Query a secondary index by a hash value and range condition,
* ascending or descending, with a limit.
*
* This query targets a named secondary index. The index
* being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
*/
implicit def queryOnSecondaryIndex4
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, Some(tuple._6))
private def queryOnSecondaryIndex
[T, K]
(indexName: String,
hashValue: K,
rangeAttributeName: String,
rangeCondition: Condition,
scanIndexForward: Boolean = true,
totalLimit: Option[Int] = None)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnAnyIndex(indexName, serializer.hashAttributeName, hashValue, Some((rangeAttributeName, rangeCondition)), scanIndexForward, totalLimit)
/**
* Query a global secondary index by a hash value and optional
* range condition, ascending or descending, with a limit.
*
* This query targets a named global secondary index. The name
* of the index must be given, as well as the name of the
* attribute used as the index's hash key.
* If the index contains a range key, then the name of
* the range attribute and the condition to apply to it
* may also be given.
* The result will be all items with the same hash key
* value, and range keys that match the optional range condition.
*
* This method will internally make repeated query calls
* until at most the given limit has been retrieved.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashAttributeName
* the name of the key attribute used by the index.
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* an optional tuple whose first member is the name (String) of the range key attribute used by the index,
* and whose second member is the Condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[query]]
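* A usage sketch (illustrative only; the index and attribute names
* here are hypothetical):
* {{{
* val byCategory: Future[Seq[Forum]] =
*   mapper.query[Forum](("category-index", "Category", "Amazon Web Services",
*                        None: Option[RangeCondition], true, 50))
* }}}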
*/
implicit def queryOnGlobalSecondaryIndex
[T, K]
(tuple: /* indexName */ (String,
/* hashAttributeName */ String,
/* hashValue */ K,
/* rangeCondition */ Option[RangeCondition],
/* scanIndexForward */ Boolean,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] =
queryOnAnyIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, Some(tuple._6))
private def queryOnAnyIndex
[T, K]
(indexName: String,
hashAttributeName: String,
hashValue: K,
rangeCondition: Option[RangeCondition],
scanIndexForward: Boolean = true,
totalLimit: Option[Int] = None)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryMagnet[T] = {
val keyConditions = {
val b = Map.newBuilder[String, Condition]
b += (hashAttributeName -> QueryCondition.equalTo(hashValue))
rangeCondition.foreach(b += _)
b.result()
}
queryRaw(
new QueryRequest()
.withIndexName(indexName)
.withKeyConditions( keyConditions.asJava )
.withSelect(Select.ALL_ATTRIBUTES)
.withScanIndexForward(scanIndexForward),
totalLimit
)
}
}
/**
* Query a table.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved,
* or at most the given limit, if one is specified.
*
* @tparam T
* the type of the object returned by the query.
* @see [[QueryMagnet]]
* @see [[queryOnce]]
* @see [[countQuery]]
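* A usage sketch (illustrative only, assuming a mapped type `Forum`
* with an implicit `DynamoDBSerializer[Forum]` in scope):
* {{{
* // all items sharing the given hash key value
* val all: Future[Seq[Forum]] = mapper.query[Forum]("Amazon DynamoDB")
* }}}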
*/
def query[T](magnet: QueryMagnet[T]): Future[Seq[T]] = magnet()
trait QueryOnceMagnet[T] {
def apply(): Future[Seq[T]]
}
object QueryOnceMagnet {
/**
* Query a table.
*
* This is the most primitive overload, which takes a raw
* query request object.
*
* This method will issue one query request, stopping
* at the response size limit.
*
* @param queryRequest
* the query request object.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[queryOnce]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/QueryRequest.html QueryRequest]]
*/
implicit def queryOnceWithQueryRequest
[T]
(queryRequest: QueryRequest)
(implicit serializer: DynamoDBSerializer[T])
: QueryOnceMagnet[T] = new QueryOnceMagnet[T] { def apply(): Future[Seq[T]] = {
// note this mutates the query request
queryRequest
.withTableName(tableName)
.withConsistentRead(config.consistentReads)
if (logger.isDebugEnabled)
queryRequest.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.query(
queryRequest
) map { result =>
if (logger.isDebugEnabled)
logger.debug(s"queryOnce() ConsumedCapacity = ${result.getConsumedCapacity()}")
result.getItems.asScala.view map { item =>
serializer.fromAttributeMap(item.asScala)
}
}
}}
/**
* Query a table by a hash key value.
*
* The result will be all items with the same hash key
* value, but varying range keys.
*
* This method will issue one query request, stopping either
* at the supplied limit or at the response size limit.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param limit
* the optional limit for the number of items to return.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[queryOnce]]
*/
def queryOnceWithHashValue
[T, K]
(hashValue: K,
limit: Int)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] = {
val request = mkHashKeyQuery(hashValue)
if (limit > 0) request.setLimit(limit)
queryOnceWithQueryRequest(request)
}
implicit def queryOnceWithHashValue1
[T, K]
(tuple: /* hashValue */ (K,
/* limit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValue(tuple._1, tuple._2)
implicit def queryOnceWithHashValue2
[T, K]
(hashValue: K)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValue(hashValue, 0)
/**
* Query a table by a hash value and range condition.
*
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will issue one query request, stopping either
* at the supplied limit or at the response size limit.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true (default) for forwards scan, and false for reverse scan.
* @param limit
* the optional limit for the number of items to return.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[queryOnce]]
*/
def queryOnceWithHashValueRangeCondition
[T, K]
(hashValue: K,
rangeCondition: Condition,
scanIndexForward: Boolean,
limit: Int)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] = {
val request =
mkHashAndRangeKeyQuery(hashValue, rangeCondition)
.withScanIndexForward(scanIndexForward)
if (limit > 0) request.setLimit(limit)
queryOnceWithQueryRequest(request)
}
implicit def queryOnceWithHashValueRangeCondition1
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean,
/* limit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValueRangeCondition(tuple._1, tuple._2, tuple._3, tuple._4)
implicit def queryOnceWithHashValueRangeCondition2
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValueRangeCondition(tuple._1, tuple._2, tuple._3, 0)
implicit def queryOnceWithHashValueRangeCondition3
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition,
/* limit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValueRangeCondition(tuple._1, tuple._2, true, tuple._3)
implicit def queryOnceWithHashValueRangeCondition4
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceWithHashValueRangeCondition(tuple._1, tuple._2, true, 0)
/**
* Query a secondary index by a hash value and range condition.
*
* This query targets a named secondary index. The index
* being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be all items with the same hash key
* value, and range keys that match the range condition.
*
* This method will issue one query request, stopping either
* at the supplied limit or at the response size limit.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param scanIndexForward
* true (default) for forwards scan, and false for reverse scan.
* @param limit
* the optional limit for the number of items to return.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[queryOnce]]
*/
def queryOnceSecondaryIndex
[T, K]
(indexName: String,
hashValue: K,
rangeAttributeName: String,
rangeCondition: Condition,
scanIndexForward: Boolean,
limit: Int)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceOnAnyIndex(indexName, serializer.hashAttributeName, hashValue, Some((rangeAttributeName, rangeCondition)), scanIndexForward, limit)
/**
* Query a global secondary index by a hash value and optional
* range condition, ascending or descending, with a limit.
*
* This query targets a named global secondary index. The name
* of the index must be given, as well as the name of the
* attribute used as the index's hash key.
* If the index contains a range key, then the name of
* the range attribute and the condition to apply to it
* may also be given.
* The result will be all items with the same hash key
* value, and range keys that match the optional range condition.
*
* This method will issue one query request, stopping either
* at the supplied limit or at the response size limit.
*
* Note that all attributes will be requested, so that
* the serializer will get a complete item. This may incur
* extra read capacity, depending on what attributes
* are projected into the index.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashAttributeName
* the name of the key attribute used by the index.
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* an optional tuple whose first member is the name (String) of the range key attribute used by the index,
* and whose second member is the Condition to apply to the range key.
* @param scanIndexForward
* true for forwards scan, and false for reverse scan.
* @param totalLimit
* the total number of results you want.
* @param serializer
* an implicit object serializer.
* @return result sequence of the query in a future.
* @see [[queryOnce]]
*/
implicit def queryOnceOnGlobalSecondaryIndex
[T, K]
(tuple: /* indexName */ (String,
/* hashAttributeName */ String,
/* hashValue */ K,
/* rangeCondition */ Option[RangeCondition],
/* scanIndexForward */ Boolean,
/* totalLimit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceOnAnyIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, tuple._6)
private def queryOnceOnAnyIndex
[T, K]
(indexName: String,
hashAttributeName: String,
hashValue: K,
rangeCondition: Option[RangeCondition],
scanIndexForward: Boolean = true,
totalLimit: Int)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] = {
val keyConditions = {
val b = Map.newBuilder[String, Condition]
b += (hashAttributeName -> QueryCondition.equalTo(hashValue))
rangeCondition.foreach(b += _)
b.result()
}
val request =
new QueryRequest()
.withIndexName(indexName)
.withKeyConditions( keyConditions.asJava )
.withSelect(Select.ALL_ATTRIBUTES)
.withScanIndexForward(scanIndexForward)
if (totalLimit > 0) request.setLimit(totalLimit)
queryOnceWithQueryRequest(request)
}
implicit def queryOnceSecondaryIndex1
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean,
/* limit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, tuple._6)
implicit def queryOnceSecondaryIndex2
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* scanIndexForward */ Boolean))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, tuple._5, 0)
implicit def queryOnceSecondaryIndex3
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition,
/* limit */ Int))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, true, tuple._5)
implicit def queryOnceSecondaryIndex4
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: QueryOnceMagnet[T] =
queryOnceSecondaryIndex(tuple._1, tuple._2, tuple._3, tuple._4, true, 0)
}
/**
* Query a table.
*
* This method will issue one query request, stopping either
* at the supplied limit or at the response size limit.
*
* @tparam T
* the type of the object returned by the query.
* @see [[QueryOnceMagnet]]
* @see [[query]]
* @see [[countQuery]]
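* A usage sketch (illustrative only):
* {{{
* // at most 20 items for this hash key value, in a single request
* val page: Future[Seq[Forum]] = mapper.queryOnce[Forum]("Amazon DynamoDB" -> 20)
* }}}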
*/
def queryOnce[T](magnet: QueryOnceMagnet[T]): Future[Seq[T]] = magnet()
/**
* A method overloading container for [[countQuery]].
*
* This class contains the overloaded implementations of [[countQuery]].
*
* @tparam T
* the type of object queried.
* @see [[countQuery]]
*/
trait CountQueryMagnet[T] {
def apply(): Future[Long]
}
object CountQueryMagnet {
/**
* Query a table, counting the results.
*
* This is the most primitive overload, which takes a raw
* query request object.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @param queryRequest
* the query request object.
* @param serializer
* an implicit object serializer.
* @return the total number of items that match the query, in a future.
* @see [[countQuery]]
* @see [[http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/model/QueryRequest.html QueryRequest]]
*/
implicit def countQueryRequest
[T]
(queryRequest: QueryRequest)
(implicit serializer: DynamoDBSerializer[T])
: CountQueryMagnet[T] = new CountQueryMagnet[T] { def apply() = {
// note this mutates the query request
queryRequest
.withTableName(tableName)
.withSelect(Select.COUNT)
.withConsistentRead(config.consistentReads)
if (logger.isDebugEnabled)
queryRequest.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
def local(count: Long = 0L, lastKey: Option[DynamoDBKey] = None): Future[Long] =
client.query(
queryRequest.withExclusiveStartKey(lastKey.orNull)
) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"countQuery() ConsumedCapacity = ${result.getConsumedCapacity()}")
val newCount = count + result.getCount
Option { result.getLastEvaluatedKey } match {
case None => Future.successful(newCount)
case optKey => local(newCount, optKey)
}
}
local()
}}
/**
* Query a table by a hash key value, counting the results.
*
* The result will be the count of all items with the
* same hash key value, but varying range keys.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param serializer
* an implicit object serializer.
* @return the total number of items that match the query, in a future.
* @see [[countQuery]]
*/
implicit def countQueryHashValue
[T, K]
(hashValue: K)
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: CountQueryMagnet[T] =
countQueryRequest(mkHashKeyQuery(hashValue))
/**
* Query a table by a hash value and range condition,
* counting the results.
*
* The result will be the count of all items with
* the same hash key value, and range keys that
* match the range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashValue
* the hash key value to match.
* @param rangeCondition
* the condition to apply to the range key.
* @param serializer
* an implicit object serializer.
* @return the total number of items that match the query, in a future.
* @see [[countQuery]]
*/
implicit def countQueryHashValueAndRangeCondition
[T, K]
(tuple: /* hashValue */ (K,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: CountQueryMagnet[T] =
countQueryRequest(mkHashAndRangeKeyQuery(tuple._1, tuple._2))
/**
* Query a secondary index by a hash value and range
* condition, counting the results.
*
* This query targets a named secondary index. The index
* being used must be named, as well as the name of
* the attribute used as a range key in the index.
* The result will be the count of all items with the
* same hash key value, and range keys that match the
* range condition.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param indexName
* the name of the secondary index to query.
* @param hashValue
* the hash key value to match.
* @param rangeAttributeName
* the name of the range key attribute used by the index.
* @param rangeCondition
* the condition to apply to the range key.
* @param serializer
* an implicit object serializer.
* @return the total number of items that match the query, in a future.
* @see [[countQuery]]
*/
implicit def countQuerySecondaryIndex
[T, K]
(tuple: /* indexName */ (String,
/* hashValue */ K,
/* rangeAttributeName */ String,
/* rangeCondition */ Condition))
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: CountQueryMagnet[T] =
countQueryRequest(
new QueryRequest()
.withIndexName(tuple._1)
.withKeyConditions(
Map(
serializer.hashAttributeName -> QueryCondition.equalTo(tuple._2),
tuple._3 -> tuple._4
).asJava
)
)
}
/**
* Query a table, counting the results.
*
* This method will internally make repeated query calls
* until the full result of the query has been retrieved.
*
* @tparam T
* the type of object queried.
* @see [[CountQueryMagnet]]
* @see [[query]]
* @see [[queryOnce]]
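* A usage sketch (illustrative only):
* {{{
* val total: Future[Long] = mapper.countQuery[Forum]("Amazon DynamoDB")
* }}}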
*/
def countQuery[T](magnet: CountQueryMagnet[T]): Future[Long] = magnet()
/**
* Helper method to build sequences of keys.
*
* Turn a sequence of hash values into a sequence of hash keys;
* or turn a sequence of hash values and a sequence of range values
* into a sequence of hash+range keys.
*
* @tparam T
* the type object for the serializer.
* @tparam K1
* a type that is viewable as an [[AttributeValue]].
* @tparam K2
* a type that is viewable as an [[AttributeValue]].
* @param hashKeys
* a sequence of hash key values.
* @param rangeKeys
* an optional sequence of range key values.
* @param serializer
* an implicit object serializer.
* @return a sequence of DynamoDB keys (a map of strings to values).
* @throws IllegalArgumentException if the sequence of hash key values is empty.
* @throws IllegalArgumentException if the numbers of hash and range keys don't match.
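* Schematically (illustrative; the real keys map attribute names to
* `AttributeValue`s via the serializer):
* {{{
* zipKeySeqs(Seq("a", "b"), Seq(1, 2))
* // => Seq(makeKey("a", 1), makeKey("b", 2))
* }}}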
*/
private def zipKeySeqs[T, K1, K2]
(hashKeys: Seq[K1], rangeKeys: Seq[K2] = Seq.empty)
(implicit serializer: DynamoDBSerializer[T],
ev1: K1 => AttributeValue,
ev2: K2 => AttributeValue)
: Seq[DynamoDBKey] =
if (hashKeys.isEmpty) {
throw new IllegalArgumentException("AmazonDynamoDBScalaMapper: no hash keys given")
} else if (!rangeKeys.isEmpty && (hashKeys.length != rangeKeys.length)) {
throw new IllegalArgumentException("AmazonDynamoDBScalaMapper: the number of hash and range keys don't match")
} else if (rangeKeys.isEmpty) {
hashKeys map { hashKey =>
serializer.makeKey(hashKey).asJava
}
} else {
(hashKeys, rangeKeys).zipped map { case (hashKey, rangeKey) =>
serializer.makeKey(hashKey, rangeKey).asJava
}
}
/**
* A method overloading container for [[batchLoadByKeys]].
*
* This class contains the overloaded implementation of [[batchLoadByKeys]].
*
* @tparam T
* the type of the objects returned by the batch load.
* @see [[batchLoadByKeys]]
*/
trait BatchLoadByKeysMagnet[T] {
def apply(): Future[Seq[T]]
}
object BatchLoadByKeysMagnet {
/**
* Load a sequence of objects by a sequence of hash key values.
*
* This method will internally make repeated batchGetItem
* calls, with up to 100 keys at a time, until all of the
* given keys have been requested.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashKeys
* the hash key values of the objects to retrieve.
* @param serializer
* an implicit object serializer.
* @return sequence of retrieved objects in a future.
* @see [[batchLoadByKeys]]
*/
implicit def batchLoadByHashKeys
[T, K]
(hashKeys: Seq[K])
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: BatchLoadByKeysMagnet[T] =
batchLoadByHashAndRangeKeys(hashKeys -> Seq.empty[String])
/*
* Seq.empty[String] ensures that a valid view is inferred
* otherwise we will get an implicit error if Seq.empty
* is given type Seq[Nothing].
*/
/**
* Load a sequence of objects by a sequence of hash key
* values and a sequences of range key values.
*
* This method will internally make repeated batchGetItem
* calls, with up to 100 keys at a time, until all of the
* given keys have been requested.
*
* @tparam K1
* a type that is viewable as an [[AttributeValue]].
* @tparam K2
* a type that is viewable as an [[AttributeValue]].
* @param hashKeys
* the hash key values of the objects to retrieve.
* @param rangeKeys
* the range key values of the objects to retrieve.
* @param serializer
* an implicit object serializer.
* @return sequence of retrieved objects in a future.
* @see [[batchLoadByKeys]]
*/
implicit def batchLoadByHashAndRangeKeys
[T, K1, K2]
(tuple: /* hashKeys */ (Seq[K1],
/* rangeKeys */ Seq[K2]))
(implicit serializer: DynamoDBSerializer[T],
ev1: K1 => AttributeValue,
ev2: K2 => AttributeValue)
: BatchLoadByKeysMagnet[T] = new BatchLoadByKeysMagnet[T] { def apply() = {
val keys: Seq[DynamoDBKey] = zipKeySeqs(tuple._1, tuple._2)
val builder = Seq.newBuilder[T]
def local(keys: (Seq[DynamoDBKey], Seq[DynamoDBKey])): Future[Unit] = {
val request =
new BatchGetItemRequest()
.withRequestItems(
Map(
tableName ->
new KeysAndAttributes()
.withKeys(
keys._1.asJavaCollection
)
.withConsistentRead(config.consistentReads)
).asJava
)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.batchGetItem(request) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"batchLoadByKeys() ConsumedCapacity = ${result.getConsumedCapacity()}")
builder ++= result.getResponses.get(tableName).asScala.view map { item =>
serializer.fromAttributeMap(item.asScala)
}
if (keys._2.isEmpty)
Future.successful(())
else
local(keys._2.splitAt(100))
}
}
local(keys.splitAt(100)) map { _ => builder.result }
}}
}
/**
* Load a sequence of objects by a sequence of keys.
*
* This method will internally make repeated batchGetItem
* calls, with up to 100 keys at a time, until all of the
* given keys have been requested.
*
* @tparam T
* the type of the objects returned by the batch load.
* @see [[BatchLoadByKeysMagnet]]
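* A usage sketch (illustrative only; `Forum` and `Thread` are
* hypothetical mapped types):
* {{{
* // by hash keys alone
* val forums: Future[Seq[Forum]] =
*   mapper.batchLoadByKeys[Forum](Seq("Amazon S3", "Amazon EC2"))
* // by hash and range keys together
* val threads: Future[Seq[Thread]] =
*   mapper.batchLoadByKeys[Thread](Seq("S3", "S3") -> Seq("Thread 1", "Thread 2"))
* }}}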
*/
def batchLoadByKeys[T](magnet: BatchLoadByKeysMagnet[T]): Future[Seq[T]] = magnet()
/**
* A helper method to check for and retry any unprocessed items in
* a batch write result.
*
* This method will attempt to retry any portion of a failed batch write.
*
* @param lastResult
* the result object from a batchWrite operation.
*/
private def checkRetryBatchWrite(lastResult: BatchWriteItemResult): Future[Unit] = {
val retryItems = lastResult.getUnprocessedItems
if (retryItems.isEmpty)
Future.successful(())
else {
val request =
new BatchWriteItemRequest()
.withRequestItems(retryItems)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.batchWriteItem(request) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"checkRetryBatchWrite() ConsumedCapacity = ${result.getConsumedCapacity()}")
checkRetryBatchWrite(result)
}
}
}
/**
* Dump a sequence of objects into DynamoDB.
*
* This method will internally make repeated batchWriteItem
* calls, with up to 25 objects at a time, until all the input
* objects have been written.
*
* Objects that are new will create new items in DynamoDB,
* otherwise they will overwrite existing items.
*
* @param objs
* the sequence of objects to write to DynamoDB.
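* A usage sketch (illustrative only; `forums` is a hypothetical `Seq[Forum]`):
* {{{
* val done: Future[Unit] = mapper.batchDump(forums)
* }}}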
*/
def batchDump[T](objs: Seq[T])(implicit serializer: DynamoDBSerializer[T]): Future[Unit] = {
def local(objsPair: (Seq[T], Seq[T])): Future[Unit] = {
val request =
new BatchWriteItemRequest()
.withRequestItems(
Map(
tableName -> objsPair._1.view.map { obj =>
new WriteRequest()
.withPutRequest(
new PutRequest()
.withItem(serializer.toAttributeMap(obj).asJava)
)
} .asJava
).asJava
)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.batchWriteItem(request) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"batchDump() ConsumedCapacity = ${result.getConsumedCapacity()}")
checkRetryBatchWrite(result) flatMap { _ =>
if (objsPair._2.isEmpty)
Future.successful(())
else
local(objsPair._2.splitAt(25))
}
}
}
local(objs.splitAt(25))
}
/**
* Delete a sequence of objects.
*
* This method will internally make repeated batchWriteItem
* calls, with up to 25 objects at a time, until all the input
* objects have been deleted.
*
* @tparam T
* the type of objects to delete.
* @param objs
* a sequence of objects to delete.
* @param serializer
* an implicit object serializer.
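* A usage sketch (illustrative only; `staleForums` is a hypothetical `Seq[Forum]`):
* {{{
* val done: Future[Unit] = mapper.batchDelete(staleForums)
* }}}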
*/
def batchDelete[T](objs: Seq[T])(implicit serializer: DynamoDBSerializer[T]): Future[Unit] = {
def local(objsPair: (Seq[T], Seq[T])): Future[Unit] = {
val request =
new BatchWriteItemRequest()
.withRequestItems(
Map(
tableName -> objsPair._1.view.map { obj =>
new WriteRequest()
.withDeleteRequest(
new DeleteRequest()
.withKey(serializer.primaryKeyOf(obj).asJava)
)
} .asJava
).asJava
)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.batchWriteItem(request) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"batchDelete() ConsumedCapacity = ${result.getConsumedCapacity()}")
checkRetryBatchWrite(result) flatMap { _ =>
if (objsPair._2.isEmpty)
Future.successful(())
else
local(objsPair._2.splitAt(25))
}
}
}
local(objs.splitAt(25))
}
/**
* A method overloading container for [[batchDeleteByKeys]].
*
* This class contains the overloaded implementation of [[batchDeleteByKeys]].
*
* @tparam T
* the type of the objects deleted by the batch delete.
* @see [[batchDeleteByKeys]]
*/
trait BatchDeleteByKeysMagnet[T] {
def apply(): Future[Unit]
}
object BatchDeleteByKeysMagnet {
/**
* Delete items by a sequence of hash key values.
*
* This method will internally make repeated batchWriteItem
* calls, with up to 25 keys at a time, until all the input
* keys have been deleted.
*
* @tparam K
* a type that is viewable as an [[AttributeValue]].
* @param hashKeys
* the hash key values of the items to delete.
* @param serializer
* an implicit object serializer.
* @see [[batchDeleteByKeys]]
*/
implicit def batchDeleteByHashKeys
[T, K]
(hashKeys: Seq[K])
(implicit serializer: DynamoDBSerializer[T],
ev: K => AttributeValue)
: BatchDeleteByKeysMagnet[T] =
batchDeleteByHashAndRangeKeys(hashKeys -> Seq.empty[String])
/*
* Seq.empty[String] ensures that a valid view is inferred
* otherwise we will get an implicit error if Seq.empty
* is given type Seq[Nothing].
*/
/**
* Delete items by a sequence of hash key values and a
* sequence of range key values.
*
* This method will internally make repeated batchWriteItem
* calls, with up to 25 keys at a time, until all the input
* keys have been deleted.
*
* @tparam K1
* a type that is viewable as an [[AttributeValue]].
* @tparam K2
* a type that is viewable as an [[AttributeValue]].
* @param hashKeys
* the hash key values of the items to delete.
* @param rangeKeys
* the range key values of the items to delete.
* @param serializer
* an implicit object serializer.
* @see [[batchDeleteByKeys]]
*/
implicit def batchDeleteByHashAndRangeKeys
[T, K1, K2]
(tuple: /* hashKeys */ (Seq[K1],
/* rangeKeys */ Seq[K2]))
(implicit serializer: DynamoDBSerializer[T],
ev1: K1 => AttributeValue,
ev2: K2 => AttributeValue)
: BatchDeleteByKeysMagnet[T] = new BatchDeleteByKeysMagnet[T] { def apply() = {
val keys: Seq[DynamoDBKey] = zipKeySeqs(tuple._1, tuple._2)
def local(keysPair: (Seq[DynamoDBKey], Seq[DynamoDBKey])): Future[Unit] = {
val request =
new BatchWriteItemRequest()
.withRequestItems(
Map(
tableName -> keysPair._1.view.map { key =>
new WriteRequest()
.withDeleteRequest(
new DeleteRequest()
.withKey(key)
)
} .asJava
).asJava
)
if (logger.isDebugEnabled)
request.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
client.batchWriteItem(request) flatMap { result =>
if (logger.isDebugEnabled)
logger.debug(s"batchDeleteByKeys() ConsumedCapacity = ${result.getConsumedCapacity()}")
checkRetryBatchWrite(result) flatMap { _ =>
if (keysPair._2.isEmpty)
Future.successful(())
else
local(keysPair._2.splitAt(25))
}
}
}
local(keys.splitAt(25))
}}
}
/**
* Delete items by a sequence of keys.
*
* This method will internally make repeated batchWriteItem
* calls, with up to 25 keys at a time, until all the input
* keys have been deleted.
*
* @tparam T
* the type of the objects deleted by the batch delete.
* @see [[BatchDeleteByKeysMagnet]]
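* A usage sketch (illustrative only):
* {{{
* val done: Future[Unit] =
*   mapper.batchDeleteByKeys[Forum](Seq("Amazon S3", "Amazon EC2"))
* }}}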
*/
def batchDeleteByKeys[T](magnet: BatchDeleteByKeysMagnet[T]): Future[Unit] = magnet()
}
/**
* A factory for [[AmazonDynamoDBScalaMapper]].
*/
object AmazonDynamoDBScalaMapper {
/**
* A factory method for [[AmazonDynamoDBScalaMapper]].
*
* Build a new mapper from a client, a config, and
* an execution context.
*
* @param dynamoClient
* the DynamoDB client to use.
* @param mapperConfig
* the mapping configuration to use.
* @param exec
* the execution context to use.
* @return a new mapper.
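* A usage sketch (illustrative; construction of the underlying
* asynchronous client is elided):
* {{{
* implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
* val mapper = AmazonDynamoDBScalaMapper(dynamoClient)
* }}}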
*/
def apply(
dynamoClient: AmazonDynamoDBScalaClient,
mapperConfig: AmazonDynamoDBScalaMapperConfig = AmazonDynamoDBScalaMapperConfig.Default
)(implicit exec: ExecutionContext) = new AmazonDynamoDBScalaMapper {
override val client = dynamoClient
override protected val execCtx = exec
override protected val config = mapperConfig
}
}
|
sunilake/aws-wrap
|
integration/src/it/scala/dynamodb/ReadsOnHashKeyTablesSpec.scala
|
<reponame>sunilake/aws-wrap
/*
* Copyright 2012-2015 Pellucid Analytics
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.scalatest.{ FlatSpec, BeforeAndAfterAll, Matchers }
import com.amazonaws.AmazonClientException
class ReadsOnHashKeyTableSpec
extends FlatSpec
with Matchers
with DynamoDBClient
{
import SampleData.sampleForums
override val tableNames = Seq(Forum.tableName)
val mapper = AmazonDynamoDBScalaMapper(client)
override def beforeAll(): Unit = {
super.beforeAll()
tryCreateTable(Forum.tableRequest)
awaitTableCreation(Forum.tableName)
await(30.seconds) {
mapper.batchDump(sampleForums)
}
}
"DynamoDB" should s"contain the '${Forum.tableName}' table" in {
val result = await(1.minutes) {
client.listTables()
}
result.getTableNames().asScala should contain (Forum.tableName)
}
it should "contain the first sample Forum" in {
import org.scalatest.OptionValues._
await {
mapper.loadByKey[Forum](sampleForums.head.name)
} .value should be (sampleForums.head)
val result = await {
mapper.batchLoadByKeys[Forum](Seq(sampleForums.head.name))
}
result should have size (1)
result.head should be (sampleForums.head)
}
it should s"contain ${sampleForums.size} forum items" in {
await {
mapper.countScan[Forum]()
} should equal (sampleForums.size)
}
it should s"contain the sample forum items" in {
val forumScan = await {
mapper.scan[Forum]()
}
val forumScanOnce = await {
mapper.scanOnce[Forum]()
}
val forumScanOnceLimit = await {
mapper.scanOnce[Forum](limit = sampleForums.size)
}
val forumProgressively = await {
mapper.scanProgressively[Forum](limit = sampleForums.size).map(_._2)
}
val forumProgressivelyLimit = await {
for {
(lastEvaluatedKey, head) <- mapper.scanProgressively[Forum](limit = 1)
(lastEvaluatedKeyNone, next) <- mapper.scanProgressively[Forum](lastEvaluatedKey = lastEvaluatedKey)
} yield {
lastEvaluatedKey should not be empty
lastEvaluatedKeyNone shouldEqual None
head ++ next
}
}
val forumBatch = await {
mapper.batchLoadByKeys[Forum](sampleForums map (_.name))
}
forumScan should have size (sampleForums.size.toLong)
forumScanOnce should have size (sampleForums.size.toLong)
forumScanOnceLimit should have size (sampleForums.size.toLong)
forumProgressively should have size (sampleForums.size.toLong)
forumProgressivelyLimit should have size (sampleForums.size.toLong)
forumBatch should have size (sampleForums.size.toLong)
for (forum <- sampleForums) {
forumScan should contain (forum)
forumScanOnce should contain (forum)
forumBatch should contain (forum)
}
}
}
|