code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package de.frosner.broccoli.test.contexts
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.specs2.execute.{AsResult, Result}
import org.specs2.specification.ForEach
import org.specs2.specification.mutable.ExecutionEnvironment
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient
/**
* Provides a WSClient instance to tests.
*
* Requires the ExecutionEnvironment to be mixed in.
*/
trait WSClientContext extends ForEach[WSClient] {
  self: ExecutionEnvironment =>

  override protected def foreach[R: AsResult](f: (WSClient) => R): Result = {
    // A dedicated actor system isolates each example's HTTP client.
    implicit val system = ActorSystem("nomad-http-client")
    try {
      implicit val mat = ActorMaterializer()
      val wsClient: WSClient = AhcWSClient()
      try {
        AsResult(f(wsClient))
      } finally {
        // Always release the client's resources, even on a failing example.
        wsClient.close()
      }
    } finally {
      // Tear the actor system down regardless of how the example ended.
      system.terminate()
    }
  }
}
| FRosner/cluster-broccoli | server/src/it/scala/de/frosner/broccoli/test/contexts/WSClientContext.scala | Scala | apache-2.0 | 889 |
/** Wraps an integer and exposes value-based comparison through `eq`. */
class MyInt(val x: Int) {
  /** True when both wrappers hold the same underlying integer. */
  def eq(that: MyInt): Boolean = x == that.x
}
class Test {
  // Delegates to MyInt's value-based `eq` overload (not AnyRef.eq).
  def foo(x: MyInt, y: MyInt): Boolean = x.eq(y)
  // Universal-apply construction (Scala 3): no `new` keyword needed.
  val a = MyInt(2)
  val b = MyInt(3)
  // Evaluated during construction; result is discarded.
  foo(a, b)
}
/*
* Copyright 2012 Tumblr Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.config
import com.twitter.conversions.time.intToTimeableNumber
import com.twitter.finagle.redis.Client
import com.twitter.util.Duration
import com.twitter.zipkin.storage.redis.RedisStorage
/**
* RedisStorageConfig has sane defaults, except you must specify your host and port.
*/
trait RedisStorageConfig extends StorageConfig {

  /** Redis port; must be supplied by the concrete configuration. */
  val port: Int

  /** Redis host; must be supplied by the concrete configuration. */
  val host: String

  /** Time to live applied to stored traces; defaults to one week. */
  val tracesTimeToLive: Duration = 7.days

  // Built lazily so the abstract host/port are initialized before first use,
  // and shared by every storage instance produced by apply().
  lazy val _client: Client = Client("%s:%d".format(host, port))

  /**
   * The canonical way of making a new RedisStorage
   */
  def apply(): RedisStorage = new RedisStorage {
    val database = _client
    val ttl = Some(tracesTimeToLive)
  }
}
package java.io
import java.net.URI
import java.nio.file.Path
import scala.meta.internal.io._
// obtained implementation by experimentation on the JDK.
class File(path: String) {

  def this(parent: String, child: String) =
    this(parent + File.separator + child)

  def this(parent: File, child: String) =
    this(parent.getPath, child)

  def this(uri: URI) =
    this(
      if (uri.getScheme != "file") {
        throw new IllegalArgumentException("URI scheme is not \"file\"")
      } else {
        uri.getPath
      }
    )

  /** Converts this file into the Node-backed NIO path implementation. */
  def toPath: Path =
    NodeNIOPath(path)

  /** Builds a `file:` URI, appending a trailing slash for directories. */
  def toURI: URI = {
    val absolute = getAbsoluteFile.toString
    val uriPath =
      if (absolute.startsWith("/")) absolute
      else "/" + absolute.replace(File.separator, "/")
    val withSlash =
      if (isDirectory && !uriPath.endsWith("/")) uriPath + "/"
      else uriPath
    new URI("file", null, withSlash, null)
  }

  def getAbsoluteFile: File =
    toPath.toAbsolutePath.toFile

  def getAbsolutePath: String =
    getAbsoluteFile.toString

  def getParentFile: File =
    toPath.getParent.toFile

  // Directory creation is not available on this platform.
  def mkdirs(): Unit =
    throw new UnsupportedOperationException("mkdirs() is not supported in Scala.js")

  def getPath: String = path

  def exists(): Boolean = JSIO.exists(path)

  def isFile: Boolean = JSIO.isFile(path)

  def isDirectory: Boolean = JSIO.isDirectory(path)

  override def toString: String = path
}
object File {

  /** Filesystem roots: the parsed Node root when available, "/" otherwise. */
  def listRoots(): Array[File] = {
    val rootPath =
      if (JSIO.isNode) JSPath.parse(JSPath.resolve()).root
      else "/"
    Array(new File(rootPath))
  }

  /** First character of the platform separator. */
  def separatorChar: Char =
    separator.charAt(0)

  /** Path-component separator ("/" outside Node). */
  def separator: String =
    if (JSIO.isNode) JSPath.sep else "/"

  /** Path-list separator (":" outside Node). */
  def pathSeparator: String =
    if (JSIO.isNode) JSPath.delimiter else ":"
}
| scalameta/scalameta | scalameta/io/js/src/main/scala/java/io/File.scala | Scala | bsd-3-clause | 1,746 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.api.utils
import java.io.File
import java.util.UUID
import scala.collection.JavaConversions
import scala.util._
import org.apache.commons.io.FileUtils
import org.apache.curator.framework.CuratorFramework
import org.json4s.jackson.Serialization._
import com.stratio.sparta.driver.util.HdfsUtils
import com.stratio.sparta.serving.api.constants.ActorsConstant
import com.stratio.sparta.serving.api.helpers.SpartaHelper._
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.models.AggregationPoliciesModel
import com.stratio.sparta.serving.core.{CuratorFactoryHolder, SpartaConfig}
/**
 * Helper operations for managing aggregation policies: persistence in
 * Zookeeper (create/read/update), lookups by name or id, version handling
 * and checkpoint-folder cleanup in HDFS or the local filesystem.
 *
 * The `log` member used throughout comes from the SpartaHelper import.
 */
trait PolicyUtils {

  /**
   * Checks whether a policy named `name` (compared case-insensitively)
   * already exists in Zookeeper. When `id` is given, a policy carrying that
   * same id is not counted as a conflict (useful when updating a policy).
   * Any failure while reading is logged and reported as `false`.
   */
  def existsByName(name: String, id: Option[String] = None, curatorFramework: CuratorFramework): Boolean = {
    val nameToCompare = name.toLowerCase
    Try {
      if (existsPath)
        getPolicies(curatorFramework).exists(byName(id, nameToCompare))
      else {
        log.warn(s"Zookeeper path for policies doesn't exists. It will be created.")
        false
      }
    } match {
      case Success(result) => result
      case Failure(exception) =>
        // Zookeeper/deserialization failures are swallowed as "not found".
        log.error(exception.getLocalizedMessage, exception)
        false
    }
  }

  /** True when the base Zookeeper path for policies exists. */
  def existsPath: Boolean = CuratorFactoryHolder.existsPath(AppConstant.PoliciesBasePath)

  /**
   * Builds a predicate matching a policy by (already lower-cased) name,
   * excluding the policy with the given id when one is provided.
   */
  def byName(id: Option[String], nameToCompare: String): (AggregationPoliciesModel) => Boolean = {
    policy =>
      if (id.isDefined)
        policy.name == nameToCompare && policy.id.get != id.get
      else policy.name == nameToCompare
  }

  /**
   * Upserts a policy in Zookeeper: if it can be read back (i.e. it already
   * exists) it is updated, otherwise it is created.
   */
  def savePolicyInZk(policy: AggregationPoliciesModel, curatorFramework: CuratorFramework): Unit = {
    Try {
      populatePolicy(policy, curatorFramework)
    } match {
      case Success(_) =>
        log.info(s"Policy ${policy.id.get} already in zookeeper. Updating it...")
        updatePolicy(policy, curatorFramework)
      case Failure(e) =>
        // Read failed (typically the node is missing): create it instead.
        writePolicy(policy, curatorFramework)
    }
  }

  /** Creates the policy znode (and any missing parents) with the serialized policy. */
  def writePolicy(policy: AggregationPoliciesModel, curatorFramework: CuratorFramework): Unit = {
    curatorFramework.create().creatingParentsIfNeeded().forPath(
      s"${AppConstant.PoliciesBasePath}/${policy.id.get}", write(policy).getBytes)
  }

  /** Overwrites the existing policy znode with the serialized policy. */
  def updatePolicy(policy: AggregationPoliciesModel, curatorFramework: CuratorFramework): Unit = {
    curatorFramework.setData().forPath(s"${AppConstant.PoliciesBasePath}/${policy.id.get}", write(policy).getBytes)
  }

  /** Reads the policy with this policy's id back from Zookeeper and deserializes it. */
  def populatePolicy(policy: AggregationPoliciesModel, curatorFramework: CuratorFramework): AggregationPoliciesModel = {
    read[AggregationPoliciesModel](new Predef.String(curatorFramework.getData.forPath(
      s"${AppConstant.PoliciesBasePath}/${policy.id.get}")))
  }

  /**
   * Ensures the policy has an id (generating a random UUID when missing),
   * lower-cases its name and resets its version to the initial unit version.
   */
  def policyWithId(policy: AggregationPoliciesModel): AggregationPoliciesModel =
    (policy.id match {
      case None => populatePolicyWithRandomUUID(policy)
      case Some(_) => policy
    }).copy(name = policy.name.toLowerCase, version = Some(ActorsConstant.UnitVersion))

  /** Returns a copy of the policy with a freshly generated random UUID as id. */
  def populatePolicyWithRandomUUID(policy: AggregationPoliciesModel): AggregationPoliciesModel = {
    policy.copy(id = Some(UUID.randomUUID.toString))
  }

  /**
   * Deletes the policy's checkpoint folder, choosing HDFS when not running
   * in local mode or when the checkpoint path itself points at HDFS; the
   * outcome is only logged, never rethrown.
   */
  def deleteCheckpointPath(policy: AggregationPoliciesModel): Unit = {
    Try {
      if (!isLocalMode || checkpointGoesToHDFS(policy))
        deleteFromHDFS(policy)
      else deleteFromLocal(policy)
    } match {
      case Success(_) => log.info(s"Checkpoint deleted in folder: ${AggregationPoliciesModel.checkpointPath(policy)}")
      case Failure(ex) => log.error("Cannot delete checkpoint folder", ex)
    }
  }

  /** Removes the checkpoint directory from the local filesystem. */
  def deleteFromLocal(policy: AggregationPoliciesModel): Unit =
    FileUtils.deleteDirectory(new File(AggregationPoliciesModel.checkpointPath(policy)))

  /** Removes the checkpoint directory from HDFS. */
  def deleteFromHDFS(policy: AggregationPoliciesModel): Unit =
    HdfsUtils(SpartaConfig.getHdfsConfig).delete(AggregationPoliciesModel.checkpointPath(policy))

  /** True when the policy's checkpoint path explicitly targets HDFS. */
  def checkpointGoesToHDFS(policy: AggregationPoliciesModel): Boolean =
    policy.checkpointPath.exists(_.startsWith("hdfs://"))

  /** True when the configured execution mode is "local"; defaults to true when no detail config exists. */
  def isLocalMode: Boolean =
    SpartaConfig.getDetailConfig match {
      case Some(detailConfig) => detailConfig.getString(AppConstant.ExecutionMode).equalsIgnoreCase("local")
      case None => true
    }

  /**
   * Finds a policy either by id (when `id` is defined, the name is ignored)
   * or by case-insensitive name otherwise. Returns None when the base path
   * is missing or when reading fails (the failure is logged).
   */
  def existsByNameId(name: String, id: Option[String] = None, curatorFramework: CuratorFramework):
  Option[AggregationPoliciesModel] = {
    val nameToCompare = name.toLowerCase
    Try {
      if (existsPath) {
        getPolicies(curatorFramework)
          .find(policy => if (id.isDefined) policy.id.get == id.get else policy.name == nameToCompare)
      } else None
    } match {
      case Success(result) => result
      case Failure(exception) =>
        log.error(exception.getLocalizedMessage, exception)
        None
    }
  }

  /**
   * Computes the version for an updated policy: bumped by one unit when the
   * cubes changed (starting at the unit version when previously unset),
   * otherwise the previous version is kept as-is.
   */
  def setVersion(lastPolicy: AggregationPoliciesModel, newPolicy: AggregationPoliciesModel): Option[Int] = {
    if (lastPolicy.cubes != newPolicy.cubes) {
      lastPolicy.version match {
        case Some(version) => Some(version + ActorsConstant.UnitVersion)
        case None => Some(ActorsConstant.UnitVersion)
      }
    } else lastPolicy.version
  }

  /** Reads and deserializes every policy stored under the policies base path. */
  def getPolicies(curatorFramework: CuratorFramework): List[AggregationPoliciesModel] = {
    val children = curatorFramework.getChildren.forPath(AppConstant.PoliciesBasePath)
    JavaConversions.asScalaBuffer(children).toList.map(element =>
      read[AggregationPoliciesModel](new Predef.String(curatorFramework.getData.
        forPath(s"${AppConstant.PoliciesBasePath}/$element"))))
  }

  /** Reads and deserializes the single policy stored under the given id. */
  def byId(id: String, curatorFramework: CuratorFramework): AggregationPoliciesModel =
    read[AggregationPoliciesModel](
      new Predef.String(curatorFramework.getData.forPath(s"${AppConstant.PoliciesBasePath}/$id")))

  /** Deletes the checkpoint folders of every given policy. */
  def deleteRelatedPolicies(policies: Seq[AggregationPoliciesModel]): Unit = {
    policies.foreach(deleteCheckpointPath)
  }
}
| danielcsant/sparta | serving-api/src/main/scala/com/stratio/sparta/serving/api/utils/PolicyUtils.scala | Scala | apache-2.0 | 6,481 |
package com.omis.client.RootModels
import com.omis.EmpDetails
/** Root model carrying a sequence of employee details ([[EmpDetails]]). */
case class EmployeesRootModel(emp: Seq[EmpDetails])
| iriddhi/mis | client/src/main/scala/com/omis/client/RootModels/EmployeesRootModel.scala | Scala | apache-2.0 | 116 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.immutable.Set
import com.yammer.metrics.core.Gauge
import java.lang.{IllegalStateException, Object}
import java.util.concurrent.TimeUnit
import kafka.admin.PreferredReplicaLeaderElectionCommand
import kafka.api._
import kafka.cluster.Broker
import kafka.common._
import kafka.metrics.{KafkaTimer, KafkaMetricsGroup}
import kafka.server.{ZookeeperLeaderElector, KafkaConfig}
import kafka.utils.ZkUtils._
import kafka.utils.{Utils, ZkUtils, Logging}
import org.apache.zookeeper.Watcher.Event.KeeperState
import org.I0Itec.zkclient.{IZkDataListener, IZkStateListener, ZkClient}
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException}
import scala.Some
import kafka.common.TopicAndPartition
/**
 * Mutable state shared by the controller: cluster metadata caches, locks,
 * the current controller epoch and in-flight reassignment/election bookkeeping.
 */
class ControllerContext(val zkClient: ZkClient,
                        var controllerChannelManager: ControllerChannelManager = null,
                        val controllerLock: Object = new Object,
                        var shuttingDownBrokerIds: mutable.Set[Int] = mutable.Set.empty,
                        val brokerShutdownLock: Object = new Object,
                        var epoch: Int = KafkaController.InitialControllerEpoch - 1,
                        var epochZkVersion: Int = KafkaController.InitialControllerEpochZkVersion - 1,
                        var allTopics: Set[String] = Set.empty,
                        var partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map.empty,
                        var allLeaders: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = mutable.Map.empty,
                        var partitionsBeingReassigned: mutable.Map[TopicAndPartition, ReassignedPartitionsContext] =
                        new mutable.HashMap,
                        var partitionsUndergoingPreferredReplicaElection: mutable.Set[TopicAndPartition] =
                        new mutable.HashSet) {

  // Backing caches for the live-broker set; refreshed together via liveBrokers_=.
  private var cachedLiveBrokers: Set[Broker] = Set.empty
  private var cachedLiveBrokerIds: Set[Int] = Set.empty

  /** Replaces the cached broker set and recomputes the cached id set. */
  def liveBrokers_=(brokers: Set[Broker]) {
    cachedLiveBrokers = brokers
    cachedLiveBrokerIds = brokers.map(_.id)
  }

  /** Live brokers, excluding any currently shutting down. */
  def liveBrokers = cachedLiveBrokers.filterNot(broker => shuttingDownBrokerIds.contains(broker.id))

  /** Ids of live brokers, excluding any currently shutting down. */
  def liveBrokerIds = cachedLiveBrokerIds.filterNot(brokerId => shuttingDownBrokerIds.contains(brokerId))

  /** Ids of all brokers that are either live or in the process of shutting down. */
  def liveOrShuttingDownBrokerIds = cachedLiveBrokerIds ++ shuttingDownBrokerIds
}
/** JMX-facing interface exposed by the controller (registered under KafkaController.MBeanName). */
trait KafkaControllerMBean {
  /**
   * Initiates a clean shutdown of the broker with the given id.
   *
   * @param id id of the broker to shut down
   * @return the number of partitions that the broker still leads
   */
  def shutdownBroker(id: Int): Int
}
object KafkaController {
  // JMX name under which the active controller registers its MBean
  // (see the Utils.registerMBean call in onControllerFailover).
  val MBeanName = "kafka.controller:type=KafkaController,name=ControllerOps"
  // Epoch of the very first elected controller; ControllerContext starts one
  // below this value so the first increment produces it.
  val InitialControllerEpoch = 1
  // Zookeeper version of the epoch path right after its initial creation.
  val InitialControllerEpochZkVersion = 1
}
class KafkaController(val config : KafkaConfig, zkClient: ZkClient) extends Logging with KafkaMetricsGroup with KafkaControllerMBean {
this.logIdent = "[Controller " + config.brokerId + "]: "
private var isRunning = true
val controllerContext = new ControllerContext(zkClient)
private val partitionStateMachine = new PartitionStateMachine(this)
private val replicaStateMachine = new ReplicaStateMachine(this)
private val controllerElector = new ZookeeperLeaderElector(controllerContext, ZkUtils.ControllerPath, onControllerFailover,
config.brokerId)
private val reassignedPartitionLeaderSelector = new ReassignedPartitionLeaderSelector(controllerContext)
private val preferredReplicaPartitionLeaderSelector = new PreferredReplicaPartitionLeaderSelector(controllerContext)
private val controlledShutdownPartitionLeaderSelector = new ControlledShutdownLeaderSelector(controllerContext)
private val brokerRequestBatch = new ControllerBrokerRequestBatch(sendRequest)
registerControllerChangedListener()
newGauge(
"ActiveControllerCount",
new Gauge[Int] {
def getValue() = if (isActive) 1 else 0
}
)
def epoch = controllerContext.epoch
/**
* JMX operation to initiate clean shutdown of a broker. On clean shutdown,
* the controller first determines the partitions that the shutting down
* broker leads, and moves leadership of those partitions to another broker
* that is in that partition's ISR. When all partitions have been moved, the
* broker process can be stopped normally (i.e., by sending it a SIGTERM or
* SIGINT) and no data loss should be observed.
*
* @param id Id of the broker to shutdown.
* @return The number of partitions that the broker still leads.
*/
def shutdownBroker(id: Int) = {
controllerContext.brokerShutdownLock synchronized {
info("Shutting down broker " + id)
controllerContext.controllerLock synchronized {
if (!controllerContext.liveOrShuttingDownBrokerIds.contains(id))
throw new BrokerNotAvailableException("Broker id %d does not exist.".format(id))
controllerContext.shuttingDownBrokerIds.add(id)
debug("All shutting down brokers: " + controllerContext.shuttingDownBrokerIds.mkString(","))
debug("Live brokers: " + controllerContext.liveBrokerIds.mkString(","))
}
val allPartitionsAndReplicationFactorOnBroker = controllerContext.controllerLock synchronized {
getPartitionsAssignedToBroker(zkClient, controllerContext.allTopics.toSeq, id).map {
case(topic, partition) =>
val topicAndPartition = TopicAndPartition(topic, partition)
(topicAndPartition, controllerContext.partitionReplicaAssignment(topicAndPartition).size)
}
}
def replicatedPartitionsBrokerLeads() = controllerContext.controllerLock.synchronized {
trace("All leaders = " + controllerContext.allLeaders.mkString(","))
controllerContext.allLeaders.filter {
case (topicAndPartition, leaderIsrAndControllerEpoch) =>
leaderIsrAndControllerEpoch.leaderAndIsr.leader == id && controllerContext.partitionReplicaAssignment(topicAndPartition).size > 1
}.map(_._1)
}
val partitionsToMove = replicatedPartitionsBrokerLeads().toSet
debug("Partitions to move leadership from broker %d: %s".format(id, partitionsToMove.mkString(",")))
partitionsToMove.foreach{ topicAndPartition =>
val (topic, partition) = topicAndPartition.asTuple
// move leadership serially to relinquish lock.
controllerContext.controllerLock synchronized {
controllerContext.allLeaders.get(topicAndPartition).foreach { currLeaderIsrAndControllerEpoch =>
if (currLeaderIsrAndControllerEpoch.leaderAndIsr.leader == id) {
partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition,
controlledShutdownPartitionLeaderSelector)
val newLeaderIsrAndControllerEpoch = controllerContext.allLeaders(topicAndPartition)
// mark replica offline only if leadership was moved successfully
if (newLeaderIsrAndControllerEpoch.leaderAndIsr.leader != currLeaderIsrAndControllerEpoch.leaderAndIsr.leader)
replicaStateMachine.handleStateChanges(Set(PartitionAndReplica(topic, partition, id)), OfflineReplica)
} else
debug("Partition %s moved from leader %d to new leader %d during shutdown."
.format(topicAndPartition, id, currLeaderIsrAndControllerEpoch.leaderAndIsr.leader))
}
}
}
/*
* Force the shutting down broker out of the ISR of partitions that it
* follows, and shutdown the corresponding replica fetcher threads.
* This is really an optimization, so no need to register any callback
* to wait until completion.
*/
brokerRequestBatch.newBatch()
allPartitionsAndReplicationFactorOnBroker foreach {
case(topicAndPartition, replicationFactor) =>
val (topic, partition) = topicAndPartition.asTuple
if (controllerContext.allLeaders(topicAndPartition).leaderAndIsr.leader != id) {
brokerRequestBatch.addStopReplicaRequestForBrokers(Seq(id), topic, partition, deletePartition = false)
removeReplicaFromIsr(topic, partition, id) match {
case Some(updatedLeaderIsrAndControllerEpoch) =>
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(
Seq(updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader), topic, partition,
updatedLeaderIsrAndControllerEpoch, replicationFactor)
case None =>
// ignore
}
}
}
brokerRequestBatch.sendRequestsToBrokers(epoch, controllerContext.liveBrokers)
val partitionsRemaining = replicatedPartitionsBrokerLeads().toSet
debug("Remaining partitions to move on broker %d: %s".format(id, partitionsRemaining.mkString(",")))
partitionsRemaining.size
}
}
/**
* This callback is invoked by the zookeeper leader elector on electing the current broker as the new controller.
* It does the following things on the become-controller state change -
* 1. Register controller epoch changed listener
* 2. Increments the controller epoch
* 3. Initializes the controller's context object that holds cache objects for current topics, live brokers and
* leaders for all existing partitions.
* 4. Starts the controller's channel manager
* 5. Starts the replica state machine
* 6. Starts the partition state machine
* If it encounters any unexpected exception/error while becoming controller, it resigns as the current controller.
* This ensures another controller election will be triggered and there will always be an actively serving controller
*/
def onControllerFailover() {
if(isRunning) {
info("Broker %d starting become controller state transition".format(config.brokerId))
// increment the controller epoch
incrementControllerEpoch(zkClient)
// before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks
registerReassignedPartitionsListener()
registerPreferredReplicaElectionListener()
partitionStateMachine.registerListeners()
replicaStateMachine.registerListeners()
initializeControllerContext()
partitionStateMachine.startup()
replicaStateMachine.startup()
Utils.registerMBean(this, KafkaController.MBeanName)
info("Broker %d is ready to serve as the new controller with epoch %d".format(config.brokerId, epoch))
}
else
info("Controller has been shut down, aborting startup/failover")
}
/**
* Returns true if this broker is the current controller.
*/
def isActive(): Boolean = {
controllerContext.controllerChannelManager != null
}
/**
* This callback is invoked by the replica state machine's broker change listener, with the list of newly started
* brokers as input. It does the following -
* 1. Triggers the OnlinePartition state change for all new/offline partitions
* 2. It checks whether there are reassigned replicas assigned to any newly started brokers. If
* so, it performs the reassignment logic for each topic/partition.
*
* Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point for two reasons:
* 1. The partition state machine, when triggering online state change, will refresh leader and ISR for only those
* partitions currently new or offline (rather than every partition this controller is aware of)
* 2. Even if we do refresh the cache, there is no guarantee that by the time the leader and ISR request reaches
* every broker that it is still valid. Brokers check the leader epoch to determine validity of the request.
*/
def onBrokerStartup(newBrokers: Seq[Int]) {
info("New broker startup callback for %s".format(newBrokers.mkString(",")))
val newBrokersSet = newBrokers.toSet
// update partition state machine
partitionStateMachine.triggerOnlinePartitionStateChange()
replicaStateMachine.handleStateChanges(getAllReplicasOnBroker(zkClient, controllerContext.allTopics.toSeq, newBrokers), OnlineReplica)
// check if reassignment of some partitions need to be restarted
val partitionsWithReplicasOnNewBrokers = controllerContext.partitionsBeingReassigned.filter{
case (topicAndPartition, reassignmentContext) =>
reassignmentContext.newReplicas.exists(newBrokersSet.contains(_))
}
partitionsWithReplicasOnNewBrokers.foreach(p => onPartitionReassignment(p._1, p._2))
}
/**
* This callback is invoked by the replica state machine's broker change listener with the list of failed brokers
* as input. It does the following -
* 1. Mark partitions with dead leaders as offline
* 2. Triggers the OnlinePartition state change for all new/offline partitions
* 3. Invokes the OfflineReplica state change on the input list of newly started brokers
*
* Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point. This is because
* the partition state machine will refresh our cache for us when performing leader election for all new/offline
* partitions coming online.
*/
def onBrokerFailure(deadBrokers: Seq[Int]) {
info("Broker failure callback for %s".format(deadBrokers.mkString(",")))
val deadBrokersThatWereShuttingDown =
deadBrokers.filter(id => controllerContext.shuttingDownBrokerIds.remove(id))
info("Removed %s from list of shutting down brokers.".format(deadBrokersThatWereShuttingDown))
val deadBrokersSet = deadBrokers.toSet
// trigger OfflinePartition state for all partitions whose current leader is one amongst the dead brokers
val partitionsWithoutLeader = controllerContext.allLeaders.filter(partitionAndLeader =>
deadBrokersSet.contains(partitionAndLeader._2.leaderAndIsr.leader)).keySet
partitionStateMachine.handleStateChanges(partitionsWithoutLeader, OfflinePartition)
// trigger OnlinePartition state changes for offline or new partitions
partitionStateMachine.triggerOnlinePartitionStateChange()
// handle dead replicas
replicaStateMachine.handleStateChanges(getAllReplicasOnBroker(zkClient, controllerContext.allTopics.toSeq, deadBrokers), OfflineReplica)
}
/**
* This callback is invoked by the partition state machine's topic change listener with the list of failed brokers
* as input. It does the following -
* 1. Registers partition change listener. This is not required until KAFKA-347
* 2. Invokes the new partition callback
*/
def onNewTopicCreation(topics: Set[String], newPartitions: Set[TopicAndPartition]) {
info("New topic creation callback for %s".format(newPartitions.mkString(",")))
// subscribe to partition changes
topics.foreach(topic => partitionStateMachine.registerPartitionChangeListener(topic))
onNewPartitionCreation(newPartitions)
}
/**
* This callback is invoked by the topic change callback with the list of failed brokers as input.
* It does the following -
* 1. Move the newly created partitions to the NewPartition state
* 2. Move the newly created partitions from NewPartition->OnlinePartition state
*/
def onNewPartitionCreation(newPartitions: Set[TopicAndPartition]) {
info("New partition creation callback for %s".format(newPartitions.mkString(",")))
partitionStateMachine.handleStateChanges(newPartitions, NewPartition)
replicaStateMachine.handleStateChanges(getAllReplicasForPartition(newPartitions), NewReplica)
partitionStateMachine.handleStateChanges(newPartitions, OnlinePartition)
replicaStateMachine.handleStateChanges(getAllReplicasForPartition(newPartitions), OnlineReplica)
}
/**
* This callback is invoked by the reassigned partitions listener. When an admin command initiates a partition
* reassignment, it creates the /admin/reassign_partitions path that triggers the zookeeper listener.
* Reassigning replicas for a partition goes through a few stages -
* RAR = Reassigned replicas
* AR = Original list of replicas for partition
* 1. Register listener for ISR changes to detect when the RAR is a subset of the ISR
* 2. Start new replicas RAR - AR.
* 3. Wait until new replicas are in sync with the leader
* 4. If the leader is not in RAR, elect a new leader from RAR
* 5. Stop old replicas AR - RAR
* 6. Write new AR
* 7. Remove partition from the /admin/reassign_partitions path
*/
def onPartitionReassignment(topicAndPartition: TopicAndPartition, reassignedPartitionContext: ReassignedPartitionsContext) {
val reassignedReplicas = reassignedPartitionContext.newReplicas
areReplicasInIsr(topicAndPartition.topic, topicAndPartition.partition, reassignedReplicas) match {
case true =>
// mark the new replicas as online
reassignedReplicas.foreach { replica =>
replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition,
replica)), OnlineReplica)
}
// check if current leader is in the new replicas list. If not, controller needs to trigger leader election
moveReassignedPartitionLeaderIfRequired(topicAndPartition, reassignedPartitionContext)
// stop older replicas
stopOldReplicasOfReassignedPartition(topicAndPartition, reassignedPartitionContext)
// write the new list of replicas for this partition in zookeeper
updateAssignedReplicasForPartition(topicAndPartition, reassignedPartitionContext)
// update the /admin/reassign_partitions path to remove this partition
removePartitionFromReassignedPartitions(topicAndPartition)
info("Removed partition %s from the list of reassigned partitions in zookeeper".format(topicAndPartition))
controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
case false =>
info("New replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
"reassigned not yet caught up with the leader")
// start new replicas
startNewReplicasForReassignedPartition(topicAndPartition, reassignedPartitionContext)
info("Waiting for new replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
"reassigned to catch up with the leader")
}
}
def onPreferredReplicaElection(partitions: Set[TopicAndPartition]) {
info("Starting preferred replica leader election for partitions %s".format(partitions.mkString(",")))
controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitions
partitionStateMachine.handleStateChanges(partitions, OnlinePartition, preferredReplicaPartitionLeaderSelector)
}
/**
* Invoked when the controller module of a Kafka server is started up. This does not assume that the current broker
* is the controller. It merely registers the session expiration listener and starts the controller leader
* elector
*/
def startup() = {
controllerContext.controllerLock synchronized {
info("Controller starting up");
registerSessionExpirationListener()
isRunning = true
controllerElector.startup
info("Controller startup complete")
}
}
/**
* Invoked when the controller module of a Kafka server is shutting down. If the broker was the current controller,
* it shuts down the partition and replica state machines. If not, those are a no-op. In addition to that, it also
* shuts down the controller channel manager, if one exists (i.e. if it was the current controller)
*/
def shutdown() = {
controllerContext.controllerLock synchronized {
isRunning = false
partitionStateMachine.shutdown()
replicaStateMachine.shutdown()
if(controllerContext.controllerChannelManager != null) {
controllerContext.controllerChannelManager.shutdown()
controllerContext.controllerChannelManager = null
info("Controller shutdown complete")
}
}
}
def sendRequest(brokerId : Int, request : RequestOrResponse, callback: (RequestOrResponse) => Unit = null) = {
controllerContext.controllerChannelManager.sendRequest(brokerId, request, callback)
}
def incrementControllerEpoch(zkClient: ZkClient) = {
try {
var newControllerEpoch = controllerContext.epoch + 1
val (updateSucceeded, newVersion) = ZkUtils.conditionalUpdatePersistentPathIfExists(zkClient,
ZkUtils.ControllerEpochPath, newControllerEpoch.toString, controllerContext.epochZkVersion)
if(!updateSucceeded)
throw new ControllerMovedException("Controller moved to another broker. Aborting controller startup procedure")
else {
controllerContext.epochZkVersion = newVersion
controllerContext.epoch = newControllerEpoch
}
} catch {
case nne: ZkNoNodeException =>
// if path doesn't exist, this is the first controller whose epoch should be 1
// the following call can still fail if another controller gets elected between checking if the path exists and
// trying to create the controller epoch path
try {
zkClient.createPersistent(ZkUtils.ControllerEpochPath, KafkaController.InitialControllerEpoch.toString)
controllerContext.epoch = KafkaController.InitialControllerEpoch
controllerContext.epochZkVersion = KafkaController.InitialControllerEpochZkVersion
} catch {
case e: ZkNodeExistsException => throw new ControllerMovedException("Controller moved to another broker. " +
"Aborting controller startup procedure")
case oe => error("Error while incrementing controller epoch", oe)
}
case oe => error("Error while incrementing controller epoch", oe)
}
info("Controller %d incremented epoch to %d".format(config.brokerId, controllerContext.epoch))
}
private def registerSessionExpirationListener() = {
zkClient.subscribeStateChanges(new SessionExpirationListener())
}
  /**
   * Builds the controller's in-memory view of the cluster from zookeeper: live brokers, topics,
   * replica assignments and partition leadership. Then starts the controller-to-broker channel
   * manager and resumes any in-flight partition reassignment / preferred replica election that a
   * previous controller may have left incomplete.
   */
  private def initializeControllerContext() {
    controllerContext.liveBrokers = ZkUtils.getAllBrokersInCluster(zkClient).toSet
    controllerContext.allTopics = ZkUtils.getAllTopics(zkClient).toSet
    controllerContext.partitionReplicaAssignment = ZkUtils.getReplicaAssignmentForTopics(zkClient, controllerContext.allTopics.toSeq)
    controllerContext.allLeaders = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
    // update the leader and isr cache for all existing partitions from Zookeeper
    updateLeaderAndIsrCache()
    // start the channel manager
    startChannelManager()
    info("Currently active brokers in the cluster: %s".format(controllerContext.liveBrokerIds))
    info("Currently shutting brokers in the cluster: %s".format(controllerContext.shuttingDownBrokerIds))
    info("Current list of topics in the cluster: %s".format(controllerContext.allTopics))
    // Resume admin-triggered work that may have been interrupted by a controller failover.
    initializeAndMaybeTriggerPartitionReassignment()
    initializeAndMaybeTriggerPreferredReplicaElection()
  }
private def initializeAndMaybeTriggerPartitionReassignment() {
// read the partitions being reassigned from zookeeper path /admin/reassign_partitions
val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
// check if they are already completed
val reassignedPartitions = partitionsBeingReassigned.filter(partition =>
controllerContext.partitionReplicaAssignment(partition._1) == partition._2.newReplicas).map(_._1)
reassignedPartitions.foreach(p => removePartitionFromReassignedPartitions(p))
controllerContext.partitionsBeingReassigned ++= partitionsBeingReassigned
controllerContext.partitionsBeingReassigned --= reassignedPartitions
info("Partitions being reassigned: %s".format(partitionsBeingReassigned.toString()))
info("Partitions already reassigned: %s".format(reassignedPartitions.toString()))
info("Resuming reassignment of partitions: %s".format(controllerContext.partitionsBeingReassigned.toString()))
controllerContext.partitionsBeingReassigned.foreach(partition => onPartitionReassignment(partition._1, partition._2))
}
private def initializeAndMaybeTriggerPreferredReplicaElection() {
// read the partitions undergoing preferred replica election from zookeeper path
val partitionsUndergoingPreferredReplicaElection = ZkUtils.getPartitionsUndergoingPreferredReplicaElection(zkClient)
// check if they are already completed
val partitionsThatCompletedPreferredReplicaElection = partitionsUndergoingPreferredReplicaElection.filter(partition =>
controllerContext.allLeaders(partition).leaderAndIsr.leader == controllerContext.partitionReplicaAssignment(partition).head)
controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitionsUndergoingPreferredReplicaElection
controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsThatCompletedPreferredReplicaElection
info("Partitions undergoing preferred replica election: %s".format(partitionsUndergoingPreferredReplicaElection.mkString(",")))
info("Partitions that completed preferred replica election: %s".format(partitionsThatCompletedPreferredReplicaElection.mkString(",")))
info("Resuming preferred replica election for partitions: %s".format(controllerContext.partitionsUndergoingPreferredReplicaElection.mkString(",")))
onPreferredReplicaElection(controllerContext.partitionsUndergoingPreferredReplicaElection.toSet)
}
  /** Creates and starts the channel manager that maintains connections to all currently live brokers. */
  private def startChannelManager() {
    controllerContext.controllerChannelManager = new ControllerChannelManager(controllerContext.liveBrokers, config)
    controllerContext.controllerChannelManager.startup()
  }
private def updateLeaderAndIsrCache() {
val leaderAndIsrInfo = ZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, controllerContext.allTopics.toSeq)
for((topicPartition, leaderIsrAndControllerEpoch) <- leaderAndIsrInfo) {
// If the leader specified in the leaderAndIsr is no longer alive, there is no need to recover it
controllerContext.liveBrokerIds.contains(leaderIsrAndControllerEpoch.leaderAndIsr.leader) match {
case true =>
controllerContext.allLeaders.put(topicPartition, leaderIsrAndControllerEpoch)
case false =>
debug("While refreshing controller's leader and isr cache, leader %d for ".format(leaderIsrAndControllerEpoch.leaderAndIsr.leader) +
"partition %s is dead, just ignore it".format(topicPartition))
}
}
}
private def areReplicasInIsr(topic: String, partition: Int, replicas: Seq[Int]): Boolean = {
getLeaderAndIsrForPartition(zkClient, topic, partition) match {
case Some(leaderAndIsr) =>
val replicasNotInIsr = replicas.filterNot(r => leaderAndIsr.isr.contains(r))
replicasNotInIsr.isEmpty
case None => false
}
}
private def moveReassignedPartitionLeaderIfRequired(topicAndPartition: TopicAndPartition,
reassignedPartitionContext: ReassignedPartitionsContext) {
val reassignedReplicas = reassignedPartitionContext.newReplicas
val currentLeader = controllerContext.allLeaders(topicAndPartition).leaderAndIsr.leader
if(!reassignedPartitionContext.newReplicas.contains(currentLeader)) {
info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
"is not in the new list of replicas %s. Re-electing leader".format(reassignedReplicas.mkString(",")))
// move the leader to one of the alive and caught up new replicas
partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
} else {
// check if the leader is alive or not
controllerContext.liveBrokerIds.contains(currentLeader) match {
case true =>
info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
"is already in the new list of replicas %s and is alive".format(reassignedReplicas.mkString(",")))
case false =>
info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
"is already in the new list of replicas %s but is dead".format(reassignedReplicas.mkString(",")))
partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
}
}
}
private def stopOldReplicasOfReassignedPartition(topicAndPartition: TopicAndPartition,
reassignedPartitionContext: ReassignedPartitionsContext) {
val reassignedReplicas = reassignedPartitionContext.newReplicas
val topic = topicAndPartition.topic
val partition = topicAndPartition.partition
// send stop replica state change request to the old replicas
val oldReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition).toSet -- reassignedReplicas.toSet
// first move the replica to offline state (the controller removes it from the ISR)
oldReplicas.foreach { replica =>
replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topic, partition, replica)), OfflineReplica)
}
// send stop replica command to the old replicas
oldReplicas.foreach { replica =>
replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topic, partition, replica)), NonExistentReplica)
}
}
private def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
reassignedPartitionContext: ReassignedPartitionsContext) {
val reassignedReplicas = reassignedPartitionContext.newReplicas
val partitionsAndReplicasForThisTopic = controllerContext.partitionReplicaAssignment.filter(_._1.topic.equals(topicAndPartition.topic))
partitionsAndReplicasForThisTopic.put(topicAndPartition, reassignedReplicas)
updateAssignedReplicasForPartition(topicAndPartition, partitionsAndReplicasForThisTopic)
info("Updated assigned replicas for partition %s being reassigned to %s ".format(topicAndPartition, reassignedReplicas.mkString(",")))
// update the assigned replica list after a successful zookeeper write
controllerContext.partitionReplicaAssignment.put(topicAndPartition, reassignedReplicas)
// stop watching the ISR changes for this partition
zkClient.unsubscribeDataChanges(ZkUtils.getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition),
controllerContext.partitionsBeingReassigned(topicAndPartition).isrChangeListener)
// update the assigned replica list
controllerContext.partitionReplicaAssignment.put(topicAndPartition, reassignedReplicas)
}
private def startNewReplicasForReassignedPartition(topicAndPartition: TopicAndPartition,
reassignedPartitionContext: ReassignedPartitionsContext) {
// send the start replica request to the brokers in the reassigned replicas list that are not in the assigned
// replicas list
val assignedReplicaSet = Set.empty[Int] ++ controllerContext.partitionReplicaAssignment(topicAndPartition)
val reassignedReplicaSet = Set.empty[Int] ++ reassignedPartitionContext.newReplicas
val newReplicas: Seq[Int] = (reassignedReplicaSet -- assignedReplicaSet).toSeq
newReplicas.foreach { replica =>
replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, replica)), NewReplica)
}
}
  /** Watches /admin/reassign_partitions for new partition reassignment requests. */
  private def registerReassignedPartitionsListener() = {
    zkClient.subscribeDataChanges(ZkUtils.ReassignPartitionsPath, new PartitionsReassignedListener(this))
  }
  /** Watches the preferred-replica-election admin path for new election requests. */
  private def registerPreferredReplicaElectionListener() {
    zkClient.subscribeDataChanges(ZkUtils.PreferredReplicaLeaderElectionPath, new PreferredReplicaElectionListener(this))
  }
  /** Watches the controller epoch path so this broker's cached epoch/zkVersion stay current. */
  private def registerControllerChangedListener() {
    zkClient.subscribeDataChanges(ZkUtils.ControllerEpochPath, new ControllerEpochListener(this))
  }
def removePartitionFromReassignedPartitions(topicAndPartition: TopicAndPartition) {
// read the current list of reassigned partitions from zookeeper
val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
// remove this partition from that list
val updatedPartitionsBeingReassigned = partitionsBeingReassigned - topicAndPartition
// write the new list to zookeeper
ZkUtils.updatePartitionReassignmentData(zkClient, updatedPartitionsBeingReassigned.mapValues(_.newReplicas))
// update the cache
controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
}
  /**
   * Persists the given replica assignment for a topic to the topic's zookeeper path. The JSON
   * written maps each partition id (as a string) to its replica id list (as strings).
   *
   * @param topicAndPartition partition whose topic path is updated (only the topic part is used here)
   * @param newReplicaAssignmentForTopic complete partition -> replicas assignment for that topic
   * @throws IllegalStateException if the topic's path does not exist in zookeeper
   * @throws KafkaException wrapping any other failure. NOTE(review): only e2.toString is kept, so
   *                        the original stack trace/cause is lost — consider chaining the cause.
   */
  def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
                                         newReplicaAssignmentForTopic: Map[TopicAndPartition, Seq[Int]]) {
    try {
      val zkPath = ZkUtils.getTopicPath(topicAndPartition.topic)
      val jsonPartitionMap = Utils.mapToJson(newReplicaAssignmentForTopic.map(e =>
        (e._1.partition.toString -> e._2.map(_.toString))))
      ZkUtils.updatePersistentPath(zkClient, zkPath, jsonPartitionMap)
      debug("Updated path %s with %s for replica assignment".format(zkPath, jsonPartitionMap))
    } catch {
      case e: ZkNoNodeException => throw new IllegalStateException("Topic %s doesn't exist".format(topicAndPartition.topic))
      case e2 => throw new KafkaException(e2.toString)
    }
  }
def removePartitionsFromPreferredReplicaElection(partitionsToBeRemoved: Set[TopicAndPartition]) {
for(partition <- partitionsToBeRemoved) {
// check the status
val currentLeader = controllerContext.allLeaders(partition).leaderAndIsr.leader
val preferredReplica = controllerContext.partitionReplicaAssignment(partition).head
if(currentLeader == preferredReplica) {
info("Partition %s completed preferred replica leader election. New leader is %d".format(partition, preferredReplica))
} else {
warn("Partition %s failed to complete preferred replica leader election. Leader is %d".format(partition, currentLeader))
}
}
ZkUtils.deletePath(zkClient, ZkUtils.PreferredReplicaLeaderElectionPath)
controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsToBeRemoved
}
private def getAllReplicasForPartition(partitions: Set[TopicAndPartition]): Set[PartitionAndReplica] = {
partitions.map { p =>
val replicas = controllerContext.partitionReplicaAssignment(p)
replicas.map(r => new PartitionAndReplica(p.topic, p.partition, r))
}.flatten
}
  /**
   * Removes a given partition replica from the partition's ISR, retrying the conditional zookeeper
   * update (compare-and-set on zkVersion) until it succeeds or becomes unnecessary. The leader
   * epoch is incremented on every successful ISR shrink.
   * NOTE(review): the code performs no check that the replica is not the current leader or that
   * enough replicas remain in the ISR — callers are expected to guarantee those preconditions.
   * @param topic topic
   * @param partition partition
   * @param replicaId replica Id
   * @return the new leaderAndIsr (with the replica removed if it was present),
   *         or None if leaderAndIsr is empty.
   */
  def removeReplicaFromIsr(topic: String, partition: Int, replicaId: Int): Option[LeaderIsrAndControllerEpoch] = {
    val topicAndPartition = TopicAndPartition(topic, partition)
    debug("Removing replica %d from ISR of %s.".format(replicaId, topicAndPartition))
    var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
    var zkWriteCompleteOrUnnecessary = false
    while (!zkWriteCompleteOrUnnecessary) {
      // refresh leader and isr from zookeeper again
      val leaderIsrAndEpochOpt = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition)
      zkWriteCompleteOrUnnecessary = leaderIsrAndEpochOpt match {
        case Some(leaderIsrAndEpoch) => // increment the leader epoch even if the ISR changes
          val leaderAndIsr = leaderIsrAndEpoch.leaderAndIsr
          val controllerEpoch = leaderIsrAndEpoch.controllerEpoch
          // A newer controller epoch in zookeeper means this controller was superseded; abort.
          if(controllerEpoch > epoch)
            throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
              "means the current controller with epoch %d went through a soft failure and another ".format(epoch) +
              "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
          if (leaderAndIsr.isr.contains(replicaId)) {
            val newLeaderAndIsr = new LeaderAndIsr(leaderAndIsr.leader, leaderAndIsr.leaderEpoch + 1,
              leaderAndIsr.isr.filter(b => b != replicaId), leaderAndIsr.zkVersion + 1)
            // update the new leadership decision in zookeeper or retry
            val (updateSucceeded, newVersion) = ZkUtils.conditionalUpdatePersistentPath(
              zkClient,
              ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition),
              ZkUtils.leaderAndIsrZkData(newLeaderAndIsr, epoch),
              leaderAndIsr.zkVersion)
            newLeaderAndIsr.zkVersion = newVersion
            finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(newLeaderAndIsr, epoch))
            if (updateSucceeded)
              info("New leader and ISR for partition [%s, %d] is %s"
                .format(topic, partition, newLeaderAndIsr.toString()))
            // A false result causes the while loop to re-read state and retry the CAS.
            updateSucceeded
          } else {
            // Replica is already absent from the ISR: nothing to write, return the current state.
            warn("Cannot remove replica %d from ISR of %s. Leader = %d ; ISR = %s"
              .format(replicaId, topicAndPartition, leaderAndIsr.leader, leaderAndIsr.isr))
            finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(leaderAndIsr, epoch))
            true
          }
        case None =>
          warn("Cannot remove replica %d from ISR of %s - leaderAndIsr is empty.".format(replicaId, topicAndPartition))
          true
      }
    }
    finalLeaderIsrAndControllerEpoch
  }
  /**
   * Listens for zookeeper session state changes. When the session expires, the ephemeral
   * controller znode held by the old session is gone, so this listener tears down the
   * controller-side state on this broker and re-enters controller election.
   */
  class SessionExpirationListener() extends IZkStateListener with Logging {
    this.logIdent = "[SessionExpirationListener on " + config.brokerId + "], "
    @throws(classOf[Exception])
    def handleStateChanged(state: KeeperState) {
      // do nothing, since zkclient will do reconnect for us.
    }

    /**
     * Called after the zookeeper session has expired and a new session has been created. The
     * ephemeral nodes owned by the previous session no longer exist, so this broker cleans up
     * any controller state it held and participates in a fresh controller election.
     *
     * @throws Exception
     *             On any error.
     */
    @throws(classOf[Exception])
    def handleNewSession() {
      controllerContext.controllerLock synchronized {
        // Shut down everything the (possibly former) controller owned before re-electing.
        Utils.unregisterMBean(KafkaController.MBeanName)
        partitionStateMachine.shutdown()
        replicaStateMachine.shutdown()
        if(controllerContext.controllerChannelManager != null) {
          info("session expires, clean up the state")
          controllerContext.controllerChannelManager.shutdown()
          controllerContext.controllerChannelManager = null
        }
        controllerElector.elect
      }
    }
  }
}
/**
 * Starts the partition reassignment process unless -
 * 1. Partition doesn't exist in the cluster
 * 2. New replicas are the same as existing replicas
 * 3. Any replica in the new set of replicas is dead
 * If any of the above conditions are satisfied, it logs an error and removes the partition from the list of reassigned
 * partitions.
 */
class PartitionsReassignedListener(controller: KafkaController) extends IZkDataListener with Logging {
  this.logIdent = "[PartitionsReassignedListener on " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext

  /**
   * Invoked when some partitions are reassigned by the admin command.
   * Validates each newly requested reassignment and either kicks off the reassignment process or,
   * on any failure, removes the partition from the admin path to unblock the admin client.
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    debug("Partitions reassigned listener fired for path %s. Record partitions to be reassigned %s"
      .format(dataPath, data))
    val partitionsReassignmentData = ZkUtils.parsePartitionReassignmentData(data.toString)
    // Only handle partitions that are not already in the middle of a reassignment.
    val newPartitions = partitionsReassignmentData.filterNot(p => controllerContext.partitionsBeingReassigned.contains(p._1))
    newPartitions.foreach { partitionToBeReassigned =>
      controllerContext.controllerLock synchronized {
        val topic = partitionToBeReassigned._1.topic
        val partition = partitionToBeReassigned._1.partition
        val newReplicas = partitionToBeReassigned._2
        val topicAndPartition = partitionToBeReassigned._1
        val aliveNewReplicas = newReplicas.filter(r => controllerContext.liveBrokerIds.contains(r))
        try {
          val assignedReplicasOpt = controllerContext.partitionReplicaAssignment.get(topicAndPartition)
          assignedReplicasOpt match {
            case Some(assignedReplicas) =>
              // NOTE(review): KafkaException is used here as local control flow — each throw below
              // is caught by the catch block of this same try, which logs and cleans up.
              if(assignedReplicas == newReplicas) {
                throw new KafkaException("Partition %s to be reassigned is already assigned to replicas"
                  .format(topicAndPartition) +
                  " %s. Ignoring request for partition reassignment".format(newReplicas.mkString(",")))
              } else {
                if(aliveNewReplicas == newReplicas) {
                  info("Handling reassignment of partition %s to new replicas %s".format(topicAndPartition,
                    newReplicas.mkString(",")))
                  val context = createReassignmentContextForPartition(topic, partition, newReplicas)
                  controllerContext.partitionsBeingReassigned.put(topicAndPartition, context)
                  controller.onPartitionReassignment(topicAndPartition, context)
                } else {
                  // some replica in RAR is not alive. Fail partition reassignment
                  throw new KafkaException("Only %s replicas out of the new set of replicas".format(aliveNewReplicas.mkString(",")) +
                    " %s for partition %s to be reassigned are alive. ".format(newReplicas.mkString(","), topicAndPartition) +
                    "Failing partition reassignment")
                }
              }
            case None => throw new KafkaException("Attempt to reassign partition %s that doesn't exist"
              .format(topicAndPartition))
          }
        } catch {
          case e => error("Error completing reassignment of partition %s".format(topicAndPartition), e)
          // remove the partition from the admin path to unblock the admin client
          controller.removePartitionFromReassignedPartitions(topicAndPartition)
        }
      }
    }
  }

  /**
   * No-op: nothing to do when the reassignment admin path is deleted.
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
  }

  /** Creates the reassignment context for a partition and registers its ISR-change listener. */
  private def createReassignmentContextForPartition(topic: String,
                                                    partition: Int,
                                                    newReplicas: Seq[Int]): ReassignedPartitionsContext = {
    val context = new ReassignedPartitionsContext(newReplicas)
    // first register ISR change listener
    watchIsrChangesForReassignedPartition(topic, partition, context)
    context
  }

  /** Subscribes an ISR-change listener on the partition's leader-and-isr path so the controller
   *  can resume the reassignment once all new replicas have caught up with the leader. */
  private def watchIsrChangesForReassignedPartition(topic: String, partition: Int,
                                                    reassignedPartitionContext: ReassignedPartitionsContext) {
    val reassignedReplicas = reassignedPartitionContext.newReplicas
    val isrChangeListener = new ReassignedPartitionsIsrChangeListener(controller, topic, partition,
      reassignedReplicas.toSet)
    reassignedPartitionContext.isrChangeListener = isrChangeListener
    // register listener on the leader and isr path to wait until they catch up with the current leader
    zkClient.subscribeDataChanges(ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition), isrChangeListener)
  }
}
/**
 * Watches the leader-and-isr path of a partition under reassignment. Once every reassigned
 * replica has joined the ISR (i.e. caught up with the leader), it resumes the reassignment
 * process for that partition.
 */
class ReassignedPartitionsIsrChangeListener(controller: KafkaController, topic: String, partition: Int,
                                            reassignedReplicas: Set[Int])
  extends IZkDataListener with Logging {
  this.logIdent = "[ReassignedPartitionsIsrChangeListener on controller " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext

  /**
   * Invoked when the partition's leader-and-isr data changes while it is being reassigned.
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    try {
      controllerContext.controllerLock synchronized {
        debug("Reassigned partitions isr change listener fired for path %s with children %s".format(dataPath, data))
        // check if this partition is still being reassigned or not
        val topicAndPartition = TopicAndPartition(topic, partition)
        controllerContext.partitionsBeingReassigned.get(topicAndPartition) match {
          case Some(reassignedPartitionContext) =>
            // need to re-read leader and isr from zookeeper since the zkclient callback doesn't return the Stat object
            val newLeaderAndIsrOpt = ZkUtils.getLeaderAndIsrForPartition(zkClient, topic, partition)
            newLeaderAndIsrOpt match {
              case Some(leaderAndIsr) => // check if new replicas have joined ISR
                // intersection of the reassignment target with the current ISR
                val caughtUpReplicas = reassignedReplicas & leaderAndIsr.isr.toSet
                if(caughtUpReplicas == reassignedReplicas) {
                  // resume the partition reassignment process
                  info("%d/%d replicas have caught up with the leader for partition [%s, %d] being reassigned."
                    .format(caughtUpReplicas.size, reassignedReplicas.size, topic, partition) +
                    "Resuming partition reassignment")
                  controller.onPartitionReassignment(topicAndPartition, reassignedPartitionContext)
                }
                else {
                  info("%d/%d replicas have caught up with the leader for partition [%s, %d] being reassigned."
                    .format(caughtUpReplicas.size, reassignedReplicas.size, topic, partition) +
                    "Replica(s) %s still need to catch up".format((reassignedReplicas -- leaderAndIsr.isr.toSet).mkString(",")))
                }
              case None => error("Error handling reassignment of partition [%s, %d] to replicas %s as it was never created"
                .format(topic, partition, reassignedReplicas.mkString(",")))
            }
          case None =>
          // partition is no longer being reassigned; nothing to do
        }
      }
    }catch {
      case e => error("Error while handling partition reassignment", e)
    }
  }

  /**
   * No-op: nothing to do when the leader-and-isr path is deleted.
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
  }
}
/**
 * Starts the preferred replica leader election for the list of partitions specified under
 * the /admin/preferred_replica_election zookeeper path.
 */
class PreferredReplicaElectionListener(controller: KafkaController) extends IZkDataListener with Logging {
  this.logIdent = "[PreferredReplicaElectionListener on " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext

  /**
   * Invoked when preferred replica election is requested via the admin path. Triggers the
   * election for all listed partitions that are not already undergoing one, and always clears
   * the attempted partitions from the admin path afterwards.
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    // BUG FIX: the original applied .format only to the second literal (" %s") because method
    // calls bind tighter than +, so the first "%s" was never substituted and data.toString was
    // silently dropped. Parenthesizing the concatenation fills both placeholders.
    debug(("Preferred replica election listener fired for path %s. Record partitions to undergo preferred replica election" +
      " %s").format(dataPath, data.toString))
    val partitionsForPreferredReplicaElection =
      PreferredReplicaLeaderElectionCommand.parsePreferredReplicaJsonData(data.toString)
    // Skip partitions for which an election is already in flight.
    val newPartitions = partitionsForPreferredReplicaElection -- controllerContext.partitionsUndergoingPreferredReplicaElection
    controllerContext.controllerLock synchronized {
      try {
        controller.onPreferredReplicaElection(newPartitions)
      } catch {
        case e => error("Error completing preferred replica leader election for partitions %s"
          .format(partitionsForPreferredReplicaElection.mkString(",")), e)
      } finally {
        // Always clear the admin path entries for the partitions we attempted.
        controller.removePartitionsFromPreferredReplicaElection(newPartitions)
      }
    }
  }

  /**
   * No-op: nothing to do when the election admin path is deleted.
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
  }
}
/**
 * Keeps the controller context's cached epoch and epoch zk version in sync with the
 * controller epoch path in zookeeper.
 */
class ControllerEpochListener(controller: KafkaController) extends IZkDataListener with Logging {
  this.logIdent = "[ControllerEpochListener on " + controller.config.brokerId + "]: "
  val controllerContext = controller.controllerContext
  // Prime the cached epoch/zkVersion as soon as the listener is constructed.
  readControllerEpochFromZookeeper()

  /**
   * Invoked when a controller updates the epoch value
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    debug("Controller epoch listener fired with new epoch " + data.toString)
    controllerContext.controllerLock synchronized {
      // Re-read from zookeeper instead of trusting the callback payload: the callback does not
      // carry the Stat, and the cached zk version must be refreshed together with the epoch.
      readControllerEpochFromZookeeper()
    }
  }

  /**
   * No-op: nothing to do when the epoch path is deleted.
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
  }

  /** Reads the controller epoch and its zk version from zookeeper into the controller context. */
  private def readControllerEpochFromZookeeper() {
    if(ZkUtils.pathExists(controllerContext.zkClient, ZkUtils.ControllerEpochPath)) {
      val (epochString, epochStat) = ZkUtils.readData(controllerContext.zkClient, ZkUtils.ControllerEpochPath)
      controllerContext.epoch = epochString.toInt
      controllerContext.epochZkVersion = epochStat.getVersion
      info("Initialized controller epoch to %d and zk version %d".format(controllerContext.epoch, controllerContext.epochZkVersion))
    }
  }
}
// Mutable context tracked for a partition under reassignment: the target replica list and the
// ISR-change listener registered while the new replicas catch up with the leader.
case class ReassignedPartitionsContext(var newReplicas: Seq[Int] = Seq.empty,
                                       var isrChangeListener: ReassignedPartitionsIsrChangeListener = null)

// Identifies one replica of one partition of one topic.
case class PartitionAndReplica(topic: String, partition: Int, replica: Int)
// Pairs a partition's leader-and-isr record with the epoch of the controller that wrote it.
// (Removed the redundant `val` modifier: case-class parameters are vals by default.)
case class LeaderIsrAndControllerEpoch(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int)
// Controller-level metrics, registered via KafkaMetricsGroup.
object ControllerStats extends KafkaMetricsGroup {
  // Rate of partitions going offline per second.
  val offlinePartitionRate = newMeter("OfflinePartitionsPerSec", "partitions", TimeUnit.SECONDS)
  // Rate of leader elections that could not pick an in-sync replica.
  val uncleanLeaderElectionRate = newMeter("UncleanLeaderElectionsPerSec", "elections", TimeUnit.SECONDS)
  // Timer + rate around each controller-driven leader election.
  val leaderElectionTimer = new KafkaTimer(newTimer("LeaderElectionRateAndTimeMs", TimeUnit.MILLISECONDS, TimeUnit.SECONDS))
}
| dchenbecker/kafka-sbt | core/src/main/scala/kafka/controller/KafkaController.scala | Scala | apache-2.0 | 51,420 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.flink
import org.apache.flink.table.api.Table
import org.apache.flink.table.api.scala.BatchTableEnvironment
import org.apache.zeppelin.interpreter.{InterpreterContext, InterpreterResult}
class FlinkSQLScalaInterpreter(scalaInterpreter: FlinkScalaInterpreter,
                               z: FlinkZeppelinContext,
                               maxRow: Int) {

  // Obtained once from the wrapped scala interpreter and never reassigned,
  // so declared as a val (was an unnecessarily mutable var).
  private val btenv: BatchTableEnvironment = scalaInterpreter.getBatchTableEnviroment()

  /**
   * Executes the given Flink SQL query against the batch table environment and renders the
   * result table through the Zeppelin context.
   *
   * @param code    the SQL statement to run
   * @param context the interpreter context for this paragraph
   * @return SUCCESS with the rendered table data, or ERROR with the exception message if query
   *         execution or result rendering fails
   */
  def interpret(code: String, context: InterpreterContext): InterpreterResult = {
    try {
      val table: Table = btenv.sqlQuery(code)
      // The last expression of each branch is the result; explicit `return` is non-idiomatic Scala.
      new InterpreterResult(InterpreterResult.Code.SUCCESS, z.showData(table))
    } catch {
      case e: Exception =>
        new InterpreterResult(InterpreterResult.Code.ERROR,
          "Fail to fetch result: " + e.getMessage)
    }
  }
}
| sergeymazin/zeppelin | flink/src/main/scala/org/apache/zeppelin/flink/FlinkSQLScalaInterpreter.scala | Scala | apache-2.0 | 1,714 |
package io.iohk.ethereum.consensus.validators
import akka.util.ByteString
import io.iohk.ethereum.blockchain.sync.EphemBlockchainTestSetup
import io.iohk.ethereum.consensus.difficulty.DifficultyCalculator
import io.iohk.ethereum.consensus.ethash.difficulty.EthashDifficultyCalculator
import io.iohk.ethereum.consensus.ethash.validators.EthashBlockHeaderValidator
import io.iohk.ethereum.consensus.validators.BlockHeaderError._
import io.iohk.ethereum.consensus.validators.BlockHeaderValidator._
import io.iohk.ethereum.domain.{UInt256, _}
import io.iohk.ethereum.utils.{BlockchainConfig, DaoForkConfig}
import io.iohk.ethereum.{Fixtures, ObjectGenerators}
import org.bouncycastle.util.encoders.Hex
import org.scalamock.scalatest.MockFactory
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import io.iohk.ethereum.domain.BlockHeader.HeaderExtraFields._
// scalastyle:off magic.number
/**
 * Spec for [[EthashBlockHeaderValidator]] and [[EthashDifficultyCalculator]], covering:
 *  - structural header checks (extra-data size, timestamp, difficulty, gasUsed, gasLimit, number)
 *  - proof-of-work checks (nonce / mixHash)
 *  - DAO hard-fork extra-data rules (pro-fork and no-fork chains)
 *  - ECIP-1097 / ECIP-1098 extra-field activation rules
 *  - difficulty values around the bomb pause, bomb defuse and the block-reward reduction
 */
class BlockHeaderValidatorSpec
    extends AnyFlatSpec
    with Matchers
    with ScalaCheckPropertyChecks
    with ObjectGenerators
    with MockFactory {

  // Extra room above MaxExtraDataSize used when generating invalid extra-data fields
  val ExtraDataSizeLimit = 20

  val blockchainConfig: BlockchainConfig = createBlockchainConfig()
  val blockHeaderValidator = new EthashBlockHeaderValidator(blockchainConfig)
  val difficultyCalculator = new EthashDifficultyCalculator(blockchainConfig)

  "BlockHeaderValidator" should "validate correctly formed BlockHeaders" in {
    blockHeaderValidator.validate(validBlockHeader, validParent.header) match {
      case Right(_) => succeed
      case _ => fail()
    }
  }

  it should "return a failure if created based on invalid extra data" in {
    forAll(randomSizeByteStringGen(MaxExtraDataSize + 1, MaxExtraDataSize + ExtraDataSizeLimit)) { wrongExtraData =>
      val invalidBlockHeader = validBlockHeader.copy(extraData = wrongExtraData)
      assert(blockHeaderValidator.validate(invalidBlockHeader, validParent.header) == Left(HeaderExtraDataError))
    }
  }

  it should "validate DAO block (extra data)" in {
    import Fixtures.Blocks._
    val cases = Table(
      ("Block", "Parent Block", "Supports Dao Fork", "Valid"),
      (DaoForkBlock.header, DaoParentBlock.block, false, true),
      (DaoForkBlock.header, DaoParentBlock.block, true, false),
      (ProDaoForkBlock.header, DaoParentBlock.block, true, true),
      (ProDaoForkBlock.header, DaoParentBlock.block, false, true), // We don't care for extra data if no pro dao
      (ProDaoForkBlock.header.copy(extraData = ByteString("Wrond DAO Extra")), DaoParentBlock.block, true, false),
      // We need to check extradata up to 10 blocks after
      (ProDaoBlock1920009Header, Block(ProDaoBlock1920008Header, validParentBlockBody), true, true),
      (
        ProDaoBlock1920009Header.copy(extraData = ByteString("Wrond DAO Extra")),
        Block(ProDaoBlock1920008Header, validParentBlockBody),
        true,
        false
      ),
      (ProDaoBlock1920010Header, Block(ProDaoBlock1920009Header, validParentBlockBody), true, true)
    )

    forAll(cases) { (blockHeader, parentBlock, supportsDaoFork, valid) =>
      val blockHeaderValidator = new EthashBlockHeaderValidator(createBlockchainConfig(supportsDaoFork))
      blockHeaderValidator.validate(blockHeader, parentBlock.header) match {
        case Right(_) => assert(valid)
        case Left(DaoHeaderExtraDataError) => assert(!valid)
        case _ => fail()
      }
    }
  }

  it should "return a failure if created based on invalid timestamp" in {
    forAll(longGen) { timestamp =>
      val blockHeader = validBlockHeader.copy(unixTimestamp = timestamp)
      val validateResult = blockHeaderValidator.validate(blockHeader, validParent.header)
      timestamp match {
        case t if t <= validParentBlockHeader.unixTimestamp => assert(validateResult == Left(HeaderTimestampError))
        // stable-identifier pattern: matches only the header's original (valid) timestamp
        case validBlockHeader.unixTimestamp => assert(validateResult == Right(BlockHeaderValid))
        // any other future timestamp changes the expected difficulty, so difficulty check fails
        case _ => assert(validateResult == Left(HeaderDifficultyError))
      }
    }
  }

  it should "return a failure if created based on invalid difficulty" in {
    forAll(bigIntGen) { difficulty =>
      val blockHeader = validBlockHeader.copy(difficulty = difficulty)
      val validateResult = blockHeaderValidator.validate(blockHeader, validParent.header)
      if (difficulty != validBlockHeader.difficulty) assert(validateResult == Left(HeaderDifficultyError))
      else assert(validateResult == Right(BlockHeaderValid))
    }
  }

  it should "return a failure if created based on invalid gas used" in {
    forAll(bigIntGen) { gasUsed =>
      val blockHeader = validBlockHeader.copy(gasUsed = gasUsed)
      val validateResult = blockHeaderValidator.validate(blockHeader, validParent.header)
      if (gasUsed > validBlockHeader.gasLimit) assert(validateResult == Left(HeaderGasUsedError))
      else assert(validateResult == Right(BlockHeaderValid))
    }
  }

  it should "return a failure if created based on invalid negative gas used" in {
    val gasUsed = -1
    val blockHeader = validBlockHeader.copy(gasUsed = gasUsed)
    val validateResult = blockHeaderValidator.validate(blockHeader, validParent.header)
    assert(validateResult == Left(HeaderGasUsedError))
  }

  it should "return a failure if created based on invalid gas limit" in {
    // Valid gas limits stay within parent.gasLimit +/- parent.gasLimit / GasLimitBoundDivisor
    val LowerGasLimit =
      MinGasLimit.max(validParentBlockHeader.gasLimit - validParentBlockHeader.gasLimit / GasLimitBoundDivisor + 1)
    val UpperGasLimit = validParentBlockHeader.gasLimit + validParentBlockHeader.gasLimit / GasLimitBoundDivisor - 1

    forAll(bigIntGen) { gasLimit =>
      val blockHeader = validBlockHeader.copy(gasLimit = gasLimit)
      val validateResult = blockHeaderValidator.validate(blockHeader, validParent.header)
      if (gasLimit < LowerGasLimit || gasLimit > UpperGasLimit)
        assert(validateResult == Left(HeaderGasLimitError))
      else assert(validateResult == Right(BlockHeaderValid))
    }
  }

  it should "return a failure if created with gas limit above threshold and block number >= eip106 block number" in {
    val validParent = Block(validParentBlockHeader.copy(gasLimit = Long.MaxValue), validParentBlockBody)
    val invalidBlockHeader = validBlockHeader.copy(gasLimit = BigInt(Long.MaxValue) + 1)
    blockHeaderValidator.validate(invalidBlockHeader, validParent.header) shouldBe Left(HeaderGasLimitError)
  }

  it should "return a failure if created based on invalid number" in {
    forAll(longGen) { number =>
      val blockHeader = validBlockHeader.copy(number = number)
      val parent = Block(validParentBlockHeader, validParentBlockBody)
      val validateResult = blockHeaderValidator.validate(blockHeader, parent.header)
      if (number != validParentBlockHeader.number + 1)
        assert(validateResult == Left(HeaderNumberError) || validateResult == Left(HeaderDifficultyError))
      else assert(validateResult == Right(BlockHeaderValid))
    }
  }

  it should "return a failure if created based on invalid nonce/mixHash" in {
    val invalidNonce = ByteString(Hex.decode("0b80f001ae0c017f"))
    val invalidMixHash = ByteString(Hex.decode("1f947f00807f7f7f2f7f00ff82ff00de015980607f129c77afedff4680c10171"))
    val blockHeaderWithInvalidNonce = validBlockHeader.copy(nonce = invalidNonce)
    val blockHeaderWithInvalidMixHash = validBlockHeader.copy(mixHash = invalidMixHash)
    val blockHeaderWithInvalidNonceAndMixHash = validBlockHeader.copy(nonce = invalidNonce, mixHash = invalidMixHash)

    val parent = Block(validParentBlockHeader, validParentBlockBody)

    blockHeaderValidator.validate(blockHeaderWithInvalidNonce, parent.header) shouldBe Left(HeaderPoWError)
    blockHeaderValidator.validate(blockHeaderWithInvalidMixHash, parent.header) shouldBe Left(HeaderPoWError)
    blockHeaderValidator.validate(blockHeaderWithInvalidNonceAndMixHash, parent.header) shouldBe Left(HeaderPoWError)
  }

  it should "validate correctly a block whose parent is in storage" in new EphemBlockchainTestSetup {
    blockchain
      .storeBlockHeader(validParentBlockHeader)
      .and(blockchain.storeBlockBody(validParentBlockHeader.hash, validParentBlockBody))
      .commit()
    blockHeaderValidator.validate(validBlockHeader, blockchain.getBlockHeaderByHash _) match {
      case Right(_) => succeed
      case _ => fail()
    }
  }

  it should "return a failure if the parent's header is not in storage" in new EphemBlockchainTestSetup {
    blockHeaderValidator.validate(validBlockHeader, blockchain.getBlockHeaderByHash _) match {
      case Left(HeaderParentNotFoundError) => succeed
      case _ => fail()
    }
  }

  it should "properly validate a block after difficulty bomb pause" in new EphemBlockchainTestSetup {
    val parent = Block(pausedDifficultyBombBlockParent, parentBody)
    val res = blockHeaderValidator.validate(pausedDifficultyBombBlock, parent.header)
    res shouldBe Right(BlockHeaderValid)
  }

  it should "mark as valid a post ecip1098 block opt-out with opt out defined" in new EphemBlockchainTestSetup {
    val ecip1098BlockNumber = validBlockHeader.number / 2
    val blockchainConfigWithECIP1098Enabled: BlockchainConfig =
      blockchainConfig.copy(ecip1098BlockNumber = ecip1098BlockNumber)
    val blockHeaderValidator = new BlockValidatorWithPowMocked(blockchainConfigWithECIP1098Enabled)

    val validHeader = validBlockHeader.copy(extraFields = HefPostEcip1098(treasuryOptOut = true))
    val validationResult = blockHeaderValidator.validate(validHeader, validParentBlockHeader)
    validationResult shouldBe Right(BlockHeaderValid)
  }

  it should "mark as invalid a pre ecip1098 block opt-out with opt out defined" in new EphemBlockchainTestSetup {
    val ecip1098BlockNumber = validBlockHeader.number * 2
    val blockchainConfigWithECIP1098Disabled: BlockchainConfig =
      blockchainConfig.copy(ecip1098BlockNumber = ecip1098BlockNumber)
    val blockHeaderValidator = new BlockValidatorWithPowMocked(blockchainConfigWithECIP1098Disabled)

    val headerWithOptOutInvalidlyOn = validBlockHeader.copy(extraFields = HefPostEcip1098(treasuryOptOut = true))
    val validationResult = blockHeaderValidator.validate(headerWithOptOutInvalidlyOn, validParentBlockHeader)
    validationResult shouldBe Left(
      HeaderExtraFieldsError(
        headerWithOptOutInvalidlyOn.extraFields,
        ecip1097Activated = false,
        ecip1098Activated = false
      )
    )
  }

  it should "mark as invalid a post ecip1098 block opt-out with opt out undefined" in new EphemBlockchainTestSetup {
    val ecip1098BlockNumber = validBlockHeader.number / 2
    val blockchainConfigWithECIP1098Enabled: BlockchainConfig =
      blockchainConfig.copy(ecip1098BlockNumber = ecip1098BlockNumber)
    val blockHeaderValidator = new BlockValidatorWithPowMocked(blockchainConfigWithECIP1098Enabled)

    val headerWithOptOutInvalidlyOn = validBlockHeader.copy(extraFields = HefEmpty)
    val validationResult = blockHeaderValidator.validate(headerWithOptOutInvalidlyOn, validParentBlockHeader)
    validationResult shouldBe Left(
      HeaderExtraFieldsError(
        headerWithOptOutInvalidlyOn.extraFields,
        ecip1097Activated = false,
        ecip1098Activated = true
      )
    )
  }

  it should "mark as invalid a post ecip1097 with checkpoint or opt out undefined" in new EphemBlockchainTestSetup {
    val ecip1097and1098BlockNumber = validBlockHeader.number / 2
    val blockchainConfigWithECIP1097and1098Enabled: BlockchainConfig = blockchainConfig.copy(
      ecip1098BlockNumber = ecip1097and1098BlockNumber,
      ecip1097BlockNumber = ecip1097and1098BlockNumber
    )
    val blockHeaderValidator = new BlockValidatorWithPowMocked(blockchainConfigWithECIP1097and1098Enabled)

    val baseBlockHeader = validBlockHeader.copy(extraFields = HefEmpty)
    val baseHeaderValidationResult = blockHeaderValidator.validate(baseBlockHeader, validParentBlockHeader)
    baseHeaderValidationResult shouldBe Left(
      HeaderExtraFieldsError(baseBlockHeader.extraFields, ecip1097Activated = true, ecip1098Activated = true)
    )

    val ecip1098BlockHeader = validBlockHeader.copy(extraFields = HefPostEcip1098(true))
    val ecip1098HeaderValidationResult = blockHeaderValidator.validate(ecip1098BlockHeader, validParentBlockHeader)
    ecip1098HeaderValidationResult shouldBe Left(
      HeaderExtraFieldsError(ecip1098BlockHeader.extraFields, ecip1097Activated = true, ecip1098Activated = true)
    )
  }

  it should "properly calculate the difficulty after difficulty bomb resume (with reward reduction)" in new EphemBlockchainTestSetup {
    val parentHeader: BlockHeader =
      validParentBlockHeader.copy(number = 5000101, unixTimestamp = 1513175023, difficulty = BigInt("22627021745803"))
    val parent = Block(parentHeader, parentBody)

    val blockNumber: BigInt = parentHeader.number + 1
    val blockTimestamp: Long = parentHeader.unixTimestamp + 6

    val difficulty: BigInt = difficultyCalculator.calculateDifficulty(blockNumber, blockTimestamp, parent.header)
    val expected = BigInt("22638070358408")

    difficulty shouldBe expected
  }

  it should "properly calculate the difficulty after difficulty defuse" in new EphemBlockchainTestSetup {
    val parentHeader: BlockHeader =
      validParentBlockHeader.copy(number = 5899999, unixTimestamp = 1525176000, difficulty = BigInt("22627021745803"))
    val parent = Block(parentHeader, parentBody)

    val blockNumber: BigInt = parentHeader.number + 1
    val blockTimestamp: Long = parentHeader.unixTimestamp + 6

    val difficulty: BigInt = difficultyCalculator.calculateDifficulty(blockNumber, blockTimestamp, parent.header)
    val blockDifficultyWithoutBomb = BigInt("22638070096264")

    difficulty shouldBe blockDifficultyWithoutBomb
  }

  it should "properly calculate a block after block reward reduction (without uncles)" in new EphemBlockchainTestSetup {
    val parent = Block(afterRewardReductionParentBlockHeader, parentBody)

    val blockNumber: BigInt = afterRewardReductionBlockHeader.number
    val blockTimestamp: Long = afterRewardReductionBlockHeader.unixTimestamp

    val difficulty: BigInt = difficultyCalculator.calculateDifficulty(blockNumber, blockTimestamp, parent.header)

    /** Expected calculations:
      * blockNumber = 5863375 // < 5900000
      * timestampDiff = 6
      * x = 3480699544328087 / 2048 =
      * c = (1 - (6 / 9)) = 0,33 // > -99
      * fakeBlockNumber = 5863375 - 3000000 = 2863375
      * extraDifficulty = 134217728
      * difficultyWithoutBomb = 3480699544328087 + 1699560324378,95 * 0,33 = 3481260399235132
      */
    // NOTE(review): this computed constant is unused — the assertion below compares against the
    // fixture header's own difficulty instead. Confirm which value the test is meant to pin down.
    val blockDifficultyAfterRewardReduction = BigInt("3484099629090779")

    difficulty shouldBe afterRewardReductionBlockHeader.difficulty
  }

  // FIXME: Replace with mocked miner validators once we have them
  // Skips the PoW check (validateEvenMore always succeeds) so tests can focus on the
  // non-PoW header rules with the real difficulty calculator.
  class BlockValidatorWithPowMocked(blockchainConfig: BlockchainConfig)
      extends BlockHeaderValidatorSkeleton(blockchainConfig) {
    override protected def difficulty: DifficultyCalculator = difficultyCalculator

    override def validateEvenMore(
        blockHeader: BlockHeader,
        parentHeader: BlockHeader
    ): Either[BlockHeaderError, BlockHeaderValid] = Right(BlockHeaderValid)
  }

  // --- Fixtures: real mainnet headers around the difficulty-bomb pause and reward reduction ---

  val parentBody: BlockBody = BlockBody.empty

  val pausedDifficultyBombBlock = BlockHeader(
    parentHash = ByteString(Hex.decode("77af90df2b60071da7f11060747b6590a3bc2f357da4addccb5eef7cb8c2b723")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("10807cacf99ac84b7b8f9b4077e3a11ee8880bf9")),
    stateRoot = ByteString(Hex.decode("32deebbf585e9b0d0153b96d62283e903c10fac41fc4181438e29732c490ac6e")),
    transactionsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    receiptsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("20626433633447"),
    number = 3582022,
    gasLimit = 4700036,
    gasUsed = 0,
    unixTimestamp = 1492735637,
    extraData = ByteString(Hex.decode("d58301050b8650617269747986312e31352e31826c69")),
    mixHash = ByteString(Hex.decode("7d2db22c3dfaccb1b6927f5675ec24a41991ee4bcffdc564f940a45c1fce8acb")),
    nonce = ByteString(Hex.decode("81d6a5e8029f9446"))
  )

  val pausedDifficultyBombBlockParent = BlockHeader(
    parentHash = ByteString(Hex.decode("e6e90c1ba10df710365a2ae9f899bd787416d98f19874f4cb1a62f09c3b8277d")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("4c2b4e716883a2c3f6b980b70b577e54b9441060")),
    stateRoot = ByteString(Hex.decode("0920dc025715c278dc297aa7b2d1bf5a60666d92be22d338135d13571539fad7")),
    transactionsRoot = ByteString(Hex.decode("6616c23aeb486dd47aca667814ffed831553c7322440913b95847235a4c3bb97")),
    receiptsRoot = ByteString(Hex.decode("5fa90473cd08a08fc766329651d81bb6e4ef2bb330cf90c3025927a3bafe0c57")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("20616098743527"),
    number = 3582021,
    gasLimit = 4699925,
    gasUsed = 1005896,
    unixTimestamp = 1492735634,
    extraData = ByteString(Hex.decode("d58301050c8650617269747986312e31362e30826c69")),
    mixHash = ByteString(Hex.decode("d10215664192800200eab9ca7b90f9ceb8d8428200c2b4e6aebe2191c2a52c0e")),
    nonce = ByteString(Hex.decode("83e2d9b401cdfa77"))
  )

  val afterRewardReductionBlockHeader = BlockHeader(
    parentHash = ByteString(Hex.decode("a5280b4589a1534946f83dba3fcec698be2046010c4d39fc0437c61837adc0f5")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("ea674fdde714fd979de3edf0f56aa9716b898ec8")),
    stateRoot = ByteString.fromInts(0),
    transactionsRoot = ByteString(Hex.decode("f868d6aa999090d90d802ff6b46ace5870a07a50fd935af0635bd95acf62262a")),
    receiptsRoot = ByteString(Hex.decode("f868d6aa999090d90d802ff6b46ace5870a07a50fd935af0635bd95acf62262a")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("3482399171761329"),
    number = 5863375,
    gasLimit = 7999992,
    gasUsed = 7998727,
    unixTimestamp = 1530104899,
    extraData = ByteString(Hex.decode("657468706f6f6c2e6f7267202855533129")),
    mixHash = ByteString(Hex.decode("8f86617d6422c26a89b8b349b160973ca44f90326e758f1ef669c4046741dd06")),
    nonce = ByteString(Hex.decode("2cc9a5500763ce09"))
  )

  val afterRewardReductionParentBlockHeader = BlockHeader(
    parentHash = ByteString(Hex.decode("ce5633dd4e056415c9e170b1fd934d88eec437c8a6f58014a2a1ef801a132ac5")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("b2930b35844a230f00e51431acae96fe543a0347")),
    stateRoot = ByteString.fromInts(0),
    transactionsRoot = ByteString(Hex.decode("f868d6aa999090d90d802ff6b46ace5870a07a50fd935af0635bd95acf62262a")),
    receiptsRoot = ByteString(Hex.decode("f868d6aa999090d90d802ff6b46ace5870a07a50fd935af0635bd95acf62262a")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("3480699544328087"),
    number = 5863374,
    gasLimit = 7992222,
    gasUsed = 7980470,
    unixTimestamp = 1530104893,
    extraData = ByteString(Hex.decode("73656f3130")),
    mixHash = ByteString(Hex.decode("8f86617d6422c26a89b8b349b160973ca44f90326e758f1ef669c4046741dd06")),
    nonce = ByteString(Hex.decode("b9fa123002b9407d"))
  )

  val validBlockHeader = BlockHeader(
    parentHash = ByteString(Hex.decode("d882d5c210bab4cb7ef0b9f3dc2130cb680959afcd9a8f9bf83ee6f13e2f9da3")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("95f484419881c6e9b6de7fb3f8ad03763bd49a89")),
    stateRoot = ByteString(Hex.decode("634a2b20c9e02afdda7157afe384306c5acc4fb9c09b45dc0203c0fbb2fed0e6")),
    transactionsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    receiptsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("989772"),
    number = 20,
    gasLimit = 131620495,
    gasUsed = 0,
    unixTimestamp = 1486752441,
    extraData = ByteString(Hex.decode("d783010507846765746887676f312e372e33856c696e7578")),
    mixHash = ByteString(Hex.decode("6bc729364c9b682cfa923ba9480367ebdfa2a9bca2a652fe975e8d5958f696dd")),
    nonce = ByteString(Hex.decode("797a8f3a494f937b"))
  )

  val validParentBlockHeader = BlockHeader(
    parentHash = ByteString(Hex.decode("677a5fb51d52321b03552e3c667f602cc489d15fc1d7824445aee6d94a9db2e7")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("95f484419881c6e9b6de7fb3f8ad03763bd49a89")),
    stateRoot = ByteString(Hex.decode("cddeeb071e2f69ad765406fb7c96c0cd42ddfc6ec54535822b564906f9e38e44")),
    transactionsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    receiptsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("989289"),
    number = 19,
    gasLimit = 131749155,
    gasUsed = 0,
    unixTimestamp = 1486752440,
    extraData = ByteString(Hex.decode("d783010507846765746887676f312e372e33856c696e7578")),
    mixHash = ByteString(Hex.decode("7f9ac1ddeafff0f926ed9887b8cf7d50c3f919d902e618b957022c46c8b404a6")),
    nonce = ByteString(Hex.decode("3fc7bc671f7cee70"))
  )

  val validParentBlockBody = BlockBody(Seq.empty, Seq.empty)
  val validParent = Block(validParentBlockHeader, validParentBlockBody)

  /**
   * Builds the test [[BlockchainConfig]]. Fork numbers up to Istanbul follow mainnet;
   * the fields marked "unused" below are not exercised by this spec.
   *
   * @param supportsDaoFork when true, the DAO fork config requires the pro-fork extra data
   *                        and fork block hash; otherwise the no-fork (classic) chain values.
   */
  def createBlockchainConfig(supportsDaoFork: Boolean = false): BlockchainConfig = {
    import Fixtures.Blocks._
    BlockchainConfig(
      frontierBlockNumber = 0,
      homesteadBlockNumber = 1150000,
      difficultyBombPauseBlockNumber = 3000000,
      difficultyBombContinueBlockNumber = 5000000,
      difficultyBombRemovalBlockNumber = 5900000,
      byzantiumBlockNumber = 4370000,
      constantinopleBlockNumber = 7280000,
      istanbulBlockNumber = 9069000,
      daoForkConfig = Some(new DaoForkConfig {
        override val blockExtraData: Option[ByteString] =
          if (supportsDaoFork) Some(ProDaoForkBlock.header.extraData) else None
        override val range: Int = 10
        override val drainList: Seq[Address] = Nil
        override val forkBlockHash: ByteString =
          if (supportsDaoFork) ProDaoForkBlock.header.hash else DaoForkBlock.header.hash
        override val forkBlockNumber: BigInt = DaoForkBlock.header.number
        override val refundContract: Option[Address] = None
      }),
      // unused
      maxCodeSize = None,
      eip155BlockNumber = Long.MaxValue,
      eip160BlockNumber = Long.MaxValue,
      eip161BlockNumber = Long.MaxValue,
      eip150BlockNumber = Long.MaxValue,
      eip106BlockNumber = 0,
      chainId = 0x3d.toByte,
      networkId = 1,
      monetaryPolicyConfig = null,
      customGenesisFileOpt = None,
      accountStartNonce = UInt256.Zero,
      bootstrapNodes = Set(),
      gasTieBreaker = false,
      ethCompatibleStorage = true,
      atlantisBlockNumber = Long.MaxValue,
      aghartaBlockNumber = Long.MaxValue,
      phoenixBlockNumber = Long.MaxValue,
      petersburgBlockNumber = Long.MaxValue,
      ecip1098BlockNumber = Long.MaxValue,
      treasuryAddress = Address(0),
      ecip1097BlockNumber = Long.MaxValue,
      ecip1099BlockNumber = Long.MaxValue
    )
  }

  // --- Fixtures: mainnet headers straddling the DAO fork (1920008-1920010) ---

  val ProDaoBlock1920008Header = BlockHeader(
    parentHash = ByteString(Hex.decode("05c45c9671ee31736b9f37ee98faa72c89e314059ecff3257206e6ab498eb9d1")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("2a65aca4d5fc5b5c859090a6c34d164135398226")),
    stateRoot = ByteString(Hex.decode("fa8d3b3cbd37caba2faf09d5e472ae6c47a58d846751bc72306166a71d0fa4fa")),
    transactionsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    receiptsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
    logsBloom = ByteString(Hex.decode("00" * 256)),
    difficulty = BigInt("62230570926948"),
    number = 1920008,
    gasLimit = 4707788,
    gasUsed = 0,
    unixTimestamp = 1469021025,
    extraData = ByteString(Hex.decode("64616f2d686172642d666f726b")),
    mixHash = ByteString(Hex.decode("e73421390c1b084a9806754b238715ec333cdccc8d09b90cb6e38a9d1e247d6f")),
    nonce = ByteString(Hex.decode("c207c8381305bef2"))
  )

  val ProDaoBlock1920009Header = BlockHeader(
    parentHash = ByteString(Hex.decode("41254723e12eb736ddef151371e4c3d614233e6cad95f2d9017de2ab8b469a18")),
    ommersHash = ByteString(Hex.decode("808d06176049aecfd504197dde49f46c3dd75f1af055e417d100228162eefdd8")),
    beneficiary = ByteString(Hex.decode("ea674fdde714fd979de3edf0f56aa9716b898ec8")),
    stateRoot = ByteString(Hex.decode("49eb333152713b78d920440ef065ed7f681611e0c2e6933d657d6f4a7f1936ee")),
    transactionsRoot = ByteString(Hex.decode("a8060f1391fd4cbde4b03d83b32a1bda445578cd6ec6b7982db20c499ed3682b")),
    receiptsRoot = ByteString(Hex.decode("ab66b1986e713eaf5621059e79f04ba9c528187c1b9da969f46442c3f915c120")),
    logsBloom = ByteString(
      Hex.decode(
        "00000000000000020000000000020000000000000008000000000000000000000000000000000000000000000000400000000000000000000000000000202010000000000000000000000008000000000000000000000000400000000000000000000800000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001001000020000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000004000000000000000000000000000000010000000000000000000000000000000100000000000000000000000000000"
      )
    ),
    difficulty = BigInt("62230571058020"),
    number = 1920009,
    gasLimit = 4712384,
    gasUsed = 109952,
    unixTimestamp = 1469021040,
    extraData = ByteString(Hex.decode("64616f2d686172642d666f726b")),
    mixHash = ByteString(Hex.decode("5bde79f4dc5be28af2d956e748a0d6ebc1f8eb5c1397e76729269e730611cb99")),
    nonce = ByteString(Hex.decode("2b4b464c0a4da82a"))
  )

  val ProDaoBlock1920010Header = BlockHeader(
    parentHash = ByteString(Hex.decode("69d04aec94ad69d7d190d3b51d24cd42dded0c4767598a1d30480363509acbef")),
    ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
    beneficiary = ByteString(Hex.decode("4bb96091ee9d802ed039c4d1a5f6216f90f81b01")),
    stateRoot = ByteString(Hex.decode("6ee63abee7416d3a671bcbefa01aa5d4ea427e246d548e15c5f3d9a108e738fd")),
    transactionsRoot = ByteString(Hex.decode("0c6d4a643ed081f92e384a5853f14d7f5ff5d68b65d0c90b46159584a80effe0")),
    receiptsRoot = ByteString(Hex.decode("a7d1ddb80060d4b77c07007e9a9f0b83413bd2c5de71501683ba4764982eef4b")),
    logsBloom = ByteString(
      Hex.decode(
        "00000000000000020000000000020000001000000000000000000000000000000008000000000000000000000000400000000000000000000000000000202000000000000800000000000008000000000000000000000000400000000008000000000000000000000000000000000000000000000000000000000010000000000000000000000000000221000000000000000000080400000000000000011000020000000200001000000000000000000000000000000000400000000000000000000002000000000100000000000000000000000040000000000000000000000010000000000000000000000000000000000000000000000000000000000000"
      )
    ),
    difficulty = BigInt("62230571189092"),
    number = 1920010,
    gasLimit = 4712388,
    gasUsed = 114754,
    unixTimestamp = 1469021050,
    extraData = ByteString(Hex.decode("657468706f6f6c2e6f7267202855533129")),
    mixHash = ByteString(Hex.decode("8f86617d6422c26a89b8b349b160973ca44f90326e758f1ef669c4046741dd06")),
    nonce = ByteString(Hex.decode("c7de19e00a8c3e32"))
  )
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/consensus/validators/BlockHeaderValidatorSpec.scala | Scala | mit | 28,309 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.net.URI
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.types.StructType
/**
* A command used to create a data source table.
*
* Note: This is different from [[CreateTableCommand]]. Please check the syntax for difference.
* This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* [(col1 data_type [COMMENT col_comment], ...)]
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* }}}
*/
case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boolean)
  extends RunnableCommand {

  // Registers the (possibly schema-inferred) table in the session catalog.
  // Returns no rows: DDL commands produce an empty result.
  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Views and provider-less tables are routed to other commands by the planner;
    // reaching here with either is a planner bug, hence assertions rather than user errors.
    assert(table.tableType != CatalogTableType.VIEW)
    assert(table.provider.isDefined)

    val sessionState = sparkSession.sessionState
    if (sessionState.catalog.tableExists(table.identifier)) {
      if (ignoreIfExists) {
        return Seq.empty[Row]
      } else {
        throw new AnalysisException(s"Table ${table.identifier.unquotedString} already exists.")
      }
    }

    // Create the relation to validate the arguments before writing the metadata to the metastore,
    // and infer the table schema and partition if users didn't specify schema in CREATE TABLE.
    val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
    // Fill in some default table options from the session conf
    val tableWithDefaultOptions = table.copy(
      identifier = table.identifier.copy(
        database = Some(
          table.identifier.database.getOrElse(sessionState.catalog.getCurrentDatabase))),
      tracksPartitionsInCatalog = sessionState.conf.manageFilesourcePartitions)
    val dataSource: BaseRelation =
      DataSource(
        sparkSession = sparkSession,
        userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema),
        partitionColumns = table.partitionColumnNames,
        className = table.provider.get,
        bucketSpec = table.bucketSpec,
        options = table.storage.properties ++ pathOption,
        // As discussed in SPARK-19583, we don't check if the location is existed
        catalogTable = Some(tableWithDefaultOptions)).resolveRelation(checkFilesExist = false)

    // User-specified schema wins; otherwise take the partition columns inferred by the relation.
    val partitionColumnNames = if (table.schema.nonEmpty) {
      table.partitionColumnNames
    } else {
      // This is guaranteed in `PreprocessDDL`.
      assert(table.partitionColumnNames.isEmpty)
      dataSource match {
        case r: HadoopFsRelation => r.partitionSchema.fieldNames.toSeq
        case _ => Nil
      }
    }

    val newTable = dataSource match {
      // Since Spark 2.1, we store the inferred schema of data source in metastore, to avoid
      // inferring the schema again at read path. However if the data source has overlapped columns
      // between data and partition schema, we can't store it in metastore as it breaks the
      // assumption of table schema. Here we fallback to the behavior of Spark prior to 2.1, store
      // empty schema in metastore and infer it at runtime. Note that this also means the new
      // scalable partitioning handling feature(introduced at Spark 2.1) is disabled in this case.
      case r: HadoopFsRelation if r.overlappedPartCols.nonEmpty =>
        logWarning("It is not recommended to create a table with overlapped data and partition " +
          "columns, as Spark cannot store a valid table schema and has to infer it at runtime, " +
          "which hurts performance. Please check your data files and remove the partition " +
          "columns in it.")
        table.copy(schema = new StructType(), partitionColumnNames = Nil)

      case _ =>
        table.copy(
          schema = dataSource.schema,
          partitionColumnNames = partitionColumnNames,
          // If metastore partition management for file source tables is enabled, we start off with
          // partition provider hive, but no partitions in the metastore. The user has to call
          // `msck repair table` to populate the table partitions.
          tracksPartitionsInCatalog = partitionColumnNames.nonEmpty &&
            sessionState.conf.manageFilesourcePartitions)
    }

    // We will return Nil or throw exception at the beginning if the table already exists, so when
    // we reach here, the table should not exist and we should set `ignoreIfExists` to false.
    sessionState.catalog.createTable(newTable, ignoreIfExists = false)

    Seq.empty[Row]
  }
}
/**
* A command used to create a data source table using the result of a query.
*
* Note: This is different from `CreateHiveTableAsSelectCommand`. Please check the syntax for
* difference. This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* AS SELECT ...
* }}}
*/
case class CreateDataSourceTableAsSelectCommand(
    table: CatalogTable,
    mode: SaveMode,
    query: LogicalPlan,
    outputColumnNames: Seq[String])
  extends DataWritingCommand {

  // Writes the query result through the table's data source; for a brand-new table it then
  // records the final (relation-derived) metadata in the catalog. Returns no rows (DDL command).
  override def run(sparkSession: SparkSession, child: SparkPlan): Seq[Row] = {
    // Views and provider-less tables are handled by other commands; hitting them here is a bug.
    assert(table.tableType != CatalogTableType.VIEW)
    assert(table.provider.isDefined)

    val sessionState = sparkSession.sessionState
    val db = table.identifier.database.getOrElse(sessionState.catalog.getCurrentDatabase)
    val tableIdentWithDB = table.identifier.copy(database = Some(db))
    val tableName = tableIdentWithDB.unquotedString

    if (sessionState.catalog.tableExists(tableIdentWithDB)) {
      assert(mode != SaveMode.Overwrite,
        s"Expect the table $tableName has been dropped when the save mode is Overwrite")

      if (mode == SaveMode.ErrorIfExists) {
        throw new AnalysisException(s"Table $tableName already exists. You need to drop it first.")
      }
      if (mode == SaveMode.Ignore) {
        // Since the table already exists and the save mode is Ignore, we will just return.
        return Seq.empty
      }

      // Existing table: append into its current location; the catalog metadata is left untouched.
      saveDataIntoTable(
        sparkSession, table, table.storage.locationUri, child, SaveMode.Append, tableExists = true)
    } else {
      assert(table.schema.isEmpty)
      sparkSession.sessionState.catalog.validateTableLocation(table)
      // Managed tables get the catalog's default path; external tables keep the user's location.
      val tableLocation = if (table.tableType == CatalogTableType.MANAGED) {
        Some(sessionState.catalog.defaultTablePath(table.identifier))
      } else {
        table.storage.locationUri
      }
      val result = saveDataIntoTable(
        sparkSession, table, tableLocation, child, SaveMode.Overwrite, tableExists = false)
      val newTable = table.copy(
        storage = table.storage.copy(locationUri = tableLocation),
        // We will use the schema of resolved.relation as the schema of the table (instead of
        // the schema of df). It is important since the nullability may be changed by the relation
        // provider (for example, see org.apache.spark.sql.parquet.DefaultSource).
        schema = result.schema)
      // Table location is already validated. No need to check it again during table creation.
      sessionState.catalog.createTable(newTable, ignoreIfExists = false, validateLocation = false)

      result match {
        case fs: HadoopFsRelation if table.partitionColumnNames.nonEmpty &&
            sparkSession.sqlContext.conf.manageFilesourcePartitions =>
          // Need to recover partitions into the metastore so our saved data is visible.
          sessionState.executePlan(AlterTableRecoverPartitionsCommand(table.identifier)).toRdd
        case _ =>
      }
    }

    CommandUtils.updateTableStats(sparkSession, table)

    Seq.empty[Row]
  }

  // Materializes `physicalPlan` through the table's data source at `tableLocation` with the given
  // save mode, returning the resolved relation (whose schema may differ from the query's, e.g. in
  // nullability). `tableExists` controls whether the catalog entry is passed to the DataSource.
  private def saveDataIntoTable(
      session: SparkSession,
      table: CatalogTable,
      tableLocation: Option[URI],
      physicalPlan: SparkPlan,
      mode: SaveMode,
      tableExists: Boolean): BaseRelation = {
    // Create the relation based on the input logical plan: `query`.
    val pathOption = tableLocation.map("path" -> CatalogUtils.URIToString(_))
    val dataSource = DataSource(
      session,
      className = table.provider.get,
      partitionColumns = table.partitionColumnNames,
      bucketSpec = table.bucketSpec,
      options = table.storage.properties ++ pathOption,
      catalogTable = if (tableExists) Some(table) else None)

    try {
      dataSource.writeAndRead(mode, query, outputColumnNames, physicalPlan)
    } catch {
      case ex: AnalysisException =>
        logError(s"Failed to write to table ${table.identifier.unquotedString}", ex)
        throw ex
    }
  }
}
| shuangshuangwang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala | Scala | apache-2.0 | 9,800 |
/*
* Copyright ActionML, LLC under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* ActionML licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actionml.circe
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import io.circe.Printer
/**
 * Mixin that provides Circe JSON (de)serialization for Akka HTTP routes.
 *
 * Extends [[FailFastCirceSupport]] so that decoding stops at the first
 * failure, and fixes the JSON printer used for responses to two-space
 * indentation.
 */
trait CirceSupport extends FailFastCirceSupport {
  // Pretty-print all outgoing JSON with 2-space indentation.
  implicit val jsonPrinter: Printer = Printer.spaces2
}
| actionml/harness | rest-server/common/src/main/scala/com/actionml/circe/CirceSupport.scala | Scala | apache-2.0 | 1,000 |
package io.policarp.logback
import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.StackTraceElementProxy
import org.scalatest.{ Matchers, WordSpec }
/**
 * Unit tests for `SplunkHecJsonLayout`: stack-trace parsing and truncation,
 * plus the full JSON layout of a logging event including Splunk HEC metadata
 * (host, index, source, sourcetype) and custom key=value fields.
 */
class SplunkHecJsonLayoutTest extends WordSpec with Matchers {
  "SplunkHecJsonLayout.parseStackTrace" should {
    import SplunkHecJsonLayout.parseStackTrace
    "parse stacktraces" in {
      // Synthetic 100-frame trace; the line number encodes the frame index.
      val stacktrace = (1 to 100).map(i => {
        new StackTraceElementProxy(new StackTraceElement("errorClass", "someMethod", "someFile", i))
      }).toArray
      val event = MockLoggingEvent("SomeClass", "failure", Level.ERROR, stacktrace)
      // Limit larger than the trace: all 100 frames are returned, in order.
      parseStackTrace(event, 500) match {
        case Some(head :: tail) =>
          1 + tail.size shouldBe 100
          (head :: tail).forall(s => s.startsWith("errorClass.someMethod(someFile:")) shouldBe true
          head shouldBe "errorClass.someMethod(someFile:1)"
          tail.reverse.head shouldBe "errorClass.someMethod(someFile:100)"
        case None | Some(Nil) => fail("Should not be empty or None")
      }
      // Limit smaller than the trace: 50 frames plus a trailing "..." marker.
      parseStackTrace(event, 50) match {
        case Some(head :: tail) =>
          1 + tail.size shouldBe 51
          (head :: tail).reverse.tail.forall(s => s.startsWith("errorClass.someMethod(someFile:")) shouldBe true
          head shouldBe "errorClass.someMethod(someFile:1)"
          tail.reverse.head shouldBe "..."
        case None | Some(Nil) => fail("Should not be empty or None")
      }
      // Event with no stack trace: parsing succeeds with an empty frame list.
      val emptyEvent = MockLoggingEvent("SomeClass", "failure", Level.ERROR)
      parseStackTrace(emptyEvent, 25) match {
        case Some(Nil) => assert(true, "Should be Nil")
        case Some(_) => fail("Should be Nil")
        case None => fail("Should not be None")
      }
    }
  }
  "SplunkHecJsonLayout" should {
    "layout events" in {
      val layout = new SplunkHecJsonLayout
      layout.setMaxStackTrace(15)
      // 20 frames with a 15-frame cap: expect frames 1..15 followed by "...".
      val stacktrace = (1 to 20).map(i => {
        new StackTraceElementProxy(new StackTraceElement("errorClass", "someMethod", "someFile", i))
      }).toArray
      val event = MockLoggingEvent("SomeClass", "failure", Level.ERROR, stacktrace)
      layout.doLayout(event) shouldBe {
        """{"time":0,"event":{"message":"failure","level":"ERROR","thread":"fake-thread","logger":"SomeClass","exception":": failure\\n","stacktrace":["errorClass.someMethod(someFile:1)","errorClass.someMethod(someFile:2)","errorClass.someMethod(someFile:3)","errorClass.someMethod(someFile:4)","errorClass.someMethod(someFile:5)","errorClass.someMethod(someFile:6)","errorClass.someMethod(someFile:7)","errorClass.someMethod(someFile:8)","errorClass.someMethod(someFile:9)","errorClass.someMethod(someFile:10)","errorClass.someMethod(someFile:11)","errorClass.someMethod(someFile:12)","errorClass.someMethod(someFile:13)","errorClass.someMethod(someFile:14)","errorClass.someMethod(someFile:15)","..."]}}"""
      }
    }
    "format set bean properties" in {
      // Custom fields are trimmed "key=value" pairs; HEC metadata setters are
      // reflected at the top level of the emitted JSON.
      val layout = new SplunkHecJsonLayout
      layout.setMaxStackTrace(5)
      layout.setCustom("custom1=val1")
      layout.setCustom("custom2=val2 ")
      layout.setCustom(" custom3 = val3 ")
      layout.setHost("test-host")
      layout.setIndex("test-index")
      layout.setSource("test-source")
      layout.setSourcetype("test-sourcetype")
      val stacktrace = (1 to 1000).map(i => {
        new StackTraceElementProxy(new StackTraceElement("errorClass", "someMethod", "someFile", i))
      }).toArray
      val event = MockLoggingEvent("SomeClass", "failure", Level.ERROR, stacktrace)
      layout.doLayout(event) shouldBe {
        """{"time":0,"event":{"message":"failure","level":"ERROR","thread":"fake-thread","logger":"SomeClass","exception":": failure\\n","stacktrace":["errorClass.someMethod(someFile:1)","errorClass.someMethod(someFile:2)","errorClass.someMethod(someFile:3)","errorClass.someMethod(someFile:4)","errorClass.someMethod(someFile:5)","..."],"customFields":{"custom2":"val2","custom1":"val1","custom3":"val3"}},"host":"test-host","source":"test-source","sourcetype":"test-sourcetype","index":"test-index"}"""
      }
    }
  }
}
| kdrakon/splunk-logback-hec-appender | src/test/scala/io/policarp/logback/SplunkHecJsonLayoutTest.scala | Scala | apache-2.0 | 4,095 |
package org.bitcoins.db
import slick.jdbc.SQLiteProfile.api._
import scala.concurrent.{ExecutionContext, Future}
import java.sql.SQLException
import org.bitcoins.core.config.MainNet
/**
 * Created by chris on 9/8/16.
 * Abstract base for data-access objects backed by a Slick database (this
 * file imports the SQLite profile). It provides generic
 * read, update, upsert, and delete methods built from the queries that a
 * subclass supplies. You are responsible for the create function. You also
 * need to specify the table and the row-lookup queries
 * (`findByPrimaryKeys` / `findAll`).
 */
abstract class CRUD[T, PrimaryKeyType](
    implicit private val config: AppConfig,
    private val ec: ExecutionContext)
    extends DatabaseLogger {
  /** The table inside our database we are inserting into */
  val table: TableQuery[_ <: Table[T]]
  /** Binding to the actual database itself, this is what is used to run queries */
  def database: SafeDatabase = SafeDatabase(config)
  /**
   * create a record in the database
   *
   * @param t - the record to be inserted
   * @return the inserted record
   */
  def create(t: T): Future[T] = {
    logger.trace(s"Writing $t to DB with config: ${config.config}")
    createAll(Vector(t)).map(_.head)
  }
  /** Inserts all of the given records; the strategy is subclass-specific. */
  def createAll(ts: Vector[T]): Future[Vector[T]]
  /**
   * read a record from the database
   *
   * @param id - the id of the record to be read
   * @return Option[T] - the record if found, else none
   */
  def read(id: PrimaryKeyType): Future[Option[T]] = {
    logger.trace(s"Reading from DB with config: ${config.config}")
    val query = findByPrimaryKey(id)
    val rows: Future[Seq[T]] = database.run(query.result)
    rows.map(_.headOption)
  }
  /**
   * Update the corresponding record in the database.
   * The returned future fails with [[UpdateFailedException]] when the updated
   * row cannot be read back afterwards.
   */
  def update(t: T): Future[T] = {
    updateAll(Vector(t)).map { ts =>
      ts.headOption match {
        case Some(updated) => updated
        case None          => throw UpdateFailedException("Update failed for: " + t)
      }
    }
  }
  /**
   * Updates all of the given ts in the database, then reads the rows back.
   *
   * NOTE(review): each `query.update(t)` is applied to the *whole*
   * `findAll(ts)` query, i.e. to every matched row, sequentially for each t.
   * Verify this is the intended behaviour when `ts` contains more than one
   * distinct row.
   */
  def updateAll(ts: Vector[T]): Future[Vector[T]] = {
    val query = findAll(ts)
    val actions = ts.map(t => query.update(t))
    val affectedRows: Future[Vector[Int]] = database.run(DBIO.sequence(actions))
    val updatedTs = findAll(ts)
    affectedRows.flatMap { _ =>
      database.runVec(updatedTs.result)
    }
  }
  /**
   * delete the corresponding record in the database
   *
   * @param t - the record to be deleted
   * @return int - the number of rows affected by the deletion
   */
  def delete(t: T): Future[Int] = {
    logger.debug("Deleting record: " + t)
    val query: Query[Table[_], T, Seq] = find(t)
    database.run(query.delete)
  }
  /**
   * insert the record if it does not exist, update it if it does
   *
   * @param t - the record to inserted / updated
   * @return t - the record that has been inserted / updated
   */
  def upsert(t: T): Future[T] = upsertAll(Vector(t)).map(_.head)
  /** Upserts all of the given ts in the database, then returns the upserted values */
  def upsertAll(ts: Vector[T]): Future[Vector[T]] = {
    // insertOrUpdate per row, then re-query so callers see the stored values.
    val actions = ts.map(t => table.insertOrUpdate(t))
    val result: Future[Vector[Int]] = database.run(DBIO.sequence(actions))
    val findQueryFuture = result.map(_ => findAll(ts).result)
    findQueryFuture.flatMap(database.runVec(_))
  }
  /**
   * return all rows that have a certain primary key
   *
   * @param id
   * @return Query object corresponding to the selected rows
   */
  protected def findByPrimaryKey(id: PrimaryKeyType): Query[Table[_], T, Seq] =
    findByPrimaryKeys(Vector(id))
  /** Finds the rows that correlate to the given primary keys */
  protected def findByPrimaryKeys(
      ids: Vector[PrimaryKeyType]): Query[Table[_], T, Seq]
  /**
   * return the row that corresponds with this record
   *
   * @param t - the row to find
   * @return query - the sql query to find this record
   */
  protected def find(t: T): Query[Table[_], T, Seq] = findAll(Vector(t))
  /** Query selecting the rows corresponding to all of the given records. */
  protected def findAll(ts: Vector[T]): Query[Table[_], T, Seq]
  /** Finds all elements in the table */
  def findAll(): Future[Vector[T]] =
    database.run(table.result).map(_.toVector)
}
/**
 * Thin wrapper around the configured Slick database that prepends the SQLite
 * foreign-keys pragma to every action and logs failed queries. Error details
 * are only logged when not running on mainnet.
 */
case class SafeDatabase(config: AppConfig) extends DatabaseLogger {
  implicit private val conf: AppConfig = config
  import config.database
  /**
   * SQLite does not enable foreign keys by default. This query is
   * used to enable it. It must be included in all connections to
   * the database.
   */
  private val foreignKeysPragma = sqlu"PRAGMA foreign_keys = TRUE;"
  /** Logs the given action and error, if we are not on mainnet */
  private def logAndThrowError(
      action: DBIOAction[_, NoStream, _]): PartialFunction[Throwable, Nothing] = {
    case err: SQLException =>
      if (config.network != MainNet) {
        logger.error(
          s"Error when executing query ${action.getDumpInfo.getNamePlusMainInfo}")
        logger.error(s"$err")
      }
      // Always rethrow so callers still observe the failure.
      throw err
  }
  /** Runs the given DB action */
  def run[R](action: DBIOAction[R, NoStream, _])(
      implicit ec: ExecutionContext): Future[R] = {
    // `>>` sequences the pragma before the real action on the same connection.
    val result = database.run[R](foreignKeysPragma >> action)
    result.recoverWith { logAndThrowError(action) }
  }
  /**
   * Runs the given DB sequence-returning DB action
   * and converts the result to a vector
   */
  def runVec[R](action: DBIOAction[Seq[R], NoStream, _])(
      implicit ec: ExecutionContext): Future[Vector[R]] = {
    val result = database.run[Seq[R]](foreignKeysPragma >> action)
    result.map(_.toVector).recoverWith { logAndThrowError(action) }
  }
}
/** Thrown by [[CRUD.update]] when the updated row cannot be read back. */
case class UpdateFailedException(message: String)
    extends RuntimeException(message)
| bitcoin-s/bitcoin-s-core | db-commons/src/main/scala/org/bitcoins/db/CRUD.scala | Scala | mit | 5,713 |
package sexamples.simulation.pingpong
import se.sics.kompics.sl._
import se.sics.kompics.network.Network
import se.sics.kompics.timer.{CancelPeriodicTimeout, SchedulePeriodicTimeout, Timeout, Timer}
import java.util.UUID
object Pinger {
  /** Periodic timeout event that drives sending the next Ping. */
  class PingTimeout(_spt: SchedulePeriodicTimeout) extends Timeout(_spt);
}
/**
 * Kompics component that periodically sends a Ping to a configured ponger
 * address and logs every Pong it receives back.
 *
 * Addresses and the ping period are read from the component configuration
 * under the `pingpong.pinger.*` keys.
 */
class Pinger extends ComponentDefinition {
  import Pinger.PingTimeout;
  // Own address, the ponger's address, and the ping period (millis).
  val self = cfg.getValue[TAddress]("pingpong.pinger.addr");
  val ponger = cfg.getValue[TAddress]("pingpong.pinger.pongeraddr");
  val timeoutPeriod = cfg.getValue[Long]("pingpong.pinger.timeout");
  // Required ports: network for messages, timer for the periodic schedule.
  val net = requires[Network];
  val timer = requires[Timer];
  // Number of Pongs received so far.
  private var counter: Long = 0L;
  // Id of the scheduled periodic timeout, kept so it can be cancelled on tearDown.
  private var timerId: Option[UUID] = None;
  // On Start: schedule the periodic ping timeout and remember its id.
  ctrl uponEvent {
    case _: Start => {
      val spt = new SchedulePeriodicTimeout(0, timeoutPeriod);
      val timeout = new PingTimeout(spt);
      spt.setTimeoutEvent(timeout);
      trigger(spt -> timer);
      timerId = Some(timeout.getTimeoutId());
    }
  }
  // Count and log each incoming Pong.
  net uponEvent {
    case TMessage(_, Pong) => {
      counter += 1L;
      log.info(s"Got Pong #${counter}!");
    }
  }
  // Each timeout sends one Ping to the ponger.
  timer uponEvent {
    case _: PingTimeout => {
      trigger(TMessage(self, ponger, Ping) -> net);
    }
  }
  // Cancel the periodic timeout (if one was scheduled) when the component stops.
  override def tearDown(): Unit = {
    timerId match {
      case Some(id) => {
        trigger(new CancelPeriodicTimeout(id) -> timer);
      }
      case None => () // no cleanup necessary
    }
  }
}
| kompics/kompics-scala | docs/src/main/scala/sexamples/simulation/pingpong/Pinger.scala | Scala | gpl-2.0 | 1,441 |
package justin.db.replica
import java.util.UUID
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.storage.PluggableStorageProtocol.DataOriginality
import org.scalatest.{FlatSpec, Matchers}
class IsPrimaryOrReplicaTest extends FlatSpec with Matchers {
  behavior of "Data Originality Resolver"
  /**
   * Classifies the given UUID with a resolver for node 0 on a fixed
   * 3-node / 21-partition ring (same fixture as both examples below).
   */
  private def classify(rawId: String) = {
    val resolver = new IsPrimaryOrReplica(NodeId(0), Ring.apply(nodesSize = 3, partitionsSize = 21))
    resolver.apply(UUID.fromString(rawId))
  }
  it should "reason exemplary data's id as a replica" in {
    classify("179d6eb0-681d-4277-9caf-3d6d60e9faf9") shouldBe a[DataOriginality.Replica]
  }
  it should "reason exemplary data's id as a primary" in {
    classify("16ec44cd-5b4e-4b38-a647-206c1dc11b50") shouldBe a[DataOriginality.Primary]
  }
}
| speedcom/JustinDB | justin-core/src/test/scala/justin/db/replica/IsPrimaryOrReplicaTest.scala | Scala | apache-2.0 | 1,160 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.models.Sequential
import com.intel.analytics.zoo.pipeline.api.keras.serializer.ModuleSerializationTest
/**
 * Compares the BigDL/Zoo LocallyConnected1D layer against Keras reference
 * output, with and without bias.
 */
class LocallyConnected1DSpec extends KerasBaseSpec {
  // Keras stores the first weight tensor as (d1, d2, d3); this converter
  // transposes the last two dimensions to (d1, d3, d2) for BigDL. Any second
  // tensor (the bias) is passed through unchanged.
  def weightConverter(data: Array[Tensor[Float]]): Array[Tensor[Float]] = {
    val out = new Array[Tensor[Float]](data.length)
    val d1l: Int = data(0).size(1)
    val d2l: Int = data(0).size(2)
    val d3l: Int = data(0).size(3)
    out(0) = Tensor(d1l, d3l, d2l)
    // Walk every flattened index of the (d1, d2, d3) tensor and write each
    // value at the swapped position (d1, d3, d2). `page` is the element count
    // of one d1 slice; indices below are 1-based as required by Tensor.
    val page: Int = d2l * d3l
    for (i <- 0 until d1l * d2l * d3l) {
      val d1 = i / page + 1
      val d2 = (i % page) / d3l + 1
      val d3 = (i % page) % d3l + 1
      val v = data(0).valueAt(d1, d2, d3)
      out(0).setValue(d1, d3, d2, v)
    }
    if (data.length > 1) {
      out(1) = data(1)
    }
    out
  }
  "LocallyConnected1D" should "be the same as Keras" in {
    val kerasCode =
      """
        |input_tensor = Input(shape=[12, 24])
        |input = np.random.random([3, 12, 24])
        |output_tensor = LocallyConnected1D(32, 3, activation="relu")(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val seq = Sequential[Float]()
    val layer = LocallyConnected1D[Float](32, 3, activation = "relu",
      inputShape = Shape(12, 24))
    seq.add(layer)
    checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
      kerasCode, weightConverter)
  }
  "LocallyConnected1D without bias" should "be the same as Keras" in {
    val kerasCode =
      """
        |input_tensor = Input(shape=[32, 32])
        |input = np.random.random([2, 32, 32])
        |output_tensor = LocallyConnected1D(64, 4, subsample_length=2,
        |                                   bias=False)(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val seq = Sequential[Float]()
    val layer = LocallyConnected1D[Float](64, 4, subsampleLength = 2,
      bias = false, inputShape = Shape(32, 32))
    seq.add(layer)
    checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
      kerasCode, weightConverter)
  }
}
/** Round-trip serialization test for LocallyConnected1D. */
class LocallyConnected1DSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val layer = LocallyConnected1D[Float](32, 3, inputShape = Shape(12, 24))
    // Build with batch dimension 2 and feed a random input of matching shape.
    layer.build(Shape(2, 12, 24))
    val input = Tensor[Float](2, 12, 24).rand()
    runSerializationTest(layer, input)
  }
}
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/LocallyConnected1DSpec.scala | Scala | apache-2.0 | 3,298 |
package usbinstall.controllers
import java.net.URL
import java.util.ResourceBundle
import javafx.event.ActionEvent
import javafx.fxml.{FXML, Initializable}
import javafx.scene.control.CheckBox
import javafx.stage.Stage
import scala.annotation.unused
import usbinstall.settings.ErrorAction
class InstallFailureController extends Initializable {
@FXML
protected var applyDefault: CheckBox = _
private var action = ErrorAction.Ask
def getAction: ErrorAction.Value = action
def getAsDefault: Boolean = applyDefault.isSelected
override def initialize(fxmlFileLocation: URL, resources: ResourceBundle): Unit = {
}
def onStop(@unused event: ActionEvent): Unit = {
action = ErrorAction.Stop
window.asInstanceOf[Stage].close()
}
def onContinue(@unused event: ActionEvent): Unit = {
action = ErrorAction.Skip
window.asInstanceOf[Stage].close()
}
private def window =
applyDefault.getScene.getWindow
}
| suiryc/usbinstall | src/main/scala/usbinstall/controllers/InstallFailureController.scala | Scala | gpl-3.0 | 947 |
package ohnosequences.tabula.action
import ohnosequences.tabula._, states._, actions._, tables._
trait AnyCreateTable extends AnyTableAction {
type InputState = InitialState[Table]
type OutputState = Creating[Table]
type Output = None.type
}
case class CreateTable[T <: AnyTable](val table: T)
extends AnyCreateTable { type Table = T }
| ohnosequences/tabula | src/main/scala/tabula/actions/CreateTable.scala | Scala | agpl-3.0 | 350 |
package chat.tox.antox.activities
import android.content.{Context, Intent}
import android.os.Bundle
import android.view.View
import chat.tox.antox.R
import chat.tox.antox.data.State
import chat.tox.antox.tox.MessageHelper
import chat.tox.antox.utils.Location
import chat.tox.antox.wrapper._
import im.tox.tox4j.core.enums.ToxMessageType
import rx.lang.scala.schedulers.{AndroidMainThreadScheduler, IOScheduler}
/**
 * Chat screen for a group conversation.
 *
 * Hides the friend-specific UI (info/call/video buttons and the status icon)
 * since calls are not implemented for groups in toxav, and keeps the activity
 * title in sync with the group's display name via the group-info observable.
 */
class GroupChatActivity extends GenericChatActivity[GroupKey] {
  var photoPath: String = null
  override def getKey(key: String): GroupKey = new GroupKey(key)
  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    // Calls and the peer status icon do not apply to group chats — hide them.
    findViewById(R.id.info).setVisibility(View.GONE)
    findViewById(R.id.call).setVisibility(View.GONE)
    findViewById(R.id.video).setVisibility(View.GONE)
    statusIconView.setVisibility(View.GONE)
  }
  override def onResume(): Unit = {
    super.onResume()
    val thisActivity = this
    val db = State.db
    // Update the title whenever this group's info changes; observe on the UI
    // thread since it touches views. `titleSub` is presumably unsubscribed by
    // the parent activity — TODO confirm in GenericChatActivity.
    titleSub = db.groupInfoList
      .subscribeOn(IOScheduler())
      .observeOn(AndroidMainThreadScheduler())
      .subscribe(groupInfo => {
        val id = activeKey
        val mGroup: Option[GroupInfo] = groupInfo.find(groupInfo => groupInfo.key == id)
        thisActivity.setDisplayName(mGroup.map(_.getDisplayName).getOrElse(""))
      })
  }
  def onClickVoiceCallFriend(v: View): Unit = {}
  def onClickVideoCallFriend(v: View): Unit = {}
  // Opens the group profile screen for the active group.
  def onClickInfo(v: View): Unit = {
    val profile = new Intent(this, classOf[GroupProfileActivity])
    profile.putExtra("key", activeKey.toString)
    startActivity(profile)
  }
  override def onPause(): Unit = {
    super.onPause()
  }
  // Sends a group message through the shared MessageHelper.
  override def sendMessage(message: String, messageType: ToxMessageType, context: Context): Unit = {
    MessageHelper.sendGroupMessage(context, activeKey, message, messageType, None)
  }
  override def setTyping(typing: Boolean): Unit = {
    // not yet implemented in toxcore
  }
  override def onClickInfo(): Unit = {
    //TODO add a group profile activity
  }
  override def onClickVideoCall(clickLocation: Location): Unit = {
    // not yet implemented in toxav
  }
  override def onClickVoiceCall(clickLocation: Location): Unit = {
    // not yet implemented in toxav
  }
}
| subliun/Antox | app/src/main/scala/chat/tox/antox/activities/GroupChatActivity.scala | Scala | gpl-3.0 | 2,279 |
package ch.jodersky.sbt.jni
package plugins
import build._
import sbt._
import sbt.Keys._
import sys.process._
/** Wraps a native build system in sbt tasks. */
object JniNative extends AutoPlugin {
  object autoImport {
    // Main task, inspect this first
    val nativeCompile = taskKey[File](
      "Builds a native library by calling the native build tool."
    )
    val nativePlatform = settingKey[String](
      "Platform (architecture-kernel) of the system this build is running on."
    )
    val nativeBuildTool = taskKey[BuildTool](
      "The build tool to be used when building a native library."
    )
    val nativeInit = inputKey[Seq[File]](
      "Initialize a native build script from a template."
    )
  }
  import autoImport._
  // Internal task: materializes the detected build tool for the current source/build dirs.
  val nativeBuildToolInstance = taskKey[BuildTool#Instance]("Get an instance of the current native build tool.")
  lazy val settings: Seq[Setting[_]] = Seq(
    // the value returned must match that of `ch.jodersky.jni.PlatformMacros#current()` of project `macros`
    nativePlatform := {
      // Derive "<arch>-<kernel>" from `uname -sm`; fall back to
      // "unknown-unknown" when uname is unavailable or unparsable.
      try {
        val lines = Process("uname -sm").lineStream
        if (lines.length == 0) {
          sys.error("Error occured trying to run `uname`")
        }
        // uname -sm returns "<kernel> <hardware name>"
        val parts = lines.head.split(" ")
        if (parts.length != 2) {
          sys.error("'uname -sm' returned unexpected string: " + lines.head)
        } else {
          val arch = parts(1).toLowerCase.replaceAll("\\\\s", "")
          val kernel = parts(0).toLowerCase.replaceAll("\\\\s", "")
          arch + "-" + kernel
        }
      } catch {
        case ex: Exception =>
          sLog.value.error("Error trying to determine platform.")
          sLog.value.warn("Cannot determine platform! It will be set to 'unknown'.")
          "unknown-unknown"
      }
    },
    // Native sources live under src/native; build output is per-platform.
    sourceDirectory in nativeCompile := sourceDirectory.value / "native",
    target in nativeCompile := target.value / "native" / (nativePlatform).value,
    nativeBuildTool := {
      // Probe the source directory with each supported tool's detector
      // (currently only CMake) and fail with guidance when none matches.
      val tools = Seq(CMake)
      val src = (sourceDirectory in nativeCompile).value
      val tool = if (src.exists && src.isDirectory) {
        tools.find(t => t detect src)
      } else {
        None
      }
      tool getOrElse sys.error("No supported native build tool detected. " +
        s"Check that the setting 'sourceDirectory in nativeCompile' (currently set to $src) " +
        "points to a directory containing a supported build script. Supported build tools are: " +
        tools.map(_.name).mkString(",")
      )
    },
    nativeBuildToolInstance := {
      // Create the out-of-source build directory and bind the tool to it.
      val tool = nativeBuildTool.value
      val srcDir = (sourceDirectory in nativeCompile).value
      val buildDir = (target in nativeCompile).value / "build"
      IO.createDirectory(buildDir)
      tool.getInstance(
        baseDirectory = srcDir,
        buildDirectory = buildDir,
        logger = streams.value.log
      )
    },
    clean in nativeCompile := {
      // Best-effort clean: failures are only logged at debug level, so a
      // missing/broken native build never breaks the overall `clean`.
      val log = streams.value.log
      log.debug("Cleaning native build")
      try {
        val toolInstance = nativeBuildToolInstance.value
        toolInstance.clean()
      } catch {
        case ex: Exception =>
          log.debug(s"Native cleaning failed: $ex")
      }
    },
    nativeCompile := {
      // Run the native build and return the produced library file.
      val tool = nativeBuildTool.value
      val toolInstance = nativeBuildToolInstance.value
      val targetDir = (target in nativeCompile).value / "bin"
      val log = streams.value.log
      IO.createDirectory(targetDir)
      log.info(s"Building library with native build tool ${tool.name}")
      val lib = toolInstance.library(targetDir)
      log.success(s"Library built in ${lib.getAbsolutePath}")
      lib
    },
    // also clean native sources
    clean := {
      clean.dependsOn(clean in nativeCompile).value
    },
    nativeInit := {
      // Input task: `nativeInit <tool> [<libname>]` writes a template build
      // script into the native source directory. The library name defaults to
      // the project name; extra arguments are ignored.
      import complete.DefaultParsers._
      val log = streams.value.log
      def getTool(toolName: String): BuildTool = toolName.toLowerCase match {
        case "cmake" => CMake
        case _ => sys.error("Unsupported build tool: " + toolName)
      }
      val args = spaceDelimited("<tool> [<libname>]").parsed.toList
      val (tool: BuildTool, lib: String) = args match {
        case Nil => sys.error("Invalid arguments.")
        case tool :: Nil => (getTool(tool), name.value)
        case tool :: lib :: other => (getTool(tool), lib)
      }
      log.info(s"Initializing native build with ${tool.name} configuration")
      val files = tool.initTemplate((sourceDirectory in nativeCompile).value, lib)
      files foreach { file =>
        log.info("Wrote to " + file.getAbsolutePath)
      }
      files
    }
  )
  override lazy val projectSettings = settings
}
| jodersky/sbt-jni | plugin/src/main/scala/ch/jodersky/sbt/jni/plugins/JniNative.scala | Scala | bsd-3-clause | 4,732 |
import scala.collection.parallel.immutable.ParMap
import scala.util.Random
package object conway {
  /**
   * Builds a random initial Game of Life board of `width` x `height` cells.
   *
   * Every cell (x, y) with 1 <= x <= width and 1 <= y <= height is mapped to
   * a uniformly random alive/dead flag, returned as a parallel immutable map
   * (the same signature as before).
   *
   * Rewritten to drop the redundant `var` over an already-mutable map and the
   * hand-rolled nested loops: the grid is produced by a for-comprehension and
   * converted once at the end.
   */
  protected[conway] def randomize(width: Int, height: Int): ParMap[(Int, Int), Boolean] = {
    val cells = for {
      x <- 1 to width
      y <- 1 to height
    } yield (x, y) -> Random.nextBoolean()
    cells.toMap.par
  }
}
| mkrogemann/sgol | src/main/scala/conway/package.scala | Scala | mit | 470 |
// Databricks notebook source exported at Sun, 28 Aug 2016 13:52:09 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Big Data Analysis for Humanities and Social Sciences](https://www.eventbrite.co.uk/e/big-data-analysis-for-the-humanities-and-social-sciences-tickets-26708754604)
// MAGIC
// MAGIC ### August 26, 2016, King's Digital Lab, King's College London
// MAGIC #### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # Old Bailey Online Data Analysis in Apache Spark
// MAGIC
// MAGIC 2016, by Raaz Sainudiin and James Smithies is licensed under [Creative Commons Attribution-NonCommercial 4.0 International License](http://creativecommons.org/licenses/by-nc/4.0/).
// MAGIC
// MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/xtraResources/OldBaileyOnline/OBO_LoadExtract.html) of this databricks notebook.
// MAGIC
// MAGIC This is a starting point for ETL of Old Bailey Online Data from [http://www.math.canterbury.ac.nz/~r.sainudiin/datasets/public/OldBailey/index.html](http://www.math.canterbury.ac.nz/~r.sainudiin/datasets/public/OldBailey/index.html).
// MAGIC
// MAGIC This work merely builds on [Old Bailey Online by Clive Emsley, Tim Hitchcock and Robert Shoemaker](https://www.oldbaileyonline.org/) that is licensed under a Creative Commons Attribution-NonCommercial 4.0 International License. Permissions beyond the scope of this license may be available at https://www.oldbaileyonline.org/static/Legal-info.jsp.
// COMMAND ----------
// MAGIC %md
// MAGIC The data is already loaded in dbfs (see dowloading and loading section below for these details).
// COMMAND ----------
1+1 // sanity check that the cluster evaluates Scala
// COMMAND ----------
// MAGIC %md
// MAGIC # Analysing the Full Old Bailey Online Sessions Papers Dataset
// MAGIC First **Step 0: Downloading and Loading Data (The Full Dataset)** below should have been done on the shard.
// MAGIC This currently cannot be done in Community Edition as the dataset is not loaded into the dbfs available in CE yet.
// MAGIC But the dataset is in the academic shard and this is a walkthrough of the Old Bailey Online data in the academic shard.
// MAGIC
// MAGIC Let's first check that the datasets are there in the distributed file system.
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/obo/tei/")) // full data if you have it - not in CE!!
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/obo/tei/ordinarysAccounts"))
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/obo/tei/sessionsPapers"))
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC ## Step 1: Exploring data first: xml parsing in scala
// MAGIC But, first let's understand the data and its structure.
// MAGIC
// MAGIC **Step 0: Dowloading and Loading Data (The Full Dataset)** should have been done already with data in dbfs alread.
// COMMAND ----------
// Load a single ordinary's account file as (path, content) pairs.
val raw = sc.wholeTextFiles("dbfs:/datasets/obo/tei/ordinarysAccounts/OA17261103.xml")
// COMMAND ----------
// Switch to a sessions-papers file instead (re-binds `raw` in the notebook).
val raw = sc.wholeTextFiles("dbfs:/datasets/obo/tei/sessionsPapers/17930109.xml") // has data on crimes and punishments
// COMMAND ----------
//val oboTest = sc.wholeTextFiles("dbfs:/datasets/obo/tei/ordinaryAccounts/OA1693072*.xml")
// Keep just the file contents, then pull the first file's text into the driver.
val xml = raw.map( x => x._2 )
val x = xml.take(1)(0) // getting content of xml file as a string
// COMMAND ----------
// Parse the raw XML string into a scala.xml.Elem for exploration.
val elem = scala.xml.XML.loadString(x)
// COMMAND ----------
elem
// COMMAND ----------
(elem \\\\ "div0").map(Node => (Node \\ "@type").text) // types of div0 node, the singleton root node for the file
// COMMAND ----------
(elem \\\\ "div1").map(Node => (Node \\ "@type").text) // types of div1 node
// COMMAND ----------
(elem \\\\ "div1")
// COMMAND ----------
// Keep only trial accounts and show their type and id attributes.
(elem \\\\ "div1").filter(Node => ((Node \\ "@type").text == "trialAccount"))
.map(Node => (Node \\ "@type", Node \\ "@id" ))
// COMMAND ----------
// For each trial, also collect all (type, value) pairs of its <rs>/<interp> tags.
val trials = (elem \\\\ "div1").filter(Node => ((Node \\ "@type").text == "trialAccount"))
.map(Node => (Node \\ "@type", Node \\ "@id", (Node \\\\ "rs" \\\\ "interp").map( n => ((n \\\\ "@type").text, (n \\\\ "@value").text ))))
// COMMAND ----------
// The interp types we care about: verdicts, punishments and offences.
val wantedFields = Seq("verdictCategory","punishmentCategory","offenceCategory").toSet
// COMMAND ----------
// Same extraction, restricted to the wanted interp types.
val trials = (elem \\\\ "div1").filter(Node => ((Node \\ "@type").text == "trialAccount"))
.map(Node => ((Node \\ "@type").text, (Node \\ "@id").text, (Node \\\\ "rs" \\\\ "interp")
.filter(n => wantedFields.contains( (n \\\\ "@type").text))
.map( n => ((n \\\\ "@type").text, (n \\\\ "@value").text ))))
// COMMAND ----------
// MAGIC %md
// MAGIC Since there can be more than one defendant in a trial, we need to reduce by key as follows.
// COMMAND ----------
// Collapses a collection of (key, count) pairs into per-key totals:
// pairs sharing a key are grouped and their counts summed.
def reduceByKey(collection: Traversable[Tuple2[String, Int]]) = {
  collection
    .groupBy { case (key, _) => key }
    .map { case (key, pairs) => key -> pairs.map(_._2).sum }
}
// COMMAND ----------
// MAGIC %md
// MAGIC Let's process the coarsest data on the trial as json strings.
// COMMAND ----------
// For each trial in the parsed file: count occurrences of each wanted interp
// value (handles multiple defendants via reduceByKey), tag the map with the
// trial id, and render it as a JSON object.
val trials = (elem \\\\ "div1").filter(Node => ((Node \\ "@type").text == "trialAccount"))
.map(Node => {val trialId = (Node \\ "@id").text;
val trialInterps = (Node \\\\ "rs" \\\\ "interp")
.filter(n => wantedFields.contains( (n \\\\ "@type").text))
//.map( n => ((n \\\\ "@type").text, (n \\\\ "@value").text ));
.map( n => ((n \\\\ "@value").text , 1 ));
val trialCounts = reduceByKey(trialInterps).toMap;
//(trialId, trialInterps, trialCounts)
scala.util.parsing.json.JSONObject(trialCounts updated ("id", trialId))
})
// COMMAND ----------
trials.foreach(println)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 2: Extract, Transform and Load XML files to get DataFrame of counts
// MAGIC
// MAGIC We have played enough (see **Step 1: Exploring data first: xml parsing in scala** above first) to understand what to do now with our xml data in order to get it converted to counts of crimes, verdicts and punishments.
// MAGIC
// MAGIC Let's parse the xml files and turn into Dataframe in one block.
// COMMAND ----------
// Read every sessions-paper XML file as (path, content) pairs; keep only the content.
val rawWTF = sc.wholeTextFiles("dbfs:/datasets/obo/tei/sessionsPapers/*.xml") // has all data on crimes and punishments
val raw = rawWTF.map( x => x._2 )
// Parse each XML file and emit one JSON string of interp-value counts per trial account.
val trials = raw.flatMap( x => {
val elem = scala.xml.XML.loadString(x);
val outJson = (elem \\\\ "div1").filter(Node => ((Node \\ "@type").text == "trialAccount"))
.map(Node => {val trialId = (Node \\ "@id").text;
val trialInterps = (Node \\\\ "rs" \\\\ "interp")
.filter(n => wantedFields.contains( (n \\\\ "@type").text))
//.map( n => ((n \\\\ "@type").text, (n \\\\ "@value").text ));
.map( n => ((n \\\\ "@value").text , 1 ));
val trialCounts = reduceByKey(trialInterps).toMap;
//(trialId, trialInterps, trialCounts)
scala.util.parsing.json.JSONObject(trialCounts updated ("id", trialId)).toString()
})
outJson
})
// COMMAND ----------
dbutils.fs.rm("dbfs:/datasets/obo/processed/trialCounts",recurse=true) // let's remove the files from the previous analysis
trials.saveAsTextFile("dbfs:/datasets/obo/processed/trialCounts") // now let's save the trial counts - aboout 220 seconds to pars all data and get counts
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/obo/processed/trialCounts"))
// COMMAND ----------
// Reload the saved JSON lines as a DataFrame; the schema is inferred from the JSON keys.
val trialCountsDF = sqlContext.read.json("dbfs:/datasets/obo/processed/trialCounts")
// COMMAND ----------
trialCountsDF.printSchema
// COMMAND ----------
trialCountsDF.count // total number of trials
// COMMAND ----------
display(trialCountsDF)
// COMMAND ----------
// Categories absent from a trial come back as null after the JSON union; treat them as zero counts.
val trDF = trialCountsDF.na.fill(0) // filling nulls with 0
// COMMAND ----------
display(trDF)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Preparation: Some examples to learn xml and scala
// COMMAND ----------
// Pretty-printer for inspecting parsed XML: 80-column width, 2-space indent.
val p = new scala.xml.PrettyPrinter(80, 2)
// COMMAND ----------
// NOTE(review): `elem` must already be defined by an earlier cell.
p.format(elem)
// COMMAND ----------
// MAGIC %md
// MAGIC ### Better examples:
// MAGIC
// MAGIC http://alvinalexander.com/scala/how-to-extract-data-from-xml-nodes-in-scala
// MAGIC
// MAGIC http://alvinalexander.com/scala/scala-xml-xpath-example
// MAGIC
// MAGIC
// MAGIC
// MAGIC
// MAGIC
// MAGIC XML to JSON, if you want to go this route:
// MAGIC
// MAGIC http://scala-tools.org/mvnsites/liftweb-2.0/framework/scaladocs/index.html
// MAGIC
// MAGIC https://mkaz.github.io/2011/05/23/how-to-convert-xml-to-json/
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC ## Step 0: Dowloading and Loading Data (The Full Dataset)
// MAGIC
// MAGIC First we will be downloading data from [http://www.math.canterbury.ac.nz/~r.sainudiin/datasets/public/OldBailey/index.html](http://www.math.canterbury.ac.nz/~r.sainudiin/datasets/public/OldBailey/index.html).
// MAGIC
// MAGIC The steps below need to be done once for a give shard!
// MAGIC
// MAGIC **You need to mount AWS S3 with your AWS credentials to analyse it in dbCE or upgrade your databricks account if you want to folow these steps!**
// MAGIC
// MAGIC **Optional TODOs:**
// MAGIC * one could just read the zip files directly (see week 10 on Beijing taxi trajectories example from the scalable-data-science course or read 'importing zip files' in the Guide).
// MAGIC * one could just download from s3 directly
// COMMAND ----------
// MAGIC %sh
// MAGIC wget https://dl.dropboxusercontent.com/u/3531607/datasets/OldBailey/OB_tei_7-2_CC-BY-NC.zip
// COMMAND ----------
// MAGIC %sh
// MAGIC pwd && ls -al
// COMMAND ----------
// MAGIC %sh
// MAGIC unzip OB_tei_7-2_CC-BY-NC.zip
// COMMAND ----------
// MAGIC %md
// MAGIC Let's put the files in dbfs.
// COMMAND ----------
// One-time setup: create the target directory in DBFS for the TEI corpus.
dbutils.fs.mkdirs("dbfs:/datasets/obo/tei") //need not be done again!
// COMMAND ----------
dbutils.fs.cp("file:/databricks/driver/tei", "dbfs:/datasets/obo/tei/",recurse=true) // already done and it takes 1500 seconds - a while!
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/obo/tei/ordinarysAccounts"))
// COMMAND ----------
util.Properties.versionString // check scala version
// COMMAND ----------
// MAGIC %md
// MAGIC Repeat the same process with the obo-tiny dataset
// COMMAND ----------
// MAGIC %sh
// MAGIC ## Now let's us download the zip file from Dropbox
// MAGIC #wget https://dl.dropboxusercontent.com/u/3531607/datasets/OldBailey/OB-tiny_tei_7-2_CC-BY-NC.zip
// MAGIC
// MAGIC ## it is also available from github, just uncomment and wget from git if Dropbox has issues downloading
// MAGIC # wget https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/datasets/obo-tiny/OB-tiny_tei_7-2_CC-BY-NC.zip
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Big Data Analysis for Humanities and Social Sciences](https://www.eventbrite.co.uk/e/big-data-analysis-for-the-humanities-and-social-sciences-tickets-26708754604)
// MAGIC
// MAGIC ### August 26, 2016, King's Digital Lab, King's College London
// MAGIC #### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/) | lamastex/scalable-data-science | db/20160826_KDL_Intro2BDA_HumSocSci/06_OldBaileyOnline/OBO_LoadExtract.scala | Scala | unlicense | 12,338 |
object T {
  // Compiler regression test: `Tuppel_1` is defined in the companion test
  // file (t3642_1); the test only needs this reference to resolve.
  Tuppel_1.get
}
| yusuke2255/dotty | tests/untried/pos/t3642/t3642_2.scala | Scala | bsd-3-clause | 28 |
package eagerEval
import leon._
import lang._
import collection._
// Merge sort over Leon's verified List, with postconditions (`ensuring`)
// checked by the Leon verifier: sorting preserves both content and size.
object MergeSort {

  // Merges two lists under the ordering `less`. Stable when both inputs are
  // sorted; the postcondition only asserts content/size preservation.
  def merge[T](less: (T, T) => Boolean)(xs: List[T], ys: List[T]): List[T] = {
    (xs, ys) match {
      case (Nil(), _) => ys
      case (_, Nil()) => xs
      case (Cons(x, xtail), Cons(y, ytail)) =>
        if (less(x, y))
          x :: merge(less)(xtail, ys)
        else
          y :: merge(less)(xs, ytail)
    }
  } ensuring { res => res.content == xs.content ++ ys.content &&
                      res.size == xs.size + ys.size }

  // Splits a list into two halves by distributing alternate elements,
  // so the halves differ in length by at most one.
  def split[T](list: List[T]): (List[T], List[T]) = {
    list match {
      case Nil()          => (Nil(), Nil())
      case Cons(x, Nil()) => (Cons(x, Nil()), Nil())
      case Cons(x1, Cons(x2, xs)) =>
        val (s1, s2) = split(xs)
        (Cons(x1, s1), Cons(x2, s2))
    }
  }

  // Top-down merge sort: split, recursively sort, merge.
  def msort[T](less: (T, T) => Boolean)(l: List[T]): List[T] = {
    l match {
      case Nil()          => Nil[T]()
      case Cons(x, Nil()) => Cons(x, Nil())
      case _ =>
        val (first, second) = split(l)
        merge(less)(msort(less)(first), msort(less)(second))
    }
  } ensuring { res => res.content == l.content && res.size == l.size }
}
| epfl-lara/leon | testcases/lazy-datastructures/eager/BasicMergeSort.scala | Scala | gpl-3.0 | 1,174 |
/**
* Copyright 2012-2013 StackMob
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stackmob.newman
import scalaz._
import Scalaz._
import scala.concurrent.{Future, ExecutionContext}
import org.apache.http.params.HttpConnectionParams
import response.HttpResponseCode
import org.apache.http.util.EntityUtils
import java.net.URL
import org.apache.http.client.methods._
import org.apache.http.entity.{ByteArrayEntity, BufferedHttpEntity}
import org.apache.http.HttpHeaders._
import com.stackmob.newman.request._
import com.stackmob.newman.Exceptions.UnknownHttpStatusCodeException
import com.stackmob.newman.response.HttpResponse
import org.apache.http.impl.client.{AbstractHttpClient, DefaultHttpClient}
import org.apache.http.conn.ClientConnectionManager
import org.apache.http.impl.conn.PoolingClientConnectionManager
import java.util.concurrent.{ThreadFactory, Executors}
import ApacheHttpClient._
import java.util.concurrent.atomic.AtomicInteger
/**
 * Newman `HttpClient` backed by Apache HttpComponents 4.x with a pooling
 * connection manager. Each request is executed on the implicit
 * `ExecutionContext` (defaults to Newman's fixed thread pool).
 */
class ApacheHttpClient(socketTimeout: Int = ApacheHttpClient.DefaultSocketTimeout,
                       connectionTimeout: Int = ApacheHttpClient.DefaultConnectionTimeout,
                       maxConnectionsPerRoute: Int = ApacheHttpClient.DefaultMaxConnectionsPerRoute,
                       maxTotalConnections: Int = ApacheHttpClient.DefaultMaxTotalConnections)(implicit val requestContext: ExecutionContext = newmanRequestExecutionContext) extends HttpClient {

  // Single shared underlying Apache client, configured with pool limits and timeouts.
  val httpClient: org.apache.http.client.HttpClient = {
    val connManager: ClientConnectionManager = {
      val cm = new PoolingClientConnectionManager()
      cm.setDefaultMaxPerRoute(maxConnectionsPerRoute)
      cm.setMaxTotal(maxTotalConnections)
      cm
    }
    val client = new DefaultHttpClient(connManager)
    val httpParams = client.getParams
    HttpConnectionParams.setConnectionTimeout(httpParams, connectionTimeout)
    HttpConnectionParams.setSoTimeout(httpParams, socketTimeout)
    client
  }

  // Runs one request asynchronously: copies headers (except Content-Length,
  // which Apache computes from the entity), attaches the body when the method
  // supports one, executes, and converts the Apache response to a Newman
  // HttpResponse. Unknown status codes raise UnknownHttpStatusCodeException.
  protected def executeRequest(httpMessage: HttpRequestBase,
                               url: URL,
                               headers: Headers,
                               body: Option[RawBody] = none): Future[HttpResponse] = Future {
    httpMessage.setURI(url.toURI)

    headers.foreach { list: NonEmptyList[(String, String)] =>
      list.foreach {tup: (String, String) =>
        if(!tup._1.equalsIgnoreCase(CONTENT_LENGTH)) {
          httpMessage.addHeader(tup._1, tup._2)
        }
      }
    }

    //if there's both a body and httpMessage is an entity enclosing request, then set the body
    (body tuple httpMessage.cast[HttpEntityEnclosingRequestBase]).foreach { tup: (RawBody, HttpEntityEnclosingRequestBase) =>
      val (body,req) = tup
      req.setEntity(new ByteArrayEntity(body))
    }

    val apacheResponse = httpClient.execute(httpMessage)

    val responseCode = HttpResponseCode.fromInt(apacheResponse.getStatusLine.getStatusCode) | {
      throw new UnknownHttpStatusCodeException(apacheResponse.getStatusLine.getStatusCode)
    }
    val responseHeaders = apacheResponse.getAllHeaders.map(h => (h.getName, h.getValue)).toList
    // Buffer the entity so the bytes can be read after the connection is released.
    val responseBody = Option(apacheResponse.getEntity).map(new BufferedHttpEntity(_)).map(EntityUtils.toByteArray(_))

    HttpResponse(responseCode, responseHeaders.toNel, responseBody | RawBody.empty)
  }

  override def get(url: URL, headers: Headers): GetRequest = GetRequest(url, headers) {
    executeRequest(new HttpGet, url, headers)
  }

  override def post(url: URL, headers: Headers, body: RawBody): PostRequest = PostRequest(url, headers, body) {
    executeRequest(new HttpPost, url, headers, Option(body))
  }

  override def put(url: URL, headers: Headers, body: RawBody): PutRequest = PutRequest(url, headers, body) {
    executeRequest(new HttpPut, url, headers, Option(body))
  }

  override def delete(url: URL, headers: Headers): DeleteRequest = DeleteRequest(url, headers) {
    executeRequest(new HttpDelete, url, headers)
  }

  override def head(url: URL, headers: Headers): HeadRequest = HeadRequest(url, headers) {
    executeRequest(new HttpHead, url, headers)
  }
}
object ApacheHttpClient {
  // Default timeouts (milliseconds) and connection-pool limits.
  private[ApacheHttpClient] val DefaultSocketTimeout = 30000
  private[ApacheHttpClient] val DefaultConnectionTimeout = 5000
  private[ApacheHttpClient] val DefaultMaxConnectionsPerRoute = 20
  private[ApacheHttpClient] val DefaultMaxTotalConnections = 100
  private[ApacheHttpClient] val NumThreads = 8

  // Monotonic counter used only to give worker threads readable names ("newman-1", ...).
  private val threadNumber = new AtomicInteger(1)
  // Shared fixed-size pool; lazy so it is only created when a client actually uses the default context.
  lazy val newmanThreadPool = Executors.newFixedThreadPool(NumThreads, new ThreadFactory() {
    override def newThread(r: Runnable): Thread = {
      new Thread(r, "newman-" + threadNumber.getAndIncrement)
    }
  })

  // Default ExecutionContext for requests when the caller does not supply one.
  lazy val newmanRequestExecutionContext = ExecutionContext.fromExecutorService(newmanThreadPool)
}
| megamsys/newman | src/main/scala/com/stackmob/newman/ApacheHttpClient.scala | Scala | apache-2.0 | 5,361 |
package com.sksamuel.scapegoat.inspections.empty
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{ FreeSpec, Matchers, OneInstancePerTest }
/** @author Stephen Samuel */
/** Checks that the [[EmptyWhileBlock]] inspection flags `while` loops with empty bodies and nothing else. */
class EmptyWhileBlockTest extends FreeSpec with Matchers with PluginRunner with OneInstancePerTest {

  override val inspections = Seq(new EmptyWhileBlock)

  // Compiles the snippet through the plugin and returns how many warnings scapegoat produced.
  private def warningCount(snippet: String): Int = {
    compileCodeSnippet(snippet)
    compiler.scapegoat.feedback.warnings.size
  }

  "empty while block" - {
    "should report warning" in {
      warningCount(
        """object Test {
          |  while(true) {}
          |} """.stripMargin) shouldBe 1
    }
  }

  "not empty while block" - {
    "should not report warning" in {
      warningCount(
        """object Test {
          |  while(true) { println("sam") }
          |} """.stripMargin) shouldBe 0
    }
  }
}
| pwwpche/scalac-scapegoat-plugin | src/test/scala/com/sksamuel/scapegoat/inspections/empty/EmptyWhileBlockTest.scala | Scala | apache-2.0 | 893 |
package com.mlh
import akka.actor.ActorSystem
package object clustering {

  import com.mlh.clustering.ClusteringConfig._

  // Application-wide ActorSystem, created eagerly the first time the package
  // object is referenced. `clusterName` comes from ClusteringConfig.
  implicit val system = ActorSystem(clusterName)
}
| eikon-paku-ca/akka-cluster-docker-sample | src/main/scala/package.scala | Scala | mit | 178 |
package org.usagram.clarify.generation.generator
/**
 * Renders one number, or an inclusive run of numbers, through `builder` and
 * joins the rendered pieces with ", ".
 *
 * A single piece is emitted bare; multiple pieces are optionally wrapped in
 * parentheses when `enclosed` is set (via [[enclose]]). All combinators are
 * pure and return a new Fragment.
 */
class Fragment(start: Int, end: Option[Int] = None, enclosed: Boolean = false)(builder: Int => String) {

  /** Extends this fragment to cover the inclusive range `start to n`. */
  def to(n: Int): Fragment =
    new Fragment(start, Some(n), enclosed)(builder)

  /** Requests parentheses around the output (only applied when more than one piece is rendered). */
  def enclose: Fragment =
    new Fragment(start, end, enclosed = true)(builder)

  /** Post-processes every rendered piece with `decorator`. */
  def decorate(decorator: String => String): Fragment =
    new Fragment(start, end, enclosed)(n => decorator(builder(n)))

  /** Prepends `prefix` to every rendered piece. */
  def prefix(prefix: String): Fragment =
    decorate(rendered => prefix + rendered)

  /** Appends `postfix` to every rendered piece. */
  def postfix(postfix: String): Fragment =
    decorate(rendered => rendered + postfix)

  /** Appends a member access (".name") to every rendered piece. */
  def member(member: String): Fragment =
    postfix("." + member)

  override def toString = {
    val rendered = end match {
      case Some(last) => (start to last).map(builder)
      case None       => Seq(builder(start))
    }
    if (rendered.size == 1) rendered.head
    else if (enclosed) rendered.mkString("(", ", ", ")")
    else rendered.mkString(", ")
  }
}
| takkkun/clarify | generation/src/main/scala/org/usagram/clarify/generation/generator/Fragment.scala | Scala | mit | 959 |
/**
* Orders
* Service for processing orders
*
* OpenAPI spec version:
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package works.weave.socks.aws.orders.presentation.value
import java.net.URI
import works.weave.socks.aws.orders.domain.GOrder
import works.weave.socks.aws.orders.domain.GOrderTypes
// Incoming-order payload: related entities (customer, address, card, items,
// shipment) are referenced by URI rather than embedded, per the Minimal
// GOrderTypes binding below.
case class OrderRequest(
  id : String,
  customerId : String,
  customer : URI, // Swagger: OrderCustomer,
  address : URI, // Swagger: OrderAddress,
  card : URI, // Swagger: OrderCard,
  items : URI, // Swagger: List[OrderItems],
  shipment : URI,
  date : String,
  total : Number) extends GOrder[Order.Minimal]
// Fully-resolved order representation: related entities are embedded values
// (Full GOrderTypes binding), plus HAL-style links for the presentation layer.
case class Order(
  id : String,
  customerId : String,
  customer : OrderCustomer,
  address : OrderAddress,
  card : OrderCard,
  items : List[OrderItems],
  shipment : Option[OrderShipment],
  date : String,
  total : Number,
  _links : OrderLinks) extends GOrder[Order.Full]
object Order {
  // Type binding for fully-embedded orders: every association carries the concrete value.
  trait Full extends GOrderTypes {
    override type Customer = OrderCustomer
    override type Address = OrderAddress
    override type Card = OrderCard
    override type Items = List[OrderItems]
    override type Shipment = Option[OrderShipment]
  }
  // Type binding for minimal (request) orders: every association is a URI reference.
  trait Minimal extends GOrderTypes {
    type Customer = URI
    type Address = URI
    type Card = URI
    type Items = URI
    type Shipment = URI
  }
}
// HAL-style link container attached to a fully-resolved Order.
case class OrderLinks(
  self : LinksSelf)
| Compositional/orders-aws | src/main/scala/works.weave.socks.aws.orders/presentation/value/Order.scala | Scala | apache-2.0 | 2,076 |
package smartupedit
package io
import language.reflectiveCalls
/**
 * A [[Client]] with the full set of file-handling capabilities mixed in:
 * exporting, opening, and saving. Purely a composition trait; it adds no
 * members of its own.
 */
trait FileHandlingClient extends Client with FileHandling with Exporting with Opening with Saving {
}
| wookietreiber/smartupedit | core/main/scala/io/FileHandlingClient.scala | Scala | gpl-3.0 | 167 |
package org.apache.spot.utilities
/** Helpers for converting textual timestamps into numeric values. */
object TimeUtilities {

  /**
   * Converts an "HH:MM:SS" string into the total number of seconds as a Double.
   *
   * Extra colon-separated fields beyond the third are ignored; fewer than
   * three fields, or non-numeric fields, raise a runtime exception (same
   * failure behavior as the original indexing-based implementation).
   */
  def getTimeAsDouble(timeStr: String) : Double = {
    val fields = timeStr.split(":")
    val totalSeconds = 3600 * fields(0).toInt + 60 * fields(1).toInt + fields(2).toInt
    totalSeconds.toDouble
  }
}
| kpeiruza/incubator-spot | spot-ml/src/main/scala/org/apache/spot/utilities/TimeUtilities.scala | Scala | apache-2.0 | 325 |
package lib
import org.scalatest.{FunSpec, Matchers}
// Specifies UrlKey's normalization rules: generated keys are lower-case,
// trimmed, stripped of leading/trailing non-alphanumerics, and may contain
// '-' and '_'; validation enforces a minimum length and agreement with the
// generated form.
class UrlKeySpec extends FunSpec with Matchers {

  describe("generate") {

    it("allows apidoc keys") {
      // Keys already in canonical form must round-trip unchanged and validate cleanly.
      Seq("apidoc-spec", "apidoc-generator", "apidoc-api").foreach { key =>
        UrlKey.generate(key) should be(key)
        UrlKey.validate(key) should be(Nil)
      }
    }

    it("good urls alone") {
      UrlKey.generate("foos") should be("foos")
      UrlKey.generate("foos-bar") should be("foos-bar")
    }

    it("numbers") {
      UrlKey.generate("foos123") should be("foos123")
    }

    it("lower case") {
      UrlKey.generate("FOOS-BAR") should be("foos-bar")
    }

    it("trim") {
      UrlKey.generate(" foos-bar ") should be("foos-bar")
    }

    it("leading garbage") {
      UrlKey.generate("!foos") should be("foos")
    }

    it("trailing garbage") {
      UrlKey.generate("foos!") should be("foos")
    }

    it("allows underscores") {
      UrlKey.generate("ning_1_8_client") should be("ning_1_8_client")
    }

  }

  describe("validate") {

    it("short") {
      UrlKey.validate("ba") should be(Seq("Key must be at least 3 characters"))
    }

    it("doesn't match generated") {
      UrlKey.validate("VALID") should be(Seq("Key must be in all lower case and contain alphanumerics only (-, _, and . are supported). A valid key would be: valid"))
      UrlKey.validate("bad nickname") should be(Seq("Key must be in all lower case and contain alphanumerics only (-, _, and . are supported). A valid key would be: bad-nickname"))
    }

  }

}
| Seanstoppable/apidoc | lib/src/test/scala/UrlKeySpec.scala | Scala | mit | 1,542 |
package sk.scalagine.resource
/**
* Created with IntelliJ IDEA.
* User: zladovan
* Date: 13.09.14
* Time: 22:52
*/
/**
 * Thrown when a [[ResourceLoader]] cannot locate the resource it was asked
 * to load. The exception message is the loader's `toString`, so it should
 * identify the missing resource.
 *
 * @param resource the loader that failed to find its resource
 * @param cause    the underlying failure, if any (defaults to null)
 */
class ResourceNotFoundException(resource: ResourceLoader, cause: Throwable = null)
  extends RuntimeException(resource.toString, cause)
| zladovan/scalagine | engine/resource/src/main/scala/sk/scalagine/resource/ResourceNotFoundException.scala | Scala | mit | 257 |
import sbt._
import Keys._
object Build extends Build with Common {

  // Root project: aggregates every published module so sbt tasks run across all of them.
  lazy val blue = Project("blue", file(".")) aggregate (
    core,
    view, mustache,
    json, xml,
    finagle
  )

  // Core
  lazy val core = Project("core", file("blue-core")) settings (common:_*)

  // Views
  lazy val view = Project("view", file("blue-view")) settings (common:_*) dependsOn (core)
  lazy val mustache = Project("mustache", file("blue-mustache")) settings (common:_*) dependsOn (core, view)

  // Codecs
  lazy val json = Project("json", file("blue-json")) settings (common:_*) dependsOn (core)
  lazy val xml = Project("xml", file("blue-xml")) settings (common:_*) dependsOn (core)

  // Integrations
  lazy val finagle = Project("finagle", file("blue-finagle")) settings (common:_*) dependsOn (core)

  // Examples (not aggregated into the root build on purpose)
  lazy val exampleSimple = Project("ex-rails", file("blue-examples/rails")) settings (common:_*) dependsOn (core, json, xml, finagle)
  lazy val exampleRails = Project("ex-rest", file("blue-examples/rest")) settings (common:_*) dependsOn (core, json, xml, mustache, finagle)
} | cstivers78/blue | project/Build.scala | Scala | apache-2.0 | 1,109 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
package elastic
import org.scalacheck.{Properties => P, _}
// Property tests for ElasticExploded's grouping/ungrouping of metric datapoints.
object ExplodedTest extends P("elastic") {

  // Generators for key names and source hosts (2 x 2 = 4 distinct key/host pairs).
  val genName = Gen.oneOf("k1", "k2")
  val genHost = Gen.oneOf("h1", "h2")

  val genKey = for {
    n <- genName
    h <- genHost
  } yield Key[Stats](n, Units.Count, "description", Map(AttributeKeys.source -> h))

  // A single fixed datapoint; the generator-based version below was left
  // commented out by the original authors.
  val datapoint = Option(Datapoint(Key[Stats]("n1", Units.Count, "description", Map(AttributeKeys.source -> "h1")), Stats(3)))
  /* for {
    k <- genKey
    //d <- Gen.posNum[Double]
  } yield Option(Datapoint(k, 3 /*d */)) */

  val E = ElasticExploded(Monitoring.default, new Instruments(Monitoring.default))

  import E._

  // At least one group per key/host pair. I.e. no data is lost.
  property("elasticGroupTop") = Prop.forAll(Gen.listOf(datapoint)) { dps =>
    val gs = elasticGroup(List("k"))(dps ++ dps)
    val sz = gs.map(_.mapValues(_.size).values.sum).sum
    sz >= dps.size
  }

  // Emits as few times as possible
  property("elasticGroupBottom") = Prop.forAll(Gen.listOf(datapoint)) { dps =>
    val noDups = dps.groupBy(_.get.key).mapValues(_.head).values
    elasticGroup(List("k"))(noDups ++ noDups).size == 1 || dps.size == 0
  }

  // Ungrouping produces only JSON objects.
  property("elasticUngroup") = Prop.forAll(Gen.listOf(datapoint)) { dps =>
    val gs = elasticGroup(List("k"))(dps ++ dps)
    val ug = elasticUngroup("flask", "flask")(gs)
    ug.forall(_.isObject)
  }
}
| neigor/funnel | elastic/src/test/scala/ExplodedTest.scala | Scala | apache-2.0 | 2,205 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.stats
import com.beust.jcommander.ParameterException
import org.geotools.data.{DataStore, FileDataStore}
import org.locationtech.geomesa.index.stats.HasGeoMesaStats
import org.locationtech.geomesa.tools.stats.StatsCountCommand.StatsCountParams
import org.locationtech.geomesa.tools.{Command, DataStoreCommand, ProvidedTypeNameParam}
import org.opengis.filter.Filter
/**
 * CLI command that reports the number of features in a GeoMesa feature type,
 * either from cached statistics (estimate) or by running an exact stat query
 * when `--no-cache` (params.exact) is set.
 */
trait StatsCountCommand[DS <: DataStore with HasGeoMesaStats] extends DataStoreCommand[DS] {

  override val name = "stats-count"
  override def params: StatsCountParams

  override def execute(): Unit = withDataStore(count)

  protected def count(ds: DS): Unit = {
    // File-backed stores have a single schema; fill in the type name
    // automatically so the user does not have to pass it.
    for {
      p <- Option(params).collect { case p: ProvidedTypeNameParam => p }
      f <- Option(ds).collect { case f: FileDataStore => f }
    } { p.featureName = f.getSchema.getTypeName }

    val sft = ds.getSchema(params.featureName)
    if (sft == null) {
      throw new ParameterException(s"Schema '${params.featureName}' does not exist")
    }
    // Default to counting everything when no CQL filter was supplied.
    val filter = Option(params.cqlFilter).getOrElse(Filter.INCLUDE)

    if (params.exact) {
      Command.user.info("Running stat query...")
      val count = ds.stats.getCount(sft, filter, params.exact).map(_.toString).getOrElse("Unknown")
      Command.output.info(s"Count: $count")
    } else {
      ds.stats.getCount(sft, filter, params.exact).map(_.toString) match {
        case None =>
          Command.output.info("Estimated count: Unknown")
          Command.output.info("Re-run with --no-cache to get an exact count")
        case Some(count) =>
          Command.output.info(s"Estimated count: $count")
      }
    }
  }
}

object StatsCountCommand {
  // @Parameters(commandDescription = "Estimate or calculate feature counts in a GeoMesa feature type")
  trait StatsCountParams extends StatsParams
}
| locationtech/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/stats/StatsCountCommand.scala | Scala | apache-2.0 | 2,312 |
package controllers
import commons.Commons._
import datadefinitions.BusDefinitions
import datadefinitions.BusDefinitions.BusRoute
import play.api.libs.json.{Json}
import play.api.mvc.{Action, Controller}
import prediction.{KNNPredictionImpl, PredictionRequest}
/**
 * Shared HTTP endpoints for the bus tracker: route, direction, and stop
 * lookups plus journey-time predictions. All JSON payload shapes are
 * unchanged from the original implementation.
 */
object CommonFunctions extends Controller {

  // Name of the final stop of the given route in the given direction.
  // NOTE(review): like the original inline lookups, this throws if the
  // route/direction pair is not present in busRouteDefinitions.
  private def terminusName(routeID: String, direction: String): String =
    BusDefinitions.busRouteDefinitions
      .filter(key => key._1.routeID == routeID && key._1.direction == direction)
      .head._2.last.busStop.busStopName

  def getRouteList = Action {
    val routes = BusDefinitions.sortedRouteList
    if (routes.isEmpty) Ok("No routes available")
    else Ok(Json.toJson(Map("routeList" -> Json.toJson(routes))))
  }

  def getRouteListWithFirstLastStops = Action {
    val encoded = BusDefinitions.sortedRouteListWithFirstLastStops.map(x => x._1 + ";" + x._2 + ";" + x._3)
    Ok(Json.toJson(Map("routeList" -> Json.toJson(encoded))))
  }

  def getDirectionList(routeID: String) = Action {
    val directions = List(
      "outbound," + terminusName(routeID, "outbound"),
      "inbound," + terminusName(routeID, "inbound"))
    Ok(Json.toJson(Map("directionList" -> Json.toJson(directions))))
  }

  def getStopList(routeID: String, direction: String) = Action {
    val stops = BusDefinitions.busRouteDefinitions(BusRoute(routeID, direction))
      .map(x => x.sequenceNumber + "," + x.busStop.busStopName)
    Ok(Json.toJson(Map("stopList" -> Json.toJson(stops))))
  }

  def makePrediction(routeID: String, direction: String, fromStopID: String, toStopID: String) = Action {
    val request = new PredictionRequest(
      BusRoute(routeID, direction), fromStopID, toStopID,
      System.currentTimeMillis().getDayCode, System.currentTimeMillis().getTimeOffset)
    val reply = KNNPredictionImpl.makePrediction(request) match {
      case Some((first, second)) => first.toString + "," + second.toString
      case None                  => "Unable to make a prediction at this time"
    }
    Ok(reply)
  }
}
| chrischivers/London-Bus-Tracker-Play-Framework | app/controllers/CommonFunctions.scala | Scala | mit | 2,195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.config.TableConfigOptions
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.api.{DataTypes, TableSchema, Types, ValidationException}
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.utils._
import org.apache.flink.table.runtime.types.TypeInfoDataTypeConverter
import org.apache.flink.types.Row
import org.junit.{Before, Test}
/**
 * Batch plan tests for legacy TableSource implementations: bounded vs.
 * unbounded sources, projection push-down (flat and nested), filter
 * push-down (full, partial, and non-pushable predicates), partition
 * pruning, and dynamic table options supplied via SQL hints.
 */
class LegacyTableSourceTest extends TableTestBase {

  private val util = batchTestUtil()
  private val tableSchema = TableSchema.builder().fields(
    Array("a", "b", "c"),
    Array(DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING())).build()

  @Before
  def setup(): Unit = {
    // Register the three source flavors every test below relies on:
    // a projectable source, a filterable source, and a partitioned source.
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal(
      "ProjectableTable", new TestLegacyProjectableTableSource(
        true,
        tableSchema,
        new RowTypeInfo(
          tableSchema.getFieldDataTypes.map(TypeInfoDataTypeConverter.fromDataTypeToTypeInfo),
          tableSchema.getFieldNames),
        Seq.empty[Row])
    )

    TestLegacyFilterableTableSource.createTemporaryTable(
      util.tableEnv,
      TestLegacyFilterableTableSource.defaultSchema,
      "FilterableTable",
      isBounded = true)
    TestPartitionableSourceFactory.createTemporaryTable(util.tableEnv, "PartitionableTable", true)
  }

  @Test
  def testBoundedStreamTableSource(): Unit = {
    TestTableSource.createTemporaryTable(util.tableEnv, isBounded = true, tableSchema, "MyTable")
    util.verifyExecPlan("SELECT * FROM MyTable")
  }

  @Test
  def testUnboundedStreamTableSource(): Unit = {
    // Unbounded sources are rejected in batch mode.
    TestTableSource.createTemporaryTable(util.tableEnv, isBounded = false, tableSchema, "MyTable")
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Cannot query on an unbounded source in batch mode")
    util.verifyExecPlan("SELECT * FROM MyTable")
  }

  @Test
  def testSimpleProject(): Unit = {
    util.verifyExecPlan("SELECT a, c FROM ProjectableTable")
  }

  @Test
  def testProjectWithoutInputRef(): Unit = {
    util.verifyExecPlan("SELECT COUNT(1) FROM ProjectableTable")
  }

  @Test
  def testNestedProject(): Unit = {
    // Two levels of row nesting to exercise nested-field projection push-down.
    val nested1 = new RowTypeInfo(
      Array(Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
      Array("name", "value")
    )

    val nested2 = new RowTypeInfo(
      Array(Types.INT, Types.BOOLEAN).asInstanceOf[Array[TypeInformation[_]]],
      Array("num", "flag")
    )

    val deepNested = new RowTypeInfo(
      Array(nested1, nested2).asInstanceOf[Array[TypeInformation[_]]],
      Array("nested1", "nested2")
    )

    val tableSchema = new TableSchema(
      Array("id", "deepNested", "nested", "name"),
      Array(Types.INT, deepNested, nested1, Types.STRING))

    val returnType = new RowTypeInfo(
      Array(Types.INT, deepNested, nested1, Types.STRING).asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "deepNested", "nested", "name"))

    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal(
      "T",
      new TestNestedProjectableTableSource(true, tableSchema, returnType, Seq()))

    val sqlQuery =
      """
        |SELECT id,
        |    deepNested.nested1.name AS nestedName,
        |    nested.`value` AS nestedValue,
        |    deepNested.nested2.flag AS nestedFlag,
        |    deepNested.nested2.num AS nestedNum
        |FROM T
      """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }

  @Test
  def testFilterCanPushDown(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2")
  }

  @Test
  def testFilterCannotPushDown(): Unit = {
    // TestFilterableTableSource only accept predicates with `amount`
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE price > 10")
  }

  @Test
  def testFilterPartialPushDown(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND price > 10")
  }

  @Test
  def testFilterFullyPushDown(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND amount < 10")
  }

  @Test
  def testFilterCannotPushDown2(): Unit = {
    // Disjunctions mixing pushable and non-pushable fields cannot be pushed down.
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 OR price > 10")
  }

  @Test
  def testFilterCannotPushDown3(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 OR amount < 10")
  }

  @Test
  def testFilterPushDownUnconvertedExpression(): Unit = {
    val sqlQuery =
      """
        |SELECT * FROM FilterableTable WHERE
        |    amount > 2 AND id < 100 AND CAST(amount AS BIGINT) > 10
      """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }

  @Test
  def testFilterPushDownWithUdf(): Unit = {
    // UDF calls are opaque to the source, so only the plain predicate can be pushed.
    util.addFunction("myUdf", Func1)
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND myUdf(amount) < 32")
  }

  @Test
  def testPartitionTableSource(): Unit = {
    util.verifyExecPlan(
      "SELECT * FROM PartitionableTable WHERE part2 > 1 and id > 2 AND part1 = 'A' ")
  }

  @Test
  def testPartitionTableSourceWithUdf(): Unit = {
    util.addFunction("MyUdf", Func1)
    util.verifyExecPlan("SELECT * FROM PartitionableTable WHERE id > 2 AND MyUdf(part2) < 3")
  }

  @Test
  def testTimeLiteralExpressionPushDown(): Unit = {
    // Date/time/timestamp literals must be convertible for filter push-down.
    val schema = TableSchema.builder()
      .field("id", DataTypes.INT)
      .field("dv", DataTypes.DATE)
      .field("tv", DataTypes.TIME)
      .field("tsv", DataTypes.TIMESTAMP(3))
      .build()

    val row = new Row(4)
    row.setField(0, 1)
    row.setField(1, DateTimeTestUtil.localDate("2017-01-23"))
    row.setField(2, DateTimeTestUtil.localTime("14:23:02"))
    row.setField(3, DateTimeTestUtil.localDateTime("2017-01-24 12:45:01.234"))

    TestLegacyFilterableTableSource.createTemporaryTable(
      util.tableEnv,
      schema,
      "FilterableTable1",
      isBounded = true,
      List(row),
      List("dv", "tv", "tsv"))

    val sqlQuery =
      s"""
         |SELECT id FROM FilterableTable1 WHERE
         |  tv > TIME '14:25:02' AND
         |  dv > DATE '2017-02-03' AND
         |  tsv > TIMESTAMP '2017-02-03 14:25:02.000'
       """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }

  @Test
  def testTableHint(): Unit = {
    // Dynamic table options via /*+ OPTIONS(...) */ require this config flag.
    util.tableEnv.getConfig.getConfiguration.setBoolean(
      TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED, true)
    val ddl =
      s"""
         |CREATE TABLE MyTable1 (
         |  name STRING,
         |  a bigint,
         |  b int,
         |  c double
         |) with (
         |  'connector.type' = 'TestFilterableSource',
         |  'is-bounded' = 'true'
         |)
       """.stripMargin
    util.tableEnv.executeSql(ddl)
    util.tableEnv.executeSql(
      s"""
         |CREATE TABLE MySink (
         |  `a` BIGINT,
         |  `b` INT,
         |  `c` DOUBLE
         |) WITH (
         |  'connector' = 'filesystem',
         |  'format' = 'testcsv',
         |  'path' = '/tmp/test'
         |)
       """.stripMargin)

    val stmtSet = util.tableEnv.createStatementSet()
    stmtSet.addInsertSql(
      """
        |insert into MySink select a,b,c from MyTable1
        |  /*+ OPTIONS('source.num-element-to-skip'='31') */
        |""".stripMargin)
    stmtSet.addInsertSql(
      """
        |insert into MySink select a,b,c from MyTable1
        |  /*+ OPTIONS('source.num-element-to-skip'='32') */
        |""".stripMargin)

    util.verifyExecPlan(stmtSet)
  }
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/LegacyTableSourceTest.scala | Scala | apache-2.0 | 8,425 |
package com.fpis.ch02
import scala.annotation.tailrec
/**
* EXERCISE 1 (optional): Write a function to get the nth Fibonacci number.
* The first two Fibonacci numbers are 0 and 1, and the next number is always the sum of the previous two.
* Your definition should use a local tail-recursive function.
*/
object Ex1_Fibonacci {

  /**
   * Returns the nth Fibonacci number, 0-indexed:
   * fib(0) = 0, fib(1) = 1, fib(2) = 1, fib(3) = 2, fib(4) = 3, ...
   *
   * Bug fix: the previous version returned `current` when the counter hit 0.
   * After n iterations starting from (prev = 0, current = 1), `current` holds
   * F(n + 1), so e.g. fib(2) incorrectly returned 2 instead of 1. Returning
   * `prev` (which holds F(n)) fixes the off-by-one for every n, including the
   * n == 0 special case that previously needed separate handling.
   *
   * @param n index of the Fibonacci number to compute; must be non-negative
   * @throws IllegalArgumentException (a RuntimeException, as before) if n < 0
   */
  def fib(n: Int): Int = {
    require(n >= 0, "Fibonacci not defined for negative values")

    // Invariant: after k steps, prev = F(k) and current = F(k + 1).
    @tailrec
    def go(prev: Int, current: Int, remaining: Int): Int =
      if (remaining == 0) prev
      else go(current, prev + current, remaining - 1)

    go(0, 1, n)
  }
}
| avinash-anand/FuncProgInScalaExAns | src/main/scala/com/fpis/ch02/Ex1_Fibonacci.scala | Scala | gpl-2.0 | 667 |
package nicol.math
/*
Copied from Tom Streller's (scan) Nicol-0.1.2 project (https://github.com/scan/Nicol)
due to a lack of scala 2.10 compatible version
*/
/// @todo Turn these into traits to support other kind of matrices
/**
 * A 2x2 Matrix, stored in Row-Major order:
 *
 * ( a b )
 * ( c d )
 */
sealed class Matrix private(val a: Float, val b: Float, val c: Float, val d: Float) extends Immutable {

  /** Determinant (a*d - b*c); the matrix is invertible iff this is non-zero. */
  lazy val determinant: Float = a * d - b * c

  /** The transposed matrix ( a c ; b d ). */
  lazy val transposed = Matrix((a, b), (c, d))

  /**
   * The inverse of this matrix.
   *
   * Fix: the previous implementation relied on Float division to signal a
   * singular matrix, but `1f / 0f` yields Infinity instead of throwing, so a
   * non-invertible matrix silently produced an Infinity/NaN-filled result
   * despite the declared `@throws` contract. We now check the determinant
   * explicitly and fail fast.
   */
  @throws(classOf[ArithmeticException])
  lazy val invert = {
    if (determinant == 0f)
      throw new ArithmeticException("Matrix is singular (determinant is 0) and cannot be inverted")
    // Classic 2x2 inverse: adjugate scaled by 1/det.
    new Matrix(d, -b, -c, a) * (1f / determinant)
  }

  /** Matrix-matrix product (this * m). */
  def *(m: Matrix) = new Matrix(a * m.a + b * m.c, a * m.b + b * m.d, c * m.a + d * m.c, c * m.b + d * m.d)

  /** Component-wise sum. */
  def +(m: Matrix) = new Matrix(a + m.a, b + m.b, c + m.c, d + m.d)

  /** Component-wise difference. */
  def -(m: Matrix) = new Matrix(a - m.a, b - m.b, c - m.c, d - m.d)

  /** Scalar multiplication. */
  def *(f: Float) = new Matrix(f * a, f * b, f * c, f * d)

  /** Matrix-vector product (column-vector convention). */
  def *(v: Vector) = Vector(a * v.x + c * v.y, b * v.x + d * v.y)
}
object Matrix {

  /** The 2x2 identity matrix. */
  object identity extends Matrix(1, 0, 0, 1)

  /** Rotation matrix for angle `r` (radians); standard 2D rotation. */
  case class rotated(r: Float) extends Matrix(
    math.cos(r).toFloat,
    -math.sin(r).toFloat,
    math.sin(r).toFloat,
    math.cos(r).toFloat
  )

  /** Diagonal scaling matrix with factors `sx` (x-axis) and `sy` (y-axis). */
  case class scaled(sx: Float, sy: Float) extends Matrix(sx, 0, 0, sy)

  /**
   * Create a new Matrix from Column-Major tuples
   *
   * ( t1.x t2.x )
   * ( t1.y t2.y )
   */
  def apply(t1: (Float, Float), t2: (Float, Float)): Matrix = new Matrix(t1._1, t2._1, t1._2, t2._2)

  /**
   * Create a new Matrix from Row-Major values
   *
   * ( a b )
   * ( c d )
   */
  def apply(a: Float, b: Float, c: Float, d: Float): Matrix = new Matrix(a, b, c, d)
}
package com.twitter.finagle.partitioning
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.client.utils.StringClient
import com.twitter.finagle.liveness.FailureAccrualFactory
import com.twitter.finagle.naming.BindingFactory
import com.twitter.finagle.param.Stats
import com.twitter.finagle.partitioning.PartitioningService.PartitionedResults
import com.twitter.finagle.partitioning.{param => partitioningParam}
import com.twitter.finagle.server.utils.StringServer
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.DefaultTimer
import com.twitter.finagle.{Address, param => ctfparam, _}
import com.twitter.util._
import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.CountDownLatch
import org.scalatest.concurrent.Eventually
import org.scalatest.BeforeAndAfterEach
import scala.collection.mutable
import org.scalatest.funsuite.AnyFunSuite
/**
 * Shared fixture for partitioning-service tests: spins up a cluster of string
 * echo servers and builds a finagle string client whose stack includes the
 * partitioning module supplied by the concrete suite.
 */
trait PartitioningServiceTestBase extends AnyFunSuite with BeforeAndAfterEach with Eventually {

  import PartitioningServiceTestBase._

  // Server names placed here fail every request (see echoService below).
  protected[this] val failingHosts = new mutable.HashSet[String]()
  // Server names placed here delay their response well past the client timeout.
  protected[this] val slowHosts = new mutable.HashSet[String]()
  // One entry per test server: (listening server, bound address, port, index).
  protected[this] var servers: Seq[(ListeningServer, InetSocketAddress, Int, Int)] = _
  protected[this] var client: Service[String, String] = _
  protected[this] var timer: MockTimer = _
  // When set, counted down once for each request a server finishes serving.
  protected[this] var serverLatchOpt: Option[CountDownLatch] = _

  override def beforeEach(): Unit = {
    failingHosts.clear()
    slowHosts.clear()
    timer = new MockTimer
    serverLatchOpt = None
  }

  override def afterEach(): Unit = {
    client.close()
    servers.foreach(_._1.close())
  }

  /** The partitioning stack module under test, supplied by the concrete suite. */
  def getPartitioningServiceModule: Stackable[ServiceFactory[String, String]]

  /** Blocks on `awaitable`, bounded by the shared test [[Timeout]]. */
  protected[this] def awaitResult[T](awaitable: Awaitable[T]): T = Await.result(awaitable, Timeout)

  /**
   * Starts `size` echo servers on loopback (ephemeral ports), named
   * server#startingIndex .. server#(startingIndex + size - 1).
   */
  protected[this] def createServers(
    size: Int,
    startingIndex: Int = 0
  ): Seq[(ListeningServer, InetSocketAddress, Int, Int)] = {
    def echoService(servername: String): Service[String, String] =
      Service.mk[String, String](req => {
        if (failingHosts.contains(servername)) {
          Future.exception(new RuntimeException(s"$servername failed!"))
        } else if (slowHosts.contains(servername)) {
          // 12s sleep deliberately exceeds the 1s client request timeout.
          Future
            .sleep(12.seconds)(DefaultTimer)
            .before(
              Future.value(s"Response from $servername: after sleep")
            )
        } else {
          // sending back the hostname along with the request value, so that the caller can
          // assert that the request landed on the correct host. Also take care of multiple
          // request strings (batched request) that are delimited by RequestDelimiter
          val requests = req.split(RequestDelimiter)
          val response = requests.map(_ + EchoDelimiter + servername) // $port:$hostname
          Future.value(response.mkString(ResponseDelimiter))
        } ensure {
          serverLatchOpt match {
            case Some(latch) => latch.countDown()
            case None =>
          }
        }
      })

    // create a cluster of multiple servers, listening on unique port numbers
    startingIndex until (startingIndex + size) map { i =>
      val addr = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
      val server = StringServer.server.serve(addr, echoService(servername = s"server#$i"))
      val boundAddress = server.boundAddress.asInstanceOf[InetSocketAddress]
      val port = boundAddress.getPort
      (server, boundAddress, port, i)
    }
  }

  /**
   * Builds a string client whose stack has the partitioning module inserted
   * after name binding, pointed at `dest` (defaults to all test servers).
   */
  protected[this] def createClient(
    sr: StatsReceiver,
    dest: Name = Name.bound(servers.map(s => Address(s._2)): _*),
    ejectFailedHosts: Boolean = false
  ): Service[String, String] = {
    // create a partitioning aware finagle client by inserting the PartitioningService appropriately
    val newClientStack =
      StackClient
        .newStack[String, String]
        .insertAfter(
          BindingFactory.role,
          getPartitioningServiceModule
        )

    StringClient.client
      .withStack(newClientStack)
      .withRequestTimeout(1.second)
      .configured(Stats(sr))
      .configured(FailureAccrualFactory.Param(1, () => 10.minutes))
      .configured(partitioningParam.EjectFailedHost(ejectFailedHosts))
      .configured(ctfparam.Timer(timer))
      .newService(dest, "client")
  }
}
object PartitioningServiceTestBase {
  // A request string containing several RequestDelimiter-separated segments is
  // treated as a batched request; each segment is routed to its partition.
  val RequestDelimiter = ";"
  // Batched responses are concatenated back together using this delimiter.
  val ResponseDelimiter = ";"
  val EchoDelimiter = ':'
  val Timeout: Duration = 5.seconds

  /**
   * Flattens the per-partition results of a batched request into one response
   * string: successful values first, then the type names of any failures, all
   * joined with [[ResponseDelimiter]].
   */
  def mergeStringResults(origReq: String, pr: PartitionedResults[String, String]): String = {
    // The responses already embed the request keys, so plain concatenation is
    // enough here; a real implementation would typically build a key-value map.
    val successParts = pr.successes.map { case (_, response) => response }
    // Failures are rendered as the exception class name, which makes partial
    // success of batch operations easy to assert on in tests.
    val failureParts = pr.failures.map { case (_, error) => error.getClass.getTypeName }
    (successParts ++ failureParts).mkString(ResponseDelimiter)
  }
}
| twitter/finagle | finagle-partitioning/src/test/scala/com/twitter/finagle/partitioning/PartitioningServiceTestBase.scala | Scala | apache-2.0 | 5,728 |
package org.bitcoins.wallet
import java.nio.file.Files
import com.typesafe.config.ConfigFactory
import org.bitcoins.core.config.{MainNet, RegTest, TestNet3}
import org.bitcoins.core.hd.HDPurposes
import org.bitcoins.testkit.util.BitcoinSAsyncTest
import org.bitcoins.wallet.config.WalletAppConfig
import scala.util.Properties
/** Tests config resolution, override precedence and restart safety of [[WalletAppConfig]]. */
class WalletAppConfigTest extends BitcoinSAsyncTest {

  // Fresh datadir per suite run so tests don't see each other's state.
  val tempDir = Files.createTempDirectory("bitcoin-s")

  val config: WalletAppConfig =
    WalletAppConfig(baseDatadir = tempDir, Vector.empty)

  it must "resolve DB connections correctly " in {
    assert(config.dbPath.startsWith(Properties.tmpDir))
  }

  it must "be overridable" in {
    assert(config.network == RegTest)

    // Each withOverrides layers on top of the previous config; the newest wins.
    val otherConf = ConfigFactory.parseString("bitcoin-s.network = testnet3")
    val withOther: WalletAppConfig = config.withOverrides(otherConf)
    assert(withOther.network == TestNet3)

    val mainnetConf = ConfigFactory.parseString("bitcoin-s.network = mainnet")
    val mainnet: WalletAppConfig = withOther.withOverrides(mainnetConf)
    assert(mainnet.network == MainNet)
  }

  it should "not matter how the overrides are passed in" in {
    val overrider = ConfigFactory.parseString(s"""
                                                 |bitcoin-s {
                                                 | network = mainnet
                                                 |}
                                                 |""".stripMargin)

    // Constructor-supplied overrides and withOverrides must be equivalent.
    val throughConstructor = WalletAppConfig(tempDir, Vector(overrider))
    val throughWithOverrides = config.withOverrides(overrider)
    assert(throughWithOverrides.network == MainNet)

    assert(throughWithOverrides.network == throughConstructor.network)
    assert(throughWithOverrides.datadir == throughConstructor.datadir)
  }

  it must "be overridable without screwing up other options" in {
    val otherConf = ConfigFactory.parseString(
      s"bitcoin-s.wallet.defaultAccountType = segwit"
    )
    val thirdConf = ConfigFactory.parseString(
      s"bitcoin-s.wallet.defaultAccountType = nested-segwit")

    val overriden = config.withOverrides(otherConf)
    val twiceOverriden = overriden.withOverrides(thirdConf)

    assert(overriden.defaultAccountKind == HDPurposes.SegWit)
    assert(twiceOverriden.defaultAccountKind == HDPurposes.NestedSegWit)

    // Unrelated settings (the datadir) must survive the overrides untouched.
    assert(config.datadir == overriden.datadir)
    assert(twiceOverriden.datadir == overriden.datadir)
  }

  it must "be overridable with multiple levels" in {
    val testnet = ConfigFactory.parseString("bitcoin-s.network = testnet3")
    val mainnet = ConfigFactory.parseString("bitcoin-s.network = mainnet")
    val overriden: WalletAppConfig =
      config.withOverrides(Vector(testnet, mainnet))
    assert(overriden.network == MainNet)
  }

  it must "have user data directory configuration take precedence" in {
    val tempDir = Files.createTempDirectory("bitcoin-s")
    val tempFile = Files.createFile(tempDir.resolve("bitcoin-s.conf"))
    val confStr = """
                    | bitcoin-s {
                    | network = testnet3
                    |
                    | logging {
                    | level = off
                    |
                    | p2p = warn
                    | }
                    | }
                  """.stripMargin
    val _ = Files.write(tempFile, confStr.getBytes())

    val appConfig = WalletAppConfig(baseDatadir = tempDir, Vector.empty)

    assert(appConfig.datadir == tempDir.resolve("testnet3"))
    assert(appConfig.network == TestNet3)
  }

  it must "fail to start the wallet app config if we have different seeds" in {
    val seedFile = config.seedPath
    val startedF = config.start()

    //stop old oracle
    val stoppedF = for {
      _ <- startedF
      _ <- config.stop()
    } yield ()

    val deletedF = for {
      _ <- stoppedF
    } yield {
      //delete the seed so we start with a new seed
      Files.delete(seedFile)
    }

    val start2F = for {
      _ <- deletedF
      _ <- config.start()
    } yield ()

    //start it again and except an exception
    // NOTE(review): presumably restarting with a freshly generated seed must be
    // rejected because the wallet DB still references the old seed — confirm.
    recoverToSucceededIf[RuntimeException] {
      start2F
    }
  }
}
| bitcoin-s/bitcoin-s | wallet-test/src/test/scala/org/bitcoins/wallet/WalletAppConfigTest.scala | Scala | mit | 4,150 |
package net.scalax.cpoi.test
import java.util.{Calendar, Date}
import net.scalax.cpoi.api._
import net.scalax.cpoi.exception.{ExpectBooleanCellException, ExpectStringCellException}
import org.apache.poi.hssf.usermodel.HSSFWorkbook
import org.apache.poi.ss.usermodel.CellType
import org.scalatest._
/**
 * Tests reading an HSSF NUMERIC cell (value 123.321) through the various
 * reader flavours. Note: the common (mutable) string reader converts the cell
 * to STRING in place, whereas the immutable reader leaves the type untouched.
 */
class HSSFWorkbookNumbericCellTest1 extends FlatSpec with Matchers {

  "numberic cell" should "read as empty string by common string reader" in {
    import readers._
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    wrap.cellType should be(Option(CellType.NUMERIC))
    val value = wrap.tryValue[String]
    // Reading with the mutable reader has converted the cell to STRING.
    wrap.cellType should be(Option(CellType.STRING))
    value.isRight should be(true)
    value.getOrElse(throw new Exception("Test not pass")) should be("123.321")
  }

  it should "read as double by double reader" in {
    import readers._
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    val value = wrap.tryValue[Double]
    value.isRight should be(true)
    value.getOrElse(throw new Exception("Test not pass")) should be(123.321)
  }

  it should "throw exception when read by boolean reader" in {
    import readers._
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    // A numeric cell is not coercible to Boolean: expect a Left with the typed error.
    val value = wrap.tryValue[Boolean]
    value.isLeft should be(true)
    value.left.getOrElse(throw new Exception("Test not pass")).isInstanceOf[ExpectBooleanCellException] should be(true)
  }

  it should "read as date when read by date reader" in {
    import readers._
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    val value = wrap.tryValue[Date]
    value.isRight should be(true)
    val calendar = Calendar.getInstance
    calendar.setTime(value.getOrElse(throw new Exception("Test not pass")))
    // 123.321 interpreted as days since the Excel epoch still falls in 1900.
    calendar.get(Calendar.YEAR) should be(1900)
  }

  it should "throw exception when read by immutable string reader" in {
    import immutableReaders._
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    wrap.cellType should be(Option(CellType.NUMERIC))
    val value = wrap.tryValue[String]
    // The immutable reader must not rewrite the cell type...
    wrap.cellType should be(Option(CellType.NUMERIC))
    // ...and refuses to coerce the numeric cell to String.
    value.isLeft should be(true)
    value.left.getOrElse(throw new Exception("Test not pass")).isInstanceOf[ExpectStringCellException] should be(true)
  }

  it should "read as string by non empty string reader" in {
    implicit val ec = readers.nonEmptyStringReader
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    wrap.cellType should be(Option(CellType.NUMERIC))
    val value = wrap.tryValue[String]
    wrap.cellType should be(Option(CellType.STRING))
    value.isRight should be(true)
    value.getOrElse(throw new Exception("Test not pass")) should be("123.321")
  }

  it should "read as trim string by non blank string reader" in {
    implicit val ec = readers.nonBlankStringReader
    val workbook = new HSSFWorkbook()
    val sheet = workbook.createSheet("Sheet1")
    val row = sheet.createRow(1)
    val cell = row.createCell(1)
    cell.setCellValue(123.321)
    val wrap = CPoi.wrapCell(cell)
    wrap.cellType should be(Option(CellType.NUMERIC))
    val value = wrap.tryValue[String]
    wrap.cellType should be(Option(CellType.STRING))
    value.isRight should be(true)
    value.getOrElse(throw new Exception("Test not pass")) should be("123.321")
  }
}
| scalax/poi-collection | src/test/scala/net/scalax/cpoi/read/HSSFWorkbookNumbericCellTest1.scala | Scala | mit | 4,348 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala.async
import org.apache.flink.annotation.Internal
import org.apache.flink.streaming.api.functions.async.collector.{AsyncCollector => JavaAsyncCollector}
import scala.collection.JavaConverters._
/**
 * Internal adapter that exposes Flink's Java API [[JavaAsyncCollector]] as a
 * Scala [[AsyncCollector]], forwarding every call to the wrapped instance.
 *
 * @param javaAsyncCollector the Java collector all calls are delegated to
 * @tparam OUT type of the output elements
 */
@Internal
class JavaAsyncCollectorWrapper[OUT](val javaAsyncCollector: JavaAsyncCollector[OUT])
  extends AsyncCollector[OUT] {

  /** Forwards the completed result elements, converted to a Java collection. */
  override def collect(result: Iterable[OUT]): Unit =
    javaAsyncCollector.collect(result.asJavaCollection)

  /** Forwards the failure cause unchanged. */
  override def collect(throwable: Throwable): Unit =
    javaAsyncCollector.collect(throwable)
}
| WangTaoTheTonic/flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/async/JavaAsyncCollectorWrapper.scala | Scala | apache-2.0 | 1,619 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.plugins.transformer.numeric
import de.fuberlin.wiwiss.silk.runtime.plugin.Plugin
import de.fuberlin.wiwiss.silk.util.StringUtils._
import de.fuberlin.wiwiss.silk.linkagerule.input.SimpleTransformer
@Plugin(
  id = "logarithm",
  categories = Array("Numeric"),
  label = "Logarithm",
  description = "Transforms all numbers by applying the logarithm function. Non-numeric values are left unchanged."
)
case class LogarithmTransformer(base: Int = 10) extends SimpleTransformer {

  /** Applies log_base to numeric literals; non-numeric values pass through untouched. */
  override def evaluate(value: String) = value match {
    // Change of base: log_b(d) = ln(d) / ln(b).
    case DoubleLiteral(d) => (math.log(d) / math.log(base)).toString
    case nonNumeric => nonNumeric
  }
}
/*
* Copyright 2017-2018 Iaroslav Zeigerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akkeeper.master.route
import akka.actor.ActorRef
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import akkeeper.api._
import scala.concurrent.ExecutionContext
/** HTTP routes under /containers, backed by the given service actor. */
class ContainerController(service: ActorRef)(implicit dispatcher: ExecutionContext,
                                             timeout: Timeout)
  extends BaseController with ContainerApiJsonProtocol with ContainerDefinitionJsonProtocol {

  // Map the actor's reply types to HTTP status codes.
  registerHandler[ContainerGetResult](StatusCodes.OK)
  registerHandler[ContainersList](StatusCodes.OK)
  registerHandler[ContainerNotFound](StatusCodes.NotFound)
  registerHandler[OperationFailed](StatusCodes.InternalServerError)

  // GET /containers/<name> -> single container; GET /containers -> full list.
  override val route: Route =
    pathPrefix("containers") {
      path(Segment) { containerName =>
        get {
          handleRequest(service, GetContainer(containerName))
        }
      } ~
      (pathEnd | pathSingleSlash) {
        get {
          handleRequest(service, GetContainers())
        }
      }
    }
}
object ContainerController {

  /** Factory for a [[ContainerController]] bound to the given service actor. */
  def apply(service: ActorRef)(implicit dispatcher: ExecutionContext,
                               timeout: Timeout): BaseController =
    new ContainerController(service)
}
| akkeeper-project/akkeeper | akkeeper/src/main/scala/akkeeper/master/route/ContainerController.scala | Scala | apache-2.0 | 1,844 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io._
import java.nio.charset.StandardCharsets
import java.util.{ConcurrentModificationException, EnumSet, UUID}
import scala.reflect.ClassTag
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.UninterruptibleThread
/**
* A [[MetadataLog]] implementation based on HDFS. [[HDFSMetadataLog]] uses the specified `path`
* as the metadata storage.
*
* When writing a new batch, [[HDFSMetadataLog]] will firstly write to a temp file and then rename
* it to the final batch file. If the rename step fails, there must be multiple writers and only
* one of them will succeed and the others will fail.
*
* Note: [[HDFSMetadataLog]] doesn't support S3-like file systems as they don't guarantee listing
* files in a directory always shows the latest files.
*/
class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path: String)
  extends MetadataLog[T] with Logging {

  // JSON (de)serialization settings used for the batch metadata payloads.
  private implicit val formats = Serialization.formats(NoTypeHints)

  /** Needed to serialize type T into JSON when using Jackson */
  private implicit val manifest = Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)

  // Avoid serializing generic sequences, see SPARK-17372
  require(implicitly[ClassTag[T]].runtimeClass != classOf[Seq[_]],
    "Should not create a log with type Seq, use Arrays instead - see SPARK-17372")

  import HDFSMetadataLog._

  val metadataPath = new Path(path)
  protected val fileManager = createFileManager()

  // Ensure the metadata directory exists before any batch file is read or written.
  if (!fileManager.exists(metadataPath)) {
    fileManager.mkdirs(metadataPath)
  }

  /**
   * A `PathFilter` to filter only batch files
   */
  protected val batchFilesFilter = new PathFilter {
    override def accept(path: Path): Boolean = isBatchFile(path)
  }

  /** The batch file for `batchId`: the file name is the batch id itself. */
  protected def batchIdToPath(batchId: Long): Path = {
    new Path(metadataPath, batchId.toString)
  }

  /** Inverse of [[batchIdToPath]]; assumes `path` names a batch file. */
  protected def pathToBatchId(path: Path) = {
    path.getName.toLong
  }

  /** A path is a batch file iff its file name parses as a Long batch id. */
  protected def isBatchFile(path: Path) = {
    try {
      path.getName.toLong
      true
    } catch {
      case _: NumberFormatException => false
    }
  }

  /** Writes `metadata` to `out` as JSON; the caller owns and closes the stream. */
  protected def serialize(metadata: T, out: OutputStream): Unit = {
    // called inside a try-finally where the underlying stream is closed in the caller
    Serialization.write(metadata, out)
  }

  /** Reads a JSON-encoded T from `in`; the caller owns and closes the stream. */
  protected def deserialize(in: InputStream): T = {
    // called inside a try-finally where the underlying stream is closed in the caller
    val reader = new InputStreamReader(in, StandardCharsets.UTF_8)
    Serialization.read[T](reader)
  }

  /**
   * Store the metadata for the specified batchId and return `true` if successful. If the batchId's
   * metadata has already been stored, this method will return `false`.
   */
  override def add(batchId: Long, metadata: T): Boolean = {
    get(batchId).map(_ => false).getOrElse {
      // Only write metadata when the batch has not yet been written
      if (fileManager.isLocalFileSystem) {
        Thread.currentThread match {
          case ut: UninterruptibleThread =>
            // When using a local file system, "writeBatch" must be called on a
            // [[org.apache.spark.util.UninterruptibleThread]] so that interrupts can be disabled
            // while writing the batch file. This is because there is a potential dead-lock in
            // Hadoop "Shell.runCommand" before 2.5.0 (HADOOP-10622). If the thread running
            // "Shell.runCommand" is interrupted, then the thread can get deadlocked. In our case,
            // `writeBatch` creates a file using HDFS API and will call "Shell.runCommand" to set
            // the file permission if using the local file system, and can get deadlocked if the
            // stream execution thread is stopped by interrupt. Hence, we make sure that
            // "writeBatch" is called on [[UninterruptibleThread]] which allows us to disable
            // interrupts here. Also see SPARK-14131.
            ut.runUninterruptibly { writeBatch(batchId, metadata, serialize) }
          case _ =>
            throw new IllegalStateException(
              "HDFSMetadataLog.add() on a local file system must be executed on " +
                "a o.a.spark.util.UninterruptibleThread")
        }
      } else {
        // For a distributed file system, such as HDFS or S3, if the network is broken, write
        // operations may just hang until timeout. We should enable interrupts to allow stopping
        // the query fast.
        writeBatch(batchId, metadata, serialize)
      }
      true
    }
  }

  /**
   * Writes `metadata` to a fresh UUID-named temp file in the metadata dir and
   * returns its path, retrying on name collisions. Returns None only in theory:
   * the retry loop is unbounded, so the method either succeeds or keeps retrying.
   */
  def writeTempBatch(metadata: T, writer: (T, OutputStream) => Unit = serialize): Option[Path] = {
    // NOTE(review): nextId is incremented on collisions but never read — dead state.
    var nextId = 0
    while (true) {
      val tempPath = new Path(metadataPath, s".${UUID.randomUUID.toString}.tmp")
      try {
        val output = fileManager.create(tempPath)
        try {
          writer(metadata, output)
          return Some(tempPath)
        } finally {
          IOUtils.closeQuietly(output)
        }
      } catch {
        case e: IOException if isFileAlreadyExistsException(e) =>
          // Failed to create "tempPath". There are two cases:
          // 1. Someone is creating "tempPath" too.
          // 2. This is a restart. "tempPath" has already been created but not moved to the final
          //    batch file (not committed).
          //
          // For both cases, the batch has not yet been committed. So we can retry it.
          //
          // Note: there is a potential risk here: if HDFSMetadataLog A is running, people can use
          // the same metadata path to create "HDFSMetadataLog" and fail A. However, this is not a
          // big problem because it requires the attacker must have the permission to write the
          // metadata path. In addition, the old Streaming also have this issue, people can create
          // malicious checkpoint files to crash a Streaming application too.
          nextId += 1
      }
    }
    None
  }

  /**
   * Write a batch to a temp file then rename it to the batch file.
   *
   * There may be multiple [[HDFSMetadataLog]] using the same metadata path. Although it is not a
   * valid behavior, we still need to prevent it from destroying the files.
   */
  private def writeBatch(batchId: Long, metadata: T, writer: (T, OutputStream) => Unit): Unit = {
    val tempPath = writeTempBatch(metadata, writer).getOrElse(
      throw new IllegalStateException(s"Unable to create temp batch file $batchId"))
    try {
      // Try to commit the batch
      // It will fail if there is an existing file (someone has committed the batch)
      logDebug(s"Attempting to write log #${batchIdToPath(batchId)}")
      fileManager.rename(tempPath, batchIdToPath(batchId))

      // SPARK-17475: HDFSMetadataLog should not leak CRC files
      // If the underlying filesystem didn't rename the CRC file, delete it.
      val crcPath = new Path(tempPath.getParent(), s".${tempPath.getName()}.crc")
      if (fileManager.exists(crcPath)) fileManager.delete(crcPath)
    } catch {
      case e: IOException if isFileAlreadyExistsException(e) =>
        // If "rename" fails, it means some other "HDFSMetadataLog" has committed the batch.
        // So throw an exception to tell the user this is not a valid behavior.
        throw new ConcurrentModificationException(
          s"Multiple HDFSMetadataLog are using $path", e)
      case e: FileNotFoundException =>
        // Sometimes, "create" will succeed when multiple writers are calling it at the same
        // time. However, only one writer can call "rename" successfully, others will get
        // FileNotFoundException because the first writer has removed it.
        throw new ConcurrentModificationException(
          s"Multiple HDFSMetadataLog are using $path", e)
    } finally {
      // Always clean up the temp file, whether the commit succeeded or not.
      fileManager.delete(tempPath)
    }
  }

  /** True if `e` signals "file already exists", covering pre-HADOOP-9361 message-only variants. */
  private def isFileAlreadyExistsException(e: IOException): Boolean = {
    e.isInstanceOf[FileAlreadyExistsException] ||
      // Old Hadoop versions don't throw FileAlreadyExistsException. Although it's fixed in
      // HADOOP-9361 in Hadoop 2.5, we still need to support old Hadoop versions.
      (e.getMessage != null && e.getMessage.startsWith("File already exists: "))
  }

  /**
   * @return the deserialized metadata in a batch file, or None if file not exist.
   * @throws IllegalArgumentException when path does not point to a batch file.
   */
  def get(batchFile: Path): Option[T] = {
    if (fileManager.exists(batchFile)) {
      if (isBatchFile(batchFile)) {
        get(pathToBatchId(batchFile))
      } else {
        throw new IllegalArgumentException(s"File ${batchFile} is not a batch file!")
      }
    } else {
      None
    }
  }

  /** Reads and deserializes the metadata for `batchId`, or None if its file is absent. */
  override def get(batchId: Long): Option[T] = {
    val batchMetadataFile = batchIdToPath(batchId)
    if (fileManager.exists(batchMetadataFile)) {
      val input = fileManager.open(batchMetadataFile)
      try {
        Some(deserialize(input))
      } finally {
        IOUtils.closeQuietly(input)
      }
    } else {
      logDebug(s"Unable to find batch $batchMetadataFile")
      None
    }
  }

  /**
   * Returns (batchId, metadata) pairs for all batches within the optional
   * [startId, endId] bounds (both inclusive), sorted by batch id ascending.
   */
  override def get(startId: Option[Long], endId: Option[Long]): Array[(Long, T)] = {
    val files = fileManager.list(metadataPath, batchFilesFilter)
    val batchIds = files
      .map(f => pathToBatchId(f.getPath))
      .filter { batchId =>
        (endId.isEmpty || batchId <= endId.get) && (startId.isEmpty || batchId >= startId.get)
      }
    batchIds.sorted.map(batchId => (batchId, get(batchId))).filter(_._2.isDefined).map {
      case (batchId, metadataOption) =>
        (batchId, metadataOption.get)
    }
  }

  /** Returns the newest batch whose metadata can actually be read, if any. */
  override def getLatest(): Option[(Long, T)] = {
    val batchIds = fileManager.list(metadataPath, batchFilesFilter)
      .map(f => pathToBatchId(f.getPath))
      .sorted
      .reverse
    // Walk from newest to oldest and return the first readable batch.
    for (batchId <- batchIds) {
      val batch = get(batchId)
      if (batch.isDefined) {
        return Some((batchId, batch.get))
      }
    }
    None
  }

  /**
   * Get an array of [FileStatus] referencing batch files.
   * The array is sorted by most recent batch file first to
   * oldest batch file.
   */
  def getOrderedBatchFiles(): Array[FileStatus] = {
    fileManager.list(metadataPath, batchFilesFilter)
      .sortBy(f => pathToBatchId(f.getPath))
      .reverse
  }

  /**
   * Removes all the log entry earlier than thresholdBatchId (exclusive).
   */
  override def purge(thresholdBatchId: Long): Unit = {
    val batchIds = fileManager.list(metadataPath, batchFilesFilter)
      .map(f => pathToBatchId(f.getPath))

    for (batchId <- batchIds if batchId < thresholdBatchId) {
      val path = batchIdToPath(batchId)
      fileManager.delete(path)
      logTrace(s"Removed metadata log file: $path")
    }
  }

  /**
   * Prefers the atomic FileContext API; falls back to the (non-atomic)
   * FileSystem API for file systems that do not support FileContext.
   */
  private def createFileManager(): FileManager = {
    val hadoopConf = sparkSession.sessionState.newHadoopConf()
    try {
      new FileContextManager(metadataPath, hadoopConf)
    } catch {
      case e: UnsupportedFileSystemException =>
        logWarning("Could not use FileContext API for managing metadata log files at path " +
          s"$metadataPath. Using FileSystem API instead for managing log files. The log may be " +
          s"inconsistent under failures.")
        new FileSystemManager(metadataPath, hadoopConf)
    }
  }
}
object HDFSMetadataLog {
/** A simple trait to abstract out the file management operations needed by HDFSMetadataLog. */
trait FileManager {
  /** List the files in a path that matches a filter. */
  def list(path: Path, filter: PathFilter): Array[FileStatus]

  /** Make directory at the give path and all its parent directories as needed. */
  def mkdirs(path: Path): Unit

  /** Whether path exists */
  def exists(path: Path): Boolean

  /** Open a file for reading, or throw exception if it does not exist. */
  def open(path: Path): FSDataInputStream

  /** Create path, or throw exception if it already exists */
  def create(path: Path): FSDataOutputStream

  /**
   * Atomically rename path, or throw exception if it cannot be done.
   * Should throw FileNotFoundException if srcPath does not exist.
   * Should throw FileAlreadyExistsException if destPath already exists.
   */
  def rename(srcPath: Path, destPath: Path): Unit

  /** Recursively delete a path if it exists. Should not throw exception if file doesn't exist. */
  def delete(path: Path): Unit

  /** Whether the file system is a local FS. */
  def isLocalFileSystem: Boolean
}
/**
* Default implementation of FileManager using newer FileContext API.
*/
class FileContextManager(path: Path, hadoopConf: Configuration) extends FileManager {
private val fc = if (path.toUri.getScheme == null) {
FileContext.getFileContext(hadoopConf)
} else {
FileContext.getFileContext(path.toUri, hadoopConf)
}
override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
fc.util.listStatus(path, filter)
}
override def rename(srcPath: Path, destPath: Path): Unit = {
fc.rename(srcPath, destPath)
}
override def mkdirs(path: Path): Unit = {
fc.mkdir(path, FsPermission.getDirDefault, true)
}
override def open(path: Path): FSDataInputStream = {
fc.open(path)
}
override def create(path: Path): FSDataOutputStream = {
fc.create(path, EnumSet.of(CreateFlag.CREATE))
}
override def exists(path: Path): Boolean = {
fc.util().exists(path)
}
override def delete(path: Path): Unit = {
try {
fc.delete(path, true)
} catch {
case e: FileNotFoundException =>
// ignore if file has already been deleted
}
}
override def isLocalFileSystem: Boolean = fc.getDefaultFileSystem match {
case _: local.LocalFs | _: local.RawLocalFs =>
// LocalFs = RawLocalFs + ChecksumFs
true
case _ => false
}
}
/**
* Implementation of FileManager using older FileSystem API. Note that this implementation
* cannot provide atomic renaming of paths, hence can lead to consistency issues. This
* should be used only as a backup option, when FileContextManager cannot be used.
*/
class FileSystemManager(path: Path, hadoopConf: Configuration) extends FileManager {
private val fs = path.getFileSystem(hadoopConf)
override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
fs.listStatus(path, filter)
}
/**
* Rename a path. Note that this implementation is not atomic.
* @throws FileNotFoundException if source path does not exist.
* @throws FileAlreadyExistsException if destination path already exists.
* @throws IOException if renaming fails for some unknown reason.
*/
override def rename(srcPath: Path, destPath: Path): Unit = {
if (!fs.exists(srcPath)) {
throw new FileNotFoundException(s"Source path does not exist: $srcPath")
}
if (fs.exists(destPath)) {
throw new FileAlreadyExistsException(s"Destination path already exists: $destPath")
}
if (!fs.rename(srcPath, destPath)) {
throw new IOException(s"Failed to rename $srcPath to $destPath")
}
}
override def mkdirs(path: Path): Unit = {
fs.mkdirs(path, FsPermission.getDirDefault)
}
override def open(path: Path): FSDataInputStream = {
fs.open(path)
}
override def create(path: Path): FSDataOutputStream = {
fs.create(path, false)
}
override def exists(path: Path): Boolean = {
fs.exists(path)
}
override def delete(path: Path): Unit = {
try {
fs.delete(path, true)
} catch {
case e: FileNotFoundException =>
// ignore if file has already been deleted
}
}
override def isLocalFileSystem: Boolean = fs match {
case _: LocalFileSystem | _: RawLocalFileSystem =>
// LocalFileSystem = RawLocalFileSystem + ChecksumFileSystem
true
case _ => false
}
}
}
| ZxlAaron/mypros | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala | Scala | apache-2.0 | 17,062 |
package memnets.ui
import memnets.model._
import memnets.utils.{BeanSupport, DirtySupport}
import scala.beans.BeanProperty
import scala.collection.mutable.ListBuffer
/**
 * A container for 2-D/3-D phase traces rendered in a 3-D scene.
 * Registers itself with the implicit [[DynamicSystem]] on construction.
 */
class PhasePlot(
    var scale: Double = 1.0,
    var w: Double = 450,
    var h: Double = 450,
    var zoom: Double = 1.0,
    var showFrame: Boolean = true
)(implicit val mn: DynamicSystem)
    extends ElementBase {
  // Constructor side effect: the plot becomes an element of the dynamic system.
  mn.elements += this
  @BeanProperty var paneStyle: Option[String] = None
  @BeanProperty var autoCamera: AutoCamera = NullAutoCamera
  @BeanProperty var showBorder: Boolean = false
  @BeanProperty var showGlass: Boolean = true
  @BeanProperty var originLight: Boolean = true
  @BeanProperty var sceneRotateY: Double = 0.0
  @BeanProperty var sceneRotateZ: Double = 0.0
  @BeanProperty var tiltDown: Double = 25.0
  // Hook invoked with the created 3-D scene; defaults to a no-op.
  var onCreateScene: Scene3D => Unit = sc => {}
  // Phases register themselves here from the Phase2D constructor.
  protected[memnets] val _phases = ListBuffer[Phase2D]()
  def phases: Iterable[Phase2D] = _phases
  // Effective dimensions after applying the uniform scale factor.
  def width: Double = scale * w
  def height: Double = scale * h
}
object Phase3D {

  /**
   * Creates a [[Phase3D]] trace on `plot`, wires the plot's auto-camera to follow
   * the (x, y, z) point, and anchors the plot to the right.
   */
  def apply(
      x: Yb,
      y: Yb,
      z: Yb,
      window: Int = 360,
      sampling: Int = 1,
      scale: Double = 10.0,
      showAxis: Boolean = true
  )(implicit plot: PhasePlot) = {
    val phase = new Phase3D(x, y, z, plot)
    phase.scale = scale
    phase.window = window
    phase.sampling = sampling
    phase.showAxis = showAxis
    // Follow the traced point; read phase.scale each frame so later changes apply.
    plot.autoCamera = { (te, sc3d) =>
      val s = phase.scale
      sc3d.moveCamera(-x.act * s, -y.act * s, -z.act * s)
    }
    plot.loc = Loc().right(200)
    phase
  }
}
object Phase {

  /** Builds a phase trace for an oscillator, plotting `osc.y` against `osc.x` (note the swap). */
  def apply(osc: Osc)(implicit plot: PhasePlot): Phase2D = apply(osc.y, osc.x)

  /**
   * Creates a [[Phase2D]] trace on `plot`, wires the plot's auto-camera to follow
   * the (x, y) point, tilts the view, and anchors the plot to the right.
   */
  def apply(
      x: Yb,
      y: Yb,
      window: Int = 360,
      sampling: Int = 1,
      scale: Double = 10.0,
      tiltDown: Double = 85.0
  )(implicit plot: PhasePlot) = {
    val phase = new Phase2D(x, y, plot)
    phase.scale = scale
    phase.window = window
    phase.sampling = sampling
    // Follow the traced point; read phase.scale each frame so later changes apply.
    plot.autoCamera = { (te, sc3d) =>
      val s = phase.scale
      sc3d.moveCamera(-x.act * s, -y.act * s)
    }
    plot.tiltDown = tiltDown
    plot.loc = Loc().right(200)
    phase
  }
}
/**
 * A 2-D phase trace plotting variable `x` against variable `y` inside a [[PhasePlot]].
 * The instance registers itself with the owning plot at construction time.
 */
class Phase2D(private var _x: Yb, private var _y: Yb, private val _plot: PhasePlot)
    extends BeanSupport
    with DirtySupport
    with Dsl
    with Logging {
  // Constructor side effect: the plot tracks every phase created against it.
  _plot._phases += this
  @BeanProperty var scale: Double = 10.0
  @BeanProperty var sampling: Int = 2
  @BeanProperty var temporal = false
  @BeanProperty var window = 100
  @BeanProperty var showLabels = true
  @BeanProperty var showAxis = true
  var translateX = 0.0
  def x = _x
  def y = _y
  def plot: PhasePlot = _plot
  /** Rebinds the trace to an oscillator, plotting `osc.y` against `osc.x` (note the swap). */
  def update(osc: Osc): Unit = update(osc.y, osc.x)
  /**
   * Rebinds the traced variables.
   *
   * Fix: validate the arguments *before* logging — the debug message dereferences
   * `x.id`/`y.id`, so logging first would throw an opaque NullPointerException on a
   * null argument instead of the clear `require` failure.
   */
  def update(x: Yb, y: Yb): Unit = {
    require(x != null)
    require(y != null)
    logger.debug(s"x = ${x.id}, y = ${y.id}")
    _x = x
    _y = y
    // Pulse the dirty flag: presumably DirtySupport notifies listeners on each
    // transition so views re-read the new variables — TODO confirm.
    dirty = true
    dirty = false
  }
}
/** A 3-D phase trace: adds a third traced variable `z` on top of [[Phase2D]]. */
class Phase3D(x: Yb, y: Yb, private var _z: Yb, plot: PhasePlot) extends Phase2D(x, y, plot) {
  def z = _z
  /** Rebinds all three traced variables; x/y handling (and dirty pulse) is delegated to the superclass. */
  def update(x: Yb, y: Yb, z: Yb): Unit = {
    require(z != null)
    _z = z
    super.update(x, y)
  }
}
| MemoryNetworks/memnets | api/src/main/scala/memnets/ui/Phase.scala | Scala | apache-2.0 | 3,087 |
/**
* Copyright (C) 2012 Typesafe, Inc. <http://www.typesafe.com>
*/
package org.pantsbuild.zinc.compiler
import java.io.File
import java.net.URLClassLoader
import sbt.internal.inc.{
AnalyzingCompiler,
CompileOutput,
IncrementalCompilerImpl,
RawCompiler,
ScalaInstance,
javac,
ZincUtil
}
import sbt.internal.inc.classpath.ClassLoaderCache
import sbt.io.Path
import sbt.io.syntax._
import sbt.util.Logger
import xsbti.compile.{
ClasspathOptionsUtil,
CompilerCache,
Compilers,
GlobalsCache,
Inputs,
JavaTools,
ScalaCompiler,
ScalaInstance => XScalaInstance,
ZincCompilerUtil
}
import scala.compat.java8.OptionConverters._
import org.pantsbuild.zinc.cache.Cache
import org.pantsbuild.zinc.cache.Cache.Implicits
import org.pantsbuild.zinc.util.Util
/** Helpers for constructing zinc/sbt incremental Scala compilers and their caches. */
object CompilerUtils {
  val JavaClassVersion = System.getProperty("java.class.version")
  // Cache sizes are tunable via system properties.
  // NOTE(review): compilerCacheLimit is never read inside this object — confirm whether it is dead.
  private val compilerCacheLimit = Util.intProperty("zinc.compiler.cache.limit", 5)
  private val residentCacheLimit = Util.intProperty("zinc.resident.cache.limit", 0)
  /**
   * Static cache for resident scala compilers.
   * A non-positive limit disables caching: every compile gets a fresh compiler.
   */
  private val residentCache: GlobalsCache = {
    val maxCompilers = residentCacheLimit
    if (maxCompilers <= 0)
      CompilerCache.fresh
    else
      CompilerCache.createCacheFor(maxCompilers)
  }
  /**
   * Cache of classloaders: see https://github.com/pantsbuild/pants/issues/4744
   */
  private val classLoaderCache: Option[ClassLoaderCache] =
    Some(new ClassLoaderCache(new URLClassLoader(Array())))
  /**
   * Get the instance of the GlobalsCache.
   */
  def getGlobalsCache = residentCache
  /**
   * Create a new scala compiler.
   */
  def newScalaCompiler(instance: XScalaInstance, bridgeJar: File): AnalyzingCompiler =
    new AnalyzingCompiler(
      instance,
      ZincCompilerUtil.constantBridgeProvider(instance, bridgeJar),
      ClasspathOptionsUtil.auto,
      _ => (),
      classLoaderCache
    )
  /**
   * Create a new classloader with the root loader as parent (to avoid zinc itself being included).
   */
  def scalaLoader(jars: Seq[File]) =
    new URLClassLoader(
      Path.toURLs(jars),
      sbt.internal.inc.classpath.ClasspathUtilities.rootLoader
    )
  /**
   * Get the actual scala version from the compiler.properties in a classloader.
   * The classloader should only contain one version of scala.
   */
  def scalaVersion(scalaLoader: ClassLoader): Option[String] = {
    Util.propertyFromResource("compiler.properties", "version.number", scalaLoader)
  }
}
| twitter/pants | src/scala/org/pantsbuild/zinc/compiler/CompilerUtils.scala | Scala | apache-2.0 | 2,515 |
package es.weso.wiFetcher.dao.poi
import java.io.InputStream
import scala.collection.mutable.ListBuffer
import org.apache.poi.hssf.util.CellReference
import org.apache.poi.ss.usermodel.FormulaEvaluator
import org.apache.poi.ss.usermodel.Sheet
import org.apache.poi.ss.usermodel.Workbook
import org.apache.poi.ss.usermodel.WorkbookFactory
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import es.weso.wiFetcher.configuration.Configuration
import es.weso.wiFetcher.dao.ProviderDAO
import es.weso.wiFetcher.entities.Provider
import es.weso.wiFetcher.utils.POIUtils
import es.weso.wiFetcher.utils.IssueManagerUtils
import es.weso.wiFetcher.fetchers.SpreadsheetsFetcher
/**
* This class contains the implementation that allows to load all information
* about providers used by the Web Index
*/
class ProviderDAOImpl(is: InputStream)(implicit val sFetcher: SpreadsheetsFetcher)
  extends ProviderDAO with PoiDAO[Provider] {
  import ProviderDAOImpl._
  // Accumulates every provider parsed from the workbook; populated once by load() below.
  private val providers: ListBuffer[Provider] = ListBuffer.empty
  load(is)
  /**
   * This method has to load the information about providers
   * @param is The input stream of the file that contains the information
   */
  protected def load(is: InputStream) {
    val workbook = WorkbookFactory.create(is)
    //Obtain the corresponding sheet
    val sheet = workbook.getSheet(SheetName)
    if (sheet == null) {
      // Missing sheet is reported as an issue rather than thrown.
      sFetcher.issueManager.addError(
        message = s"The Indicators Sheet ${SheetName} does not exist",
        path = XslxFile)
    } else {
      logger.info("Begin providers extraction")
      providers ++= parseData(workbook, sheet)
      logger.info("Finish extraction of providers")
    }
  }
  /** Parses one [[Provider]] per row, from the configured initial cell down to the last row. */
  protected def parseData(workbook: Workbook, sheet: Sheet): Seq[Provider] = {
    //Obtain the first cell to load data. The first cell is in the properties
    //file
    val initialCell = new CellReference(Configuration.getProvierInitialCell)
    // NOTE(review): sheet.getRow(row) may return null for gaps in the sheet, which
    // would NPE below — confirm the Providers sheet always has contiguous rows.
    // Also note a new FormulaEvaluator is created per row.
    for {
      row <- initialCell.getRow() to sheet.getLastRowNum()
      evaluator: FormulaEvaluator = workbook.getCreationHelper().createFormulaEvaluator()
      //Extract the identifier of the provider
      id = POIUtils.extractCellValue(
        sheet.getRow(row).getCell(Configuration.getProviderIdColumn), evaluator)
      //Extract the name of the provider
      name = POIUtils.extractCellValue(
        sheet.getRow(row).getCell(Configuration.getProviderNameColumn), evaluator)
      //Extract the web of the provider
      web = POIUtils.extractCellValue(
        sheet.getRow(row).getCell(Configuration.getProviderWebColumn),
        evaluator)
      //Extract the source of the provider
      source = POIUtils.extractCellValue(
        sheet.getRow(row).getCell(Configuration.getProviderSourceColumn),
        evaluator)
    } yield {
      Provider(id, name, web, source)
    }
  }
  /** Returns an immutable snapshot of the providers parsed at construction time. */
  def getProviders(): List[Provider] = {
    providers.toList
  }
}
object ProviderDAOImpl {
  /**
   * The name of the sheet that contains the information
   */
  private val SheetName = "Providers"
  // Label identifying the workbook when reporting issues.
  private val XslxFile = Some("Structure File")
  private val logger: Logger = LoggerFactory.getLogger(this.getClass())
} | weso/wiFetcher | app/es/weso/wiFetcher/dao/poi/ProviderDAOImpl.scala | Scala | apache-2.0 | 3,150 |
package com.dimafeng.testcontainers
import org.testcontainers.containers.wait.WaitStrategy
import org.testcontainers.containers.{BindMode, FixedHostPortGenericContainer => OTCGenericContainer}
/**
 * Scala wrapper around testcontainers' FixedHostPortGenericContainer.
 * All configuration happens as constructor side effects on `container`.
 */
class GenericContainer(imageName: String,
                       fixedPorts: Map[Int, Int] = Map.empty,
                       exposedPorts: Seq[Int] = Seq(),
                       env: Map[String, String] = Map(),
                       command: Seq[String] = Seq(),
                       classpathResourceMapping: Seq[(String, String, BindMode)] = Seq(),
                       waitStrategy: Option[WaitStrategy] = None
                      ) extends SingleContainer[OTCGenericContainer[_]] {
  // Existential alias: holds any concrete self-typed testcontainers generic container.
  type OTCContainer = OTCGenericContainer[T] forSome {type T <: OTCGenericContainer[T]}
  override implicit val container: OTCContainer = new OTCGenericContainer(imageName)
  // Only forward non-empty settings to the underlying container.
  if (exposedPorts.nonEmpty) {
    container.withExposedPorts(exposedPorts.map(int2Integer): _*)
  }
  if(fixedPorts.nonEmpty) {
    // fixedPorts maps host port -> container port, one withFixedExposedPort call per entry.
    fixedPorts.foreach({case (from, to) => container.withFixedExposedPort(from, to)})
  }
  env.foreach(Function.tupled(container.withEnv))
  if (command.nonEmpty) {
    container.withCommand(command: _*)
  }
  classpathResourceMapping.foreach(Function.tupled(container.withClasspathResourceMapping))
  waitStrategy.foreach(container.waitingFor)
}
object GenericContainer {

  /** Convenience factory mirroring the class constructor; a `null` wait strategy maps to `None`. */
  def apply(imageName: String,
            fixedExposedPorts: Map[Int, Int] = Map.empty,
            exposedPorts: Seq[Int] = Seq(),
            env: Map[String, String] = Map(),
            command: Seq[String] = Seq(),
            classpathResourceMapping: Seq[(String, String, BindMode)] = Seq(),
            waitStrategy: WaitStrategy = null) =
    new GenericContainer(
      imageName = imageName,
      fixedPorts = fixedExposedPorts,
      exposedPorts = exposedPorts,
      env = env,
      command = command,
      classpathResourceMapping = classpathResourceMapping,
      waitStrategy = Option(waitStrategy)
    )
}
| KineticCookie/mist | src/it/scala/com/dimafeng/testcontainers/GenericContainer.scala | Scala | apache-2.0 | 1,659 |
// src/main/scala/progscala2/implicits/phantom-types.scala
// A workflow for payroll calculations.
package progscala.implicits.payroll
// Phantom types: never instantiated — they only tag Pay's `Step` parameter so the
// compiler enforces the ordering of the payroll steps in Payroll below.
sealed trait PreTaxDeductions
sealed trait PostTaxDeductions
sealed trait Final

// For simplicity, use Float for money. Not recommended...
case class Employee(
  name: String,
  annualSalary: Float,
  taxRate: Float, // For simplicity, just 1 rate covering all taxes.
  insurancePremiumsPerPayPeriod: Float,
  _401kDeductionRate: Float, // A pretax, retirement savings plan in the USA.
  postTaxDeductions: Float)

// A paycheck amount tagged with the phantom `Step` recording which deductions have run.
case class Pay[Step](employee: Employee, netPay: Float)
/**
 * Payroll steps. The phantom `Step` on [[Pay]] makes the compiler reject calling a
 * post-tax step before the tax step has run.
 */
object Payroll {

  /** Paychecks are biweekly; a year is treated as exactly 52 weeks. */
  def start(employee: Employee): Pay[PreTaxDeductions] =
    Pay[PreTaxDeductions](employee, employee.annualSalary / 26.0F)

  def minusInsurance(pay: Pay[PreTaxDeductions]): Pay[PreTaxDeductions] =
    pay.copy(netPay = pay.netPay - pay.employee.insurancePremiumsPerPayPeriod)

  def minus401k(pay: Pay[PreTaxDeductions]): Pay[PreTaxDeductions] =
    pay.copy(netPay = pay.netPay - (pay.employee._401kDeductionRate * pay.netPay))

  // `copy` regains a fresh type parameter, so the result is inferred as the
  // declared Pay[PostTaxDeductions] / Pay[Final] from the return type.
  def minusTax(pay: Pay[PreTaxDeductions]): Pay[PostTaxDeductions] =
    pay.copy(netPay = pay.netPay - (pay.employee.taxRate * pay.netPay))

  def minusFinalDeductions(pay: Pay[PostTaxDeductions]): Pay[Final] =
    pay.copy(netPay = pay.netPay - pay.employee.postTaxDeductions)
}
/** Demo entry point: runs one employee through the phantom-typed payroll pipeline. */
object CalculatePayroll {
  def main(args: Array[String]) = {
    val employee = Employee("Buck Trends", 100000.0F, 0.25F, 200F, 0.10F, 0.05F)
    // 401K and insurance can be calculated in either order.
    val pay = Payroll.minusFinalDeductions(
      Payroll.minusTax(
        Payroll.minusInsurance(
          Payroll.minus401k(
            Payroll.start(employee)))))
    val twoWeekGross = employee.annualSalary / 26.0F
    val twoWeekNet = pay.netPay
    val percent = (twoWeekNet / twoWeekGross) * 100
    println(s"For ${employee.name}, the gross vs. net pay every 2 weeks is:")
    println(
      f"  $$${twoWeekGross}%.2f vs. $$${twoWeekNet}%.2f or ${percent}%.1f%%")
  }
}
} | sunilrebel/programming-scala | examples/src/main/scala/progscala2/implicits/phantom-types.scala | Scala | mpl-2.0 | 2,239 |
package com.twitter.util.logging
/** Abstract-class variant used to exercise [[TraitWithLogging]] through a class hierarchy. */
abstract class AbstractTraitWithLogging extends TraitWithLogging
/** A trait mixing in [[Logging]]; `myMethod1` logs at info level and returns a constant. */
trait TraitWithLogging extends Logging {
  def myMethod1: String = {
    info("In myMethod1")
    "Hello, World"
  }
}
| twitter/util | util-slf4j-api/src/test/scala/com/twitter/util/logging/TraitWithLogging.scala | Scala | apache-2.0 | 220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.functions.UserDefinedFunction
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.expressions.PlannerNamedWindowProperty
import org.apache.flink.table.planner.plan.logical.LogicalWindow
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecSortWindowAggregate
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import java.util
/** Batch physical RelNode for local sort-based window aggregate. */
class BatchPhysicalLocalSortWindowAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    outputRowType: RelDataType,
    inputRowType: RelDataType,
    grouping: Array[Int],
    auxGrouping: Array[Int],
    aggCallToAggFunction: Seq[(AggregateCall, UserDefinedFunction)],
    window: LogicalWindow,
    val inputTimeFieldIndex: Int,
    inputTimeIsDate: Boolean,
    namedWindowProperties: Seq[PlannerNamedWindowProperty],
    enableAssignPane: Boolean = false)
  // Local (pre-shuffle) stage: hence isMerge = false and isFinal = false below.
  extends BatchPhysicalSortWindowAggregateBase(
    cluster,
    traitSet,
    inputRel,
    outputRowType,
    grouping,
    auxGrouping,
    aggCallToAggFunction,
    window,
    namedWindowProperties,
    enableAssignPane,
    isMerge = false,
    isFinal = false) {
  // Forwards every constructor field; only traitSet and the (single) input change.
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
    new BatchPhysicalLocalSortWindowAggregate(
      cluster,
      traitSet,
      inputs.get(0),
      outputRowType,
      inputRowType,
      grouping,
      auxGrouping,
      aggCallToAggFunction,
      window,
      inputTimeFieldIndex,
      inputTimeIsDate,
      namedWindowProperties,
      enableAssignPane)
  }
  // Converts this planner node into the runtime exec node.
  override def translateToExecNode(): ExecNode[_] = {
    new BatchExecSortWindowAggregate(
      grouping,
      auxGrouping,
      getAggCallList.toArray,
      window,
      inputTimeFieldIndex,
      inputTimeIsDate,
      namedWindowProperties.toArray,
      FlinkTypeFactory.toLogicalRowType(inputRowType),
      enableAssignPane,
      false, // isMerge is always false
      false, // isFinal is always false
      InputProperty.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalLocalSortWindowAggregate.scala | Scala | apache-2.0 | 3,315 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.lang.{Double => jlDouble, Long => jlLong}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.tags.SlowHiveTest
/**
 * Aggregator computing 100.0 + average of the non-null inputs. The buffer is a
 * (runningSum, nonNullCount) pair; the result is null when no input was seen.
 */
class MyDoubleAvgAggBase extends Aggregator[jlDouble, (Double, Long), jlDouble] {
  def zero: (Double, Long) = (0.0, 0L)
  def reduce(b: (Double, Long), a: jlDouble): (Double, Long) = {
    // Null inputs contribute nothing, matching SQL aggregate semantics.
    val (sum, count) = b
    if (a == null) b else (sum + a, count + 1L)
  }
  def merge(b1: (Double, Long), b2: (Double, Long)): (Double, Long) = {
    val (sum1, count1) = b1
    val (sum2, count2) = b2
    (sum1 + sum2, count1 + count2)
  }
  def finish(r: (Double, Long)): jlDouble = {
    val (sum, count) = r
    if (count > 0L) 100.0 + (sum / count.toDouble) else null
  }
  def bufferEncoder: Encoder[(Double, Long)] =
    Encoders.tuple(Encoders.scalaDouble, Encoders.scalaLong)
  def outputEncoder: Encoder[jlDouble] = Encoders.DOUBLE
}
/** Average-plus-100 over non-null inputs. */
object MyDoubleAvgAgg extends MyDoubleAvgAggBase
/** Same buffer as the average, but `finish` yields the raw sum (null if no input). */
object MyDoubleSumAgg extends MyDoubleAvgAggBase {
  override def finish(r: (Double, Long)): jlDouble = if (r._2 > 0L) r._1 else null
}
/**
 * Sums value1 * value2 over all rows where both sides are non-null;
 * a null on either side contributes nothing.
 */
object LongProductSumAgg extends Aggregator[(jlLong, jlLong), Long, jlLong] {
  def zero: Long = 0L
  def reduce(b: Long, a: (jlLong, jlLong)): Long = a match {
    case (x, y) if x != null && y != null => b + x * y
    case _ => b
  }
  def merge(b1: Long, b2: Long): Long = b1 + b2
  def finish(r: Long): jlLong = r
  def bufferEncoder: Encoder[Long] = Encoders.scalaLong
  def outputEncoder: Encoder[jlLong] = Encoders.LONG
}
// Value carried through the UDT below: nSer/nDeSer count how many times the value
// passed through serialize/deserialize; `sum` is the actual aggregated payload.
@SQLUserDefinedType(udt = classOf[CountSerDeUDT])
case class CountSerDeSQL(nSer: Int, nDeSer: Int, sum: Int)
/** UDT whose serialize/deserialize calls are observable via the nSer/nDeSer counters. */
class CountSerDeUDT extends UserDefinedType[CountSerDeSQL] {
  def userClass: Class[CountSerDeSQL] = classOf[CountSerDeSQL]
  override def typeName: String = "count-ser-de"
  private[spark] override def asNullable: CountSerDeUDT = this
  def sqlType: DataType = StructType(
    StructField("nSer", IntegerType, false) ::
    StructField("nDeSer", IntegerType, false) ::
    StructField("sum", IntegerType, false) ::
    Nil)
  // Each serialize bumps nSer by one so tests can count round trips.
  def serialize(sql: CountSerDeSQL): Any = {
    val row = new GenericInternalRow(3)
    row.setInt(0, 1 + sql.nSer)
    row.setInt(1, sql.nDeSer)
    row.setInt(2, sql.sum)
    row
  }
  // Each deserialize bumps nDeSer by one; any other input shape is an error.
  def deserialize(any: Any): CountSerDeSQL = any match {
    case row: InternalRow if (row.numFields == 3) =>
      CountSerDeSQL(row.getInt(0), 1 + row.getInt(1), row.getInt(2))
    case u => throw new Exception(s"failed to deserialize: $u")
  }
  // All instances of this UDT are interchangeable, hence type-based equality.
  override def equals(obj: Any): Boolean = {
    obj match {
      case _: CountSerDeUDT => true
      case _ => false
    }
  }
  override def hashCode(): Int = classOf[CountSerDeUDT].getName.hashCode()
}
case object CountSerDeUDT extends CountSerDeUDT
/** Aggregator over the ser/de-counting UDT: sums ints while accumulating ser/de counts. */
object CountSerDeAgg extends Aggregator[Int, CountSerDeSQL, CountSerDeSQL] {
  def zero: CountSerDeSQL = CountSerDeSQL(0, 0, 0)
  def reduce(b: CountSerDeSQL, a: Int): CountSerDeSQL = b.copy(sum = b.sum + a)
  def merge(b1: CountSerDeSQL, b2: CountSerDeSQL): CountSerDeSQL =
    CountSerDeSQL(
      nSer = b1.nSer + b2.nSer,
      nDeSer = b1.nDeSer + b2.nDeSer,
      sum = b1.sum + b2.sum)
  def finish(r: CountSerDeSQL): CountSerDeSQL = r
  def bufferEncoder: Encoder[CountSerDeSQL] = ExpressionEncoder[CountSerDeSQL]()
  def outputEncoder: Encoder[CountSerDeSQL] = ExpressionEncoder[CountSerDeSQL]()
}
/**
 * Element-wise sum of equal-length Double arrays (zero buffer has length 3).
 * The left-hand buffer array is mutated in place and returned.
 */
object ArrayDataAgg extends Aggregator[Array[Double], Array[Double], Array[Double]] {
  def zero: Array[Double] = Array(0.0, 0.0, 0.0)
  def reduce(s: Array[Double], array: Array[Double]): Array[Double] = {
    require(s.length == array.length)
    var j = 0
    while (j < s.length) {
      s(j) += array(j)
      j += 1
    }
    s
  }
  def merge(s1: Array[Double], s2: Array[Double]): Array[Double] = {
    require(s1.length == s2.length)
    var j = 0
    while (j < s1.length) {
      s1(j) += s2(j)
      j += 1
    }
    s1
  }
  def finish(s: Array[Double]): Array[Double] = s
  def bufferEncoder: Encoder[Array[Double]] = ExpressionEncoder[Array[Double]]
  def outputEncoder: Encoder[Array[Double]] = ExpressionEncoder[Array[Double]]
}
/**
 * Shared query suite exercising the Aggregator-based UDAFs above through SQL.
 * Concrete subclasses vary the aggregation execution strategy.
 */
abstract class UDAQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
  import testImplicits._
  // Builds fixture tables agg1..agg4 and an empty temp view, then registers the
  // aggregators under the SQL names used by the tests below.
  override def beforeAll(): Unit = {
    super.beforeAll()
    val data1 = Seq[(Integer, Integer)](
      (1, 10),
      (null, -60),
      (1, 20),
      (1, 30),
      (2, 0),
      (null, -10),
      (2, -1),
      (2, null),
      (2, null),
      (null, 100),
      (3, null),
      (null, null),
      (3, null)).toDF("key", "value")
    data1.write.saveAsTable("agg1")
    val data2 = Seq[(Integer, Integer, Integer)](
      (1, 10, -10),
      (null, -60, 60),
      (1, 30, -30),
      (1, 30, 30),
      (2, 1, 1),
      (null, -10, 10),
      (2, -1, null),
      (2, 1, 1),
      (2, null, 1),
      (null, 100, -10),
      (3, null, 3),
      (null, null, null),
      (3, null, null)).toDF("key", "value1", "value2")
    data2.write.saveAsTable("agg2")
    val data3 = Seq[(Seq[Double], Int)](
      (Seq(1.0, 2.0, 3.0), 0),
      (Seq(4.0, 5.0, 6.0), 0),
      (Seq(7.0, 8.0, 9.0), 0)
    ).toDF("data", "dummy")
    data3.write.saveAsTable("agg3")
    val data4 = Seq[Boolean](true, false, true).toDF("boolvalues")
    data4.write.saveAsTable("agg4")
    val emptyDF = spark.createDataFrame(
      sparkContext.emptyRDD[Row],
      StructType(StructField("key", StringType) :: StructField("value", IntegerType) :: Nil))
    emptyDF.createOrReplaceTempView("emptyTable")
    // Register UDAs
    spark.udf.register("mydoublesum", udaf(MyDoubleSumAgg))
    spark.udf.register("mydoubleavg", udaf(MyDoubleAvgAgg))
    spark.udf.register("longProductSum", udaf(LongProductSumAgg))
    spark.udf.register("arraysum", udaf(ArrayDataAgg))
  }
  // Drops the fixture tables/view regardless of test outcome.
  override def afterAll(): Unit = {
    try {
      spark.sql("DROP TABLE IF EXISTS agg1")
      spark.sql("DROP TABLE IF EXISTS agg2")
      spark.sql("DROP TABLE IF EXISTS agg3")
      spark.sql("DROP TABLE IF EXISTS agg4")
      spark.catalog.dropTempView("emptyTable")
    } finally {
      super.afterAll()
    }
  }
  test("aggregators") {
    checkAnswer(
      spark.sql(
        """
          |SELECT
          |  key,
          |  mydoublesum(value + 1.5 * key),
          |  mydoubleavg(value),
          |  avg(value - key),
          |  mydoublesum(value - 1.5 * key),
          |  avg(value)
          |FROM agg1
          |GROUP BY key
        """.stripMargin),
      Row(1, 64.5, 120.0, 19.0, 55.5, 20.0) ::
        Row(2, 5.0, 99.5, -2.5, -7.0, -0.5) ::
        Row(3, null, null, null, null, null) ::
        Row(null, null, 110.0, null, null, 10.0) :: Nil)
  }
  test("non-deterministic children expressions of aggregator") {
    // rand() inside an aggregate argument must be rejected at analysis time.
    val e = intercept[AnalysisException] {
      spark.sql(
        """
          |SELECT mydoublesum(value + 1.5 * key + rand())
          |FROM agg1
          |GROUP BY key
        """.stripMargin)
    }.getMessage
    assert(Seq("nondeterministic expression",
      "should not appear in the arguments of an aggregate function").forall(e.contains))
  }
  test("interpreted aggregate function") {
    checkAnswer(
      spark.sql(
        """
          |SELECT mydoublesum(value), key
          |FROM agg1
          |GROUP BY key
        """.stripMargin),
      Row(60.0, 1) :: Row(-1.0, 2) :: Row(null, 3) :: Row(30.0, null) :: Nil)
    checkAnswer(
      spark.sql(
        """
          |SELECT mydoublesum(value) FROM agg1
        """.stripMargin),
      Row(89.0) :: Nil)
    checkAnswer(
      spark.sql(
        """
          |SELECT mydoublesum(null)
        """.stripMargin),
      Row(null) :: Nil)
  }
  test("interpreted and expression-based aggregation functions") {
    checkAnswer(
      spark.sql(
        """
          |SELECT mydoublesum(value), key, avg(value)
          |FROM agg1
          |GROUP BY key
        """.stripMargin),
      Row(60.0, 1, 20.0) ::
        Row(-1.0, 2, -0.5) ::
        Row(null, 3, null) ::
        Row(30.0, null, 10.0) :: Nil)
    checkAnswer(
      spark.sql(
        """
          |SELECT
          |  mydoublesum(value + 1.5 * key),
          |  avg(value - key),
          |  key,
          |  mydoublesum(value - 1.5 * key),
          |  avg(value)
          |FROM agg1
          |GROUP BY key
        """.stripMargin),
      Row(64.5, 19.0, 1, 55.5, 20.0) ::
        Row(5.0, -2.5, 2, -7.0, -0.5) ::
        Row(null, null, 3, null, null) ::
        Row(null, null, null, null, 10.0) :: Nil)
  }
  test("single distinct column set") {
    checkAnswer(
      spark.sql(
        """
          |SELECT
          |  mydoubleavg(distinct value1),
          |  avg(value1),
          |  avg(value2),
          |  key,
          |  mydoubleavg(value1 - 1),
          |  mydoubleavg(distinct value1) * 0.1,
          |  avg(value1 + value2)
          |FROM agg2
          |GROUP BY key
        """.stripMargin),
      Row(120.0, 70.0/3.0, -10.0/3.0, 1, 67.0/3.0 + 100.0, 12.0, 20.0) ::
        Row(100.0, 1.0/3.0, 1.0, 2, -2.0/3.0 + 100.0, 10.0, 2.0) ::
        Row(null, null, 3.0, 3, null, null, null) ::
        Row(110.0, 10.0, 20.0, null, 109.0, 11.0, 30.0) :: Nil)
    checkAnswer(
      spark.sql(
        """
          |SELECT
          |  key,
          |  mydoubleavg(distinct value1),
          |  mydoublesum(value2),
          |  mydoublesum(distinct value1),
          |  mydoubleavg(distinct value1),
          |  mydoubleavg(value1)
          |FROM agg2
          |GROUP BY key
        """.stripMargin),
      Row(1, 120.0, -10.0, 40.0, 120.0, 70.0/3.0 + 100.0) ::
        Row(2, 100.0, 3.0, 0.0, 100.0, 1.0/3.0 + 100.0) ::
        Row(3, null, 3.0, null, null, null) ::
        Row(null, 110.0, 60.0, 30.0, 110.0, 110.0) :: Nil)
  }
  test("multiple distinct multiple columns sets") {
    checkAnswer(
      spark.sql(
        """
          |SELECT
          |  key,
          |  count(distinct value1),
          |  sum(distinct value1),
          |  count(distinct value2),
          |  sum(distinct value2),
          |  count(distinct value1, value2),
          |  longProductSum(distinct value1, value2),
          |  count(value1),
          |  sum(value1),
          |  count(value2),
          |  sum(value2),
          |  longProductSum(value1, value2),
          |  count(*),
          |  count(1)
          |FROM agg2
          |GROUP BY key
        """.stripMargin),
      Row(null, 3, 30, 3, 60, 3, -4700, 3, 30, 3, 60, -4700, 4, 4) ::
        Row(1, 2, 40, 3, -10, 3, -100, 3, 70, 3, -10, -100, 3, 3) ::
        Row(2, 2, 0, 1, 1, 1, 1, 3, 1, 3, 3, 2, 4, 4) ::
        Row(3, 0, null, 1, 3, 0, 0, 0, null, 1, 3, 0, 2, 2) :: Nil)
  }
  test("SPARK-32159: array encoders should be resolved in analyzer") {
    checkAnswer(
      spark.sql("SELECT arraysum(data) FROM agg3"),
      Row(Seq(12.0, 15.0, 18.0)) :: Nil)
  }
  test("verify aggregator ser/de behavior") {
    val data = sparkContext.parallelize((1 to 100).toSeq, 3).toDF("value1")
    val agg = udaf(CountSerDeAgg)
    // Expected nSer/nDeSer of 4 presumably reflects the 3 partitions plus the final
    // merge — TODO confirm against the aggregation execution path.
    checkAnswer(
      data.agg(agg($"value1")),
      Row(CountSerDeSQL(4, 4, 5050)) :: Nil)
  }
  test("verify type casting failure") {
    // Boolean input to a Double aggregator must be rejected at analysis time.
    assertThrows[org.apache.spark.sql.AnalysisException] {
      spark.sql(
        """
          |SELECT mydoublesum(boolvalues) FROM agg4
        """.stripMargin)
    }
  }
}
// Runs the shared UDA query suite with default (hash aggregate) settings.
@SlowHiveTest
class HashUDAQuerySuite extends UDAQuerySuite
// Re-runs every checkAnswer under forced fallback from hash aggregation to
// sort-based aggregation, for both one- and two-level hash maps.
@SlowHiveTest
class HashUDAQueryWithControlledFallbackSuite extends UDAQuerySuite {
  override protected def checkAnswer(actual: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
    super.checkAnswer(actual, expectedAnswer)
    Seq("true", "false").foreach { enableTwoLevelMaps =>
      withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enabled" ->
        enableTwoLevelMaps) {
        (1 to 3).foreach { fallbackStartsAt =>
          // First number: rows before falling back to the bytes-to-bytes map;
          // second: rows before falling back to sort-based aggregation.
          withSQLConf("spark.sql.TungstenAggregate.testFallbackStartsAt" ->
            s"${(fallbackStartsAt - 1).toString}, ${fallbackStartsAt.toString}") {
            QueryTest.getErrorMessageInCheckAnswer(actual, expectedAnswer) match {
              case Some(errorMessage) =>
                val newErrorMessage =
                  s"""
                     |The following aggregation query failed when using HashAggregate with
                     |controlled fallback (it falls back to bytes to bytes map once it has processed
                     |${fallbackStartsAt - 1} input rows and to sort-based aggregation once it has
                     |processed $fallbackStartsAt input rows). The query is ${actual.queryExecution}
                     |
                     |$errorMessage
                   """.stripMargin
                fail(newErrorMessage)
              case None => // Success
            }
          }
        }
      }
    }
  }
  // Override it to make sure we call the actually overridden checkAnswer.
  override protected def checkAnswer(df: => DataFrame, expectedAnswer: Row): Unit = {
    checkAnswer(df, Seq(expectedAnswer))
  }
  // Override it to make sure we call the actually overridden checkAnswer.
  override protected def checkAnswer(df: => DataFrame, expectedAnswer: DataFrame): Unit = {
    checkAnswer(df, expectedAnswer.collect())
  }
}
| shaneknapp/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala | Scala | apache-2.0 | 14,195 |
/*
* Copyright 2015 Alex Garella
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alexgarella.RxKinesis
import java.io.{FileReader, BufferedReader}
import java.nio.ByteBuffer
import com.alexgarella.RxKinesis.configuration.Configuration.{ConsumerConfiguration, PublisherConfiguration}
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.AmazonKinesisClient
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
import com.amazonaws.services.kinesis.model.PutRecordRequest
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FeatureSpec, GivenWhenThen}
import rx.lang.scala.{Observable, Observer}
import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Random
// NOTE(review): live integration test — it talks to a real Kinesis stream and
// relies on wall-clock sleeps, so it is slow and inherently timing-sensitive.
class RxKinesisTest extends FeatureSpec with GivenWhenThen with BeforeAndAfter with MockitoSugar {
  // Credentials come from a local ".credentials" file: line 1 is the access
  // key id, line 2 the secret access key.
  // NOTE(review): the reader is never closed — tolerable in a test, but worth
  // tidying with a try/finally or scala.io.Source.
  val (accessKeyID, secretAccessKey) = {
    val reader = new BufferedReader(new FileReader(".credentials"))
    val accessKeyId = reader.readLine()
    val secretAccessKey = reader.readLine()
    (accessKeyId, secretAccessKey)
  }
  val RegionName = "eu-central-1"
  val StreamName = "TestStream"
  // Today's date (yyyyMMdd) is baked into the KCL application name below so
  // that checkpoints do not collide across days.
  val Date = DateTimeFormat.forPattern("yyyyMMdd").print(new DateTime())
  // Records on the stream are stringified integers.
  def parser = (s: String) => Integer.parseInt(s)
  // Shared sink filled by the observer under test; reset at the start of
  // every scenario. Mutable by necessity — assertions read it after sleeps.
  var buffer: ListBuffer[Int] = ListBuffer.empty
  feature("reactive streaming from Kinesis") {
    val NumberOfElements = 10
    def isEven = (x: Int) => { x % 2 == 0 }
    scenario(s"streaming $NumberOfElements even numbers") {
      Given("a Kinesis Observable which filters even numbers")
      buffer = ListBuffer.empty
      val rxKinesis = RxKinesisConsumer(parser, consumerConfig)
      val kinesisObservable = rxKinesis.observable
        .filter(isEven)
        .take(NumberOfElements)
      And("an observer")
      val kinesisObserver = getObserver
      When("subscribing")
      kinesisObservable.subscribe(kinesisObserver)
      And("starting the stream")
      When("writing data to the Kinesis stream")
      // Producer runs on a background thread; the sleep below gives the
      // consumer time to observe enough records.
      Future { writeToStream() }
      Thread.sleep(30000)
      Then("the result will contain only even numbers")
      assertResult(true)(buffer.forall(isEven))
      And(s"the result list will have $NumberOfElements elements")
      assertResult(NumberOfElements)(buffer.size)
      rxKinesis.stop()
    }
    scenario("composing two observables") {
      buffer = ListBuffer.empty
      Given(s"a composition of two streams of which the sum is calculated")
      val rxKinesis = RxKinesisConsumer(parser, consumerConfig)
      val o = Observable.just(1, 2, 3, 4, 5)
      // zipWith pairs the first five Kinesis values with 1..5; sum collapses
      // the zipped stream to a single element.
      val kinesisObservable = rxKinesis.observable
        .zipWith(o)((x, y) => x + y)
        .sum
      And("an observer")
      val kinesisObserver = getObserver
      When("subscribing")
      kinesisObservable.subscribe(kinesisObserver)
      And("starting the stream")
      When("writing data to the Kinesis stream")
      Future { writeToStream() }
      Thread.sleep(30000)
      // Stream values are non-negative (nextInt(100)), so the zipped sum is
      // at least the sum of 1..5.
      Then(s"the result should be larger or equal to ${(1 to 5).sum}")
      assertResult(true)(buffer.headOption.getOrElse(-1) >= (1 to 5).sum)
      rxKinesis.stop()
    }
    scenario("merging two observables") {
      buffer = ListBuffer.empty
      Given(s"a Kinesis observable which is merged with a stream of 5 1s")
      val rxKinesis = RxKinesisConsumer(parser, consumerConfig)
      val o = Observable.just(1, 1, 1, 1, 1)
      // 1s are filtered out of the Kinesis side so the five merged-in 1s are
      // exactly the 1s counted by the assertions below.
      val kinesisObservable = rxKinesis.observable
        .filter(_ != 1)
        .merge(o)
        .take(NumberOfElements)
      And("an observer")
      val kinesisObserver = getObserver
      When("subscribing")
      kinesisObservable.subscribe(kinesisObserver)
      And("starting the stream")
      When("writing data to the Kinesis stream")
      Future { writeToStream() }
      Thread.sleep(30000)
      Then(s"the result should contain $NumberOfElements elements")
      assertResult(NumberOfElements)(buffer.size)
      Then(s"the result should contain 5 1s")
      assertResult(5)(buffer.count(_ == 1))
      Then(s"the result should contain ${NumberOfElements - 5} elements other than 1")
      assertResult(NumberOfElements - 5)(buffer.count(_ != 1))
      rxKinesis.stop()
    }
  }
  // NOTE(review): this feature block has no scenario { } wrapper, so its body
  // (including the assertion) executes at suite construction time rather than
  // as a reported scenario — confirm this is intended.
  feature("reactive streaming to Amazon Kinesis") {
    buffer = ListBuffer.empty
    val rxKinesis = RxKinesisConsumer(parser, consumerConfig)
    rxKinesis.observable.subscribe(getObserver)
    Thread.sleep(30000)
    val config = PublisherConfiguration(profileCredentialsProviderMock, StreamName, RegionName, s"RxKinesisTest$Date", "1", None)
    RxKinesisPublisher((x: Int) => x.toString, Observable.just(1, 2, 3, 4, 5), config)
    Thread.sleep(3000)
    assertResult(List(1, 2, 3, 4, 5))(buffer.toList)
    rxKinesis.stop()
  }
  // Observer that records every element into the shared buffer and logs
  // errors to stdout. onCompleted is left at its default (no-op).
  def getObserver: Observer[Int] = new Observer[Int] {
    override def onNext(value: Int): Unit = buffer += value
    override def onError(error: Throwable): Unit = println(error.getMessage)
  }
  // Background producer: after a 20s grace period (lets the KCL worker lease
  // shards), pushes a random 0..99 integer every 100ms forever. The enclosing
  // Future is abandoned when the JVM exits.
  def writeToStream(): Unit = {
    Thread.sleep(20000)
    val client: AmazonKinesisClient = new AmazonKinesisClient(profileCredentialsProviderMock).withRegion(Regions.fromName(RegionName))
    while (true) {
      val putRecordRequest = new PutRecordRequest
      putRecordRequest.setStreamName(StreamName)
      val value = (new Random).nextInt(100).toString.getBytes
      putRecordRequest.setData(ByteBuffer.wrap(value))
      putRecordRequest.setPartitionKey("1")
      client.putRecord(putRecordRequest)
      Thread.sleep(100)
    }
  }
  // Fresh consumer configuration per call; starts from LATEST so each run
  // only sees records produced during the test.
  def consumerConfig: ConsumerConfiguration =
    ConsumerConfiguration(profileCredentialsProviderMock, StreamName, RegionName, s"RxKinesisTest$Date", InitialPositionInStream.LATEST,
      None)
  // Mockito stub that hands out the file-based credentials through the
  // ProfileCredentialsProvider interface the SDK expects.
  def profileCredentialsProviderMock: ProfileCredentialsProvider = {
    val basicAWSCredentials = new BasicAWSCredentials(accessKeyID, secretAccessKey)
    val profileCredentialsProvider = mock[ProfileCredentialsProvider]
    doReturn(basicAWSCredentials).when(profileCredentialsProvider).getCredentials
    profileCredentialsProvider
  }
}
| agarella/RxKinesis | src/test/scala/com/alexgarella/RxKinesis/RxKinesisTest.scala | Scala | apache-2.0 | 6,903 |
package scala.slick.driver
import java.sql.Types._
import scala.slick.ql.TypeMapperDelegate
/** SQL string helpers shared by the basic driver components. */
trait BasicSQLUtilsComponent {

  /**
   * Quotes a SQL identifier: wraps it in double quotes and doubles any
   * embedded double-quote character (standard SQL escaping).
   *
   * Rewritten with explicit dotted calls — the original relied on the
   * deprecated postfix/infix notation (`s append '"' toString`).
   */
  def quoteIdentifier(id: String): String = {
    // +4 leaves room for the two surrounding quotes plus escaping slack.
    val s = new StringBuilder(id.length + 4)
    s.append('"')
    for (c <- id) {
      if (c == '"') s.append("\"\"") else s.append(c)
    }
    s.append('"')
    s.toString
  }

  /**
   * Maps a type mapper delegate to a SQL type name, forcing an explicit
   * bounded VARCHAR(254) where the delegate reports a plain VARCHAR.
   */
  def mapTypeName(tmd: TypeMapperDelegate[_]): String = tmd.sqlType match {
    case VARCHAR => "VARCHAR(254)"
    case _ => tmd.sqlTypeName
  }
}
| szeiger/scala-query | src/main/scala/scala/slick/driver/BasicSQLUtilsComponent.scala | Scala | bsd-2-clause | 468 |
package pamflet
import java.io.{File,FileOutputStream,InputStream,
OutputStream,ByteArrayInputStream}
import scala.annotation.tailrec
object Produce {
  /**
   * Writes the site for every language: the default language directly into
   * `target`, every other language into a `target/<lang>` subdirectory.
   */
  def apply(globalized: Globalized, target: File) {
    globalized.languages foreach { lang =>
      if (lang == globalized.defaultLanguage) apply(globalized.defaultContents, globalized, target)
      else apply(globalized(lang), globalized, new File(target, lang))
    }
  }

  /**
   * Produces one language's pages, stylesheets, scripts and support files
   * under `target`, plus an offline copy (with an HTML5 cache manifest)
   * under `target/offline`.
   */
  def apply(contents: Contents, globalized: Globalized, target: File) {
    def writeString(path: String, contents: String, target: File) {
      write(path, target, new ByteArrayInputStream(contents.getBytes("utf-8")))
    }
    // Copies `r` to target/path, creating parent directories as needed.
    // Fix: streams are now closed in a finally block so a failed copy can no
    // longer leak file handles (the original only closed them on success).
    def write(path: String, target: File, r: InputStream) {
      val file = new File(target, path)
      new File(file.getParent).mkdirs()
      val w = new FileOutputStream(file)
      try {
        copy(r, w)
      } finally {
        try r.close()
        finally w.close()
      }
    }
    // Byte-by-byte copy; flushes the sink when the source is exhausted.
    def copy(r: InputStream, w: OutputStream) {
      @tailrec def doCopy: Unit = {
        val byte = r.read()
        if (byte != -1) {
          w.write(byte)
          doCopy
        }
      }
      doCopy
      w.flush()
    }
    val manifest = "pamflet.manifest"
    val offlineTarget = new File(target + "/offline/")
    val css = contents.css.map { case (nm, v) => ("css/" + nm, v) }.toList
    val paths = filePaths(contents)
    val files = contents.files.toList.map {
      case (nm, u) => ("files/" + nm, u)
    }
    val favicon = contents.favicon.toList.map {
      case u => ("favicon.ico", u)
    }
    val heights = Heights.distinctHeights(contents)
    // Generate the pages twice: once in the target directory, and once in
    // the "offline" subdirectory referencing the HTML5 cache manifest.
    List(Some(manifest), None).foreach { manifestOpt =>
      val offline = !manifestOpt.isEmpty
      val targetDir = if (offline) offlineTarget else target
      val printer = Printer(contents, globalized, manifestOpt)
      contents.pages.foreach { page =>
        val w = new java.io.StringWriter()
        // legacy-compat doctype keeps browsers in standards mode without a
        // full HTML5 doctype (which scala.xml cannot emit directly).
        xml.XML.write(w,
          printer.print(page),
          "utf-8",
          xmlDecl = false,
          doctype = xml.dtd.DocType(
            "html",
            xml.dtd.SystemID("about:legacy-compat"),
            Nil
          )
        )
        val pagePath = Printer.fileify(page)
        writeString(pagePath, w.toString, targetDir)
      }
      css.foreach { case (path, contents) =>
        writeString(path, contents, targetDir)
      }
      heights foreach { case (hh, fh) =>
        writeString("css/" + Heights.heightCssFileName(hh, fh), Heights.heightCssFileContent(hh, fh), targetDir)
      }
      // Static assets are copied out of this library's own resources.
      paths.foreach { path =>
        write(path,
          targetDir,
          new java.net.URL(Shared.resources, path).openStream()
        )
      }
      for ((path, uri) <- files ++ favicon)
        write(path, targetDir, uri.toURL.openStream)
      writeString("Combined+Pages.md", contents.scrollPage.raw, targetDir)
    }
    // The manifest itself is only written to the offline copy; the embedded
    // timestamp forces browsers to re-fetch the cache on every publish.
    writeString(manifest, (
      "CACHE MANIFEST" ::
      ("# " + new java.util.Date) ::
      css.map { case (n, _) => n } :::
      contents.pages.map { p => Printer.webify(p) } :::
      files.map { case (n, _) => n } :::
      favicon.map { case (n, _) => n } :::
      paths).mkString("\n"),
      offlineTarget
    )
  }

  /** Resource paths (images, css, js) bundled with every generated site. */
  def filePaths(contents: Contents) =
    ("fork.png" :: "twitter-bird-dark-bgs.png" :: Nil).map {
      "img/" + _
    } :::
    ("pamflet.css" :: "pamflet-grid.css" :: "pamflet-print.css" ::
      "color_scheme-redmond.css" :: "color_scheme-github.css" :: "color_scheme-monokai.css" :: Nil).map {
      "css/" + _
    } :::
    ("screen.css" :: "grid.css" :: "print.css" :: "ie.css" :: Nil).map {
      "css/blueprint/" + _
    } :::
    ("jquery-1.6.2.min.js" ::
      "jquery.collapse.js" ::
      "pamflet.js" :: Nil
    ).map { "js/" + _ } :::
    "css/prettify.css" ::
    ("prettify.js" ::
      contents.prettifyLangs.map { l => "lang-%s.js".format(l) }.toList
    ).map {
      "js/prettify/" + _
    }
}
| n8han/pamflet | library/src/main/scala/produce.scala | Scala | lgpl-3.0 | 4,128 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.Serializable
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.metrics.source.Source
import org.apache.spark.unsafe.memory.TaskMemoryManager
import org.apache.spark.util.TaskCompletionListener
object TaskContext {
  /**
   * Return the currently active TaskContext. This can be called inside of
   * user functions to access contextual information about running tasks.
   */
  def get(): TaskContext = localContext.get

  /**
   * Returns the partition id of the currently active TaskContext, or 0 when
   * no TaskContext is set (e.g. during local execution).
   */
  def getPartitionId(): Int =
    Option(localContext.get()).fold(0)(_.partitionId())

  // One slot per executor thread; tasks install their context before running
  // user code and remove it afterwards.
  private[this] val localContext: ThreadLocal[TaskContext] = new ThreadLocal[TaskContext]

  // Note: protected[spark] instead of private[spark] to prevent the following two from
  // showing up in JavaDoc.

  /** Set the thread local TaskContext. Internal to Spark. */
  protected[spark] def setTaskContext(tc: TaskContext): Unit = localContext.set(tc)

  /** Unset the thread local TaskContext. Internal to Spark. */
  protected[spark] def unset(): Unit = localContext.remove()

  /** An empty task context that does not represent an actual task. */
  private[spark] def empty(): TaskContextImpl =
    new TaskContextImpl(0, 0, 0, 0, null, null, Seq.empty)
}
/**
* Contextual information about a task which can be read or mutated during
* execution. To access the TaskContext for a running task, use:
* {{{
* org.apache.spark.TaskContext.get()
* }}}
*/
abstract class TaskContext extends Serializable {
  // Note: TaskContext must NOT define a get method. Otherwise it will prevent the Scala compiler
  // from generating a static get method (based on the companion object's get method).
  // Note: Update JavaTaskContextCompileCheck when new methods are added to this class.
  // Note: getters in this class are defined with parentheses to maintain backward compatibility.
  /**
   * Returns true if the task has completed.
   */
  def isCompleted(): Boolean
  /**
   * Returns true if the task has been killed.
   */
  def isInterrupted(): Boolean
  /** Deprecated alias — use [[isRunningLocally]] instead. */
  @deprecated("use isRunningLocally", "1.2.0")
  def runningLocally(): Boolean
  /**
   * Returns true if the task is running locally in the driver program.
   * @return
   */
  def isRunningLocally(): Boolean
  /**
   * Adds a (Java friendly) listener to be executed on task completion.
   * This will be called in all situation - success, failure, or cancellation.
   * An example use is for HadoopRDD to register a callback to close the input stream.
   */
  def addTaskCompletionListener(listener: TaskCompletionListener): TaskContext
  /**
   * Adds a listener in the form of a Scala closure to be executed on task completion.
   * This will be called in all situations - success, failure, or cancellation.
   * An example use is for HadoopRDD to register a callback to close the input stream.
   */
  def addTaskCompletionListener(f: (TaskContext) => Unit): TaskContext
  /**
   * Adds a callback function to be executed on task completion. An example use
   * is for HadoopRDD to register a callback to close the input stream.
   * Will be called in any situation - success, failure, or cancellation.
   *
   * @param f Callback function.
   */
  @deprecated("use addTaskCompletionListener", "1.2.0")
  def addOnCompleteCallback(f: () => Unit)
  /**
   * The ID of the stage that this task belong to.
   */
  def stageId(): Int
  /**
   * The ID of the RDD partition that is computed by this task.
   */
  def partitionId(): Int
  /**
   * How many times this task has been attempted. The first task attempt will be assigned
   * attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
   */
  def attemptNumber(): Int
  /** Deprecated alias — use [[attemptNumber]] instead. */
  @deprecated("use attemptNumber", "1.3.0")
  def attemptId(): Long
  /**
   * An ID that is unique to this task attempt (within the same SparkContext, no two task attempts
   * will share the same attempt ID). This is roughly equivalent to Hadoop's TaskAttemptID.
   */
  def taskAttemptId(): Long
  /** ::DeveloperApi:: Metrics (bytes read/written, GC time, etc.) for this task. */
  @DeveloperApi
  def taskMetrics(): TaskMetrics
  /**
   * ::DeveloperApi::
   * Returns all metrics sources with the given name which are associated with the instance
   * which runs the task. For more information see [[org.apache.spark.metrics.MetricsSystem!]].
   */
  @DeveloperApi
  def getMetricsSources(sourceName: String): Seq[Source]
  /**
   * Returns the manager for this task's managed memory.
   */
  private[spark] def taskMemoryManager(): TaskMemoryManager
  /**
   * Register an accumulator that belongs to this task. Accumulators must call this method when
   * deserializing in executors.
   */
  private[spark] def registerAccumulator(a: Accumulable[_, _]): Unit
  /**
   * Return the local values of internal accumulators that belong to this task. The key of the Map
   * is the accumulator id and the value of the Map is the latest accumulator local value.
   */
  private[spark] def collectInternalAccumulators(): Map[Long, Any]
  /**
   * Return the local values of accumulators that belong to this task. The key of the Map is the
   * accumulator id and the value of the Map is the latest accumulator local value.
   */
  private[spark] def collectAccumulators(): Map[Long, Any]
  /**
   * Accumulators for tracking internal metrics indexed by the name.
   */
  private[spark] val internalMetricsToAccumulators: Map[String, Accumulator[Long]]
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/TaskContext.scala | Scala | apache-2.0 | 6,493 |
/*
* Copyright 2017-2020 Aleksey Fomkin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package korolev.internal
import korolev.internal.Frontend.ModifyDomProcedure
import levsha.Id
import levsha.impl.DiffRenderContext.ChangesPerformer
import scala.collection.mutable
/**
 * Encodes DOM diff operations into a flat buffer of procedure codes and
 * arguments, in the wire order expected by the frontend's modifyDom handler.
 */
private[korolev] class RemoteDomChangesPerformer extends ChangesPerformer {

  /** Accumulated procedure stream; drained by the caller after a diff run. */
  val buffer: mutable.ArrayBuffer[Any] =
    mutable.ArrayBuffer.empty[Any]

  // The default HTML namespace is transmitted as the numeric sentinel 0
  // instead of the full URI to keep the payload compact.
  private def encodedXmlNs(xmlNs: String): Any =
    if (xmlNs eq levsha.XmlNs.html.uri) 0 else xmlNs

  def remove(id: Id): Unit =
    buffer ++= Seq(
      ModifyDomProcedure.Remove.code,
      id.parent.get.mkString,
      id.mkString
    )

  def createText(id: Id, text: String): Unit =
    buffer ++= Seq(
      ModifyDomProcedure.CreateText.code,
      id.parent.get.mkString,
      id.mkString,
      text
    )

  def create(id: Id, xmlNs: String, tag: String): Unit =
    buffer ++= Seq(
      ModifyDomProcedure.Create.code,
      // The root node has no parent; "0" stands in for it on the wire.
      id.parent.fold("0")(_.mkString),
      id.mkString,
      encodedXmlNs(xmlNs),
      tag
    )

  def removeStyle(id: Id, name: String): Unit =
    buffer ++= Seq(ModifyDomProcedure.RemoveStyle.code, id.mkString, name)

  def setStyle(id: Id, name: String, value: String): Unit =
    buffer ++= Seq(ModifyDomProcedure.SetStyle.code, id.mkString, name, value)

  def setAttr(id: Id, xmlNs: String, name: String, value: String): Unit =
    buffer ++= Seq(
      ModifyDomProcedure.SetAttr.code,
      id.mkString,
      encodedXmlNs(xmlNs),
      name,
      value,
      false
    )

  def removeAttr(id: Id, xmlNs: String, name: String): Unit =
    buffer ++= Seq(
      ModifyDomProcedure.RemoveAttr.code,
      id.mkString,
      encodedXmlNs(xmlNs),
      name,
      false
    )
}
| fomkin/korolev | modules/korolev/src/main/scala/korolev/internal/RemoteDomChangesPerformer.scala | Scala | apache-2.0 | 2,516 |
package nest.sparkle.time.transform
import spray.json._
/** Converts a MinMax to/from a two-element json array: `[min, max]`. */
object MinMaxJson extends DefaultJsonProtocol {
  implicit def MinMaxFormat[T: JsonFormat]: JsonFormat[MinMax[T]] = {
    new JsonFormat[MinMax[T]] {
      // Serialize as a two-element array, min first.
      def write(minMax: MinMax[T]): JsValue = {
        JsArray(minMax.min.toJson, minMax.max.toJson)
      }
      // Accept exactly a two-element array; anything else (wrong arity,
      // non-array) is a deserialization error.
      def read(value: JsValue): MinMax[T] = {
        value match {
          case JsArray(Vector(min, max)) =>
            MinMax(min.convertTo[T], max.convertTo[T])
          case x => throw new DeserializationException(s"MinMax expected, got $x")
        }
      }
    }
  }
}
import MinMaxJson.MinMaxFormat
/** Convert a KeyValueRanges to json in the following shape:
 *
 * [
 *   ["keyRange", [0, 100]],
 *   ["valueRange", [2.1, 2.94]]
 * ]
 *
 */
object KeyValueRangesJson extends DefaultJsonProtocol {
  /** json read/write formatting KeyValueRanges */
  implicit def KeyValueRangesFormat[T: JsonFormat, U: JsonFormat]: JsonFormat[KeyValueRanges[T, U]] = {
    new JsonFormat[KeyValueRanges[T, U]] {
      def write(limits: KeyValueRanges[T, U]): JsValue = {
        val keyRangeProperty = JsArray("keyRange".toJson, limits.keyRange.toJson)
        val valueRangeProperty = JsArray("valueRange".toJson, limits.valueRange.toJson)
        JsArray(keyRangeProperty, valueRangeProperty)
      }
      // Order-sensitive: "keyRange" must appear before "valueRange" in the
      // incoming array, mirroring exactly what write() produces.
      def read(value: JsValue): KeyValueRanges[T, U] = {
        value match {
          case JsArray(Vector(
            JsArray(Vector(JsString("keyRange"), keyRangeJs)),
            JsArray(Vector(JsString("valueRange"), valueRangeJs))
            )) =>
            val keyRange = keyRangeJs.convertTo[MinMax[T]]
            val valueRange = valueRangeJs.convertTo[MinMax[U]]
            KeyValueRanges(keyRange, valueRange)
          case x => throw new DeserializationException(s"KeyValueRanges expected, got $x")
        }
      }
    }
  }
  /** the json representation of KeyValueRanges on an empty column:
   *  both ranges serialized as empty arrays. */
  val Empty = Seq(
    JsArray("keyRange".toJson, JsArray()),
    JsArray("valueRange".toJson, JsArray())
  )
}
| mighdoll/sparkle | protocol/src/main/scala/nest/sparkle/time/transform/KeyValueRangesJson.scala | Scala | apache-2.0 | 2,122 |
package com.divisiblebyzero.hakea.processor
import akka.actor.{ Actor, Props }
import com.divisiblebyzero.hakea.config.Configuration
import com.divisiblebyzero.hakea.model.Project
import com.divisiblebyzero.hakea.solr.{ CommitInputDocuments, InputDocumentDispatcher }
import com.divisiblebyzero.hakea.util.Logging
import org.eclipse.jgit.lib.{ Ref, Repository }
/** Messages understood by [[IndexProcessor]] (sealed for exhaustive matching). */
sealed trait IndexProcessorRequest
// Kicks off indexing for a repository: commits first, then files (see IndexProcessor.receive).
case class IndexRepositoryFor(project: Project, repository: Repository, refs: List[Ref]) extends IndexProcessorRequest
// Signals the commit-history pass finished — presumably sent by CommitIndexProcessor; confirm against that actor.
case class FinishedIndexingCommitsFor(project: Project, repository: Repository, refs: List[Ref]) extends IndexProcessorRequest
// Signals the file pass finished — presumably sent by FileIndexProcessor; confirm against that actor.
case class FinishedIndexingFilesFor(project: Project, repository: Repository, refs: List[Ref]) extends IndexProcessorRequest
/**
 * Coordinates repository indexing: drives the commit pass, then the file
 * pass, then asks the Solr dispatcher to commit its input documents.
 */
class IndexProcessor(configuration: Configuration) extends Actor with Logging {

  protected val commitIndexProcessor =
    context.actorOf(Props(new CommitIndexProcessor(configuration)), "commitIndexProcessor")

  protected val fileIndexProcessor =
    context.actorOf(Props(new FileIndexProcessor(configuration)), "fileIndexProcessor")

  // Runs on a dedicated dispatcher so Solr I/O cannot starve the indexers.
  protected val inputDocumentDispatcher =
    context.actorOf(Props(new InputDocumentDispatcher(configuration))
      .withDispatcher("hakea.dispatcher.input-document-dispatcher"), "inputDocumentDispatcher")

  def receive = {
    case IndexRepositoryFor(project, repository, refs) =>
      commitIndexProcessor ! IndexCommitsFor(project, repository, refs)

    case FinishedIndexingCommitsFor(project, repository, refs) =>
      log.info(s"Finished indexing the commit history of ${project.name}.")
      fileIndexProcessor ! IndexFilesFor(project, repository, refs)

    case FinishedIndexingFilesFor(project, repository, refs) =>
      log.info(s"Finished indexing files for ${project.name}.")
      inputDocumentDispatcher ! CommitInputDocuments
  }
}
| eczarny/hakea | hakea-indexing-core/src/main/scala/com/divisiblebyzero/hakea/processor/IndexProcessor.scala | Scala | mit | 1,923 |
package controllers
import lila.app._
import views._
/** CRUD controller for site events; mutations require the ManageEvent permission. */
final class Event(env: Env) extends LilaController(env) {

  private def api = env.event.api

  // Public page for a single enabled event; 404s for unknown/disabled ids.
  def show(id: String) =
    Open { implicit ctx =>
      OptionOk(api oneEnabled id) { event =>
        html.event.show(event)
      }
    }
  // Admin listing of all events (enabled or not).
  def manager =
    Secure(_.ManageEvent) { implicit ctx => _ =>
      api.list map { events =>
        html.event.manager(events)
      }
    }
  def edit(id: String) =
    Secure(_.ManageEvent) { implicit ctx => _ =>
      OptionOk(api one id) { event =>
        html.event.edit(event, api editForm event)
      }
    }
  // Persists an edit; re-renders the form on validation errors, otherwise
  // redirects back to the edit page with a success flash.
  def update(id: String) =
    SecureBody(_.ManageEvent) { implicit ctx => me =>
      OptionFuResult(api one id) { event =>
        implicit val req = ctx.body
        api
          .editForm(event)
          .bindFromRequest()
          .fold(
            err => BadRequest(html.event.edit(event, err)).fuccess,
            data => api.update(event, data, me.user) inject Redirect(routes.Event.edit(id)).flashSuccess
          )
      }
    }
  def form =
    Secure(_.ManageEvent) { implicit ctx => _ =>
      Ok(html.event.create(api.createForm)).fuccess
    }
  def create =
    SecureBody(_.ManageEvent) { implicit ctx => me =>
      implicit val req = ctx.body
      api.createForm
        .bindFromRequest()
        .fold(
          err => BadRequest(html.event.create(err)).fuccess,
          data =>
            api.create(data, me.id) map { event =>
              Redirect(routes.Event.edit(event.id)).flashSuccess
            }
        )
    }
  // Pre-fills the creation form from an existing event ("cloneE" avoids
  // clashing with Object#clone).
  def cloneE(id: String) =
    Secure(_.ManageEvent) { implicit ctx => _ =>
      OptionFuResult(api one id) { old =>
        val event = api clone old
        Ok(html.event.create(api editForm event)).fuccess
      }
    }
}
| luanlv/lila | app/controllers/Event.scala | Scala | mit | 1,801 |
package io.coding.me.m2p2.core.actor
import akka.actor.ActorLogging
import akka.actor.Actor
import akka.actor.Props
import org.joda.time.DateTime
import io.coding.me.m2p2.core.actor.artifact.ArtifactCollector
/**
 * Companion object for [[RepositoryReceptionist]].
 */
object RepositoryReceptionist {
  /**
   * Factory method for the actor system; stamps the receptionist with its
   * creation time.
   */
  def props(repositoryId: RepositoryId): Props = Props(new RepositoryReceptionist(repositoryId, DateTime.now()))
}
/**
 * Per-repository front door: every artifact request for this repository is
 * handed to a single child [[ArtifactCollector]].
 */
class RepositoryReceptionist(repositoryId: RepositoryId, created: DateTime) extends Actor with ActorLogging {

  log.info(s"Creating repository-receptionist ${repositoryId}")

  val artifactCollectorRef = context.actorOf(ArtifactCollector.props(repositoryId), "actor-collector")

  override def receive = {
    // Both request kinds go to the collector; forward preserves the original
    // sender so replies bypass this receptionist.
    case request @ (_: InsertArtifactRequest | _: DeleteArtifactRequest) =>
      artifactCollectorRef.forward(request)
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.scope
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.apache.spark.Logging
import org.apache.spark.scheduler.StageInfo
import org.apache.spark.storage.StorageLevel
/**
 * A representation of a generic cluster graph used for storing information on RDD operations.
 *
 * Each graph is defined with a set of edges and a root cluster, which may contain children
 * nodes and children clusters. Additionally, a graph may also have edges that enter or exit
 * the graph from nodes that belong to adjacent graphs.
 */
private[ui] case class RDDOperationGraph(
    edges: Seq[RDDOperationEdge],
    outgoingEdges: Seq[RDDOperationEdge],
    incomingEdges: Seq[RDDOperationEdge],
    rootCluster: RDDOperationCluster)

/** A node in an RDDOperationGraph. This represents an RDD; `cached` is true
 *  when the RDD has a storage level other than NONE. */
private[ui] case class RDDOperationNode(id: Int, name: String, cached: Boolean)

/**
 * A directed edge connecting two nodes in an RDDOperationGraph.
 * This represents an RDD dependency, pointing from parent (fromId) to child (toId).
 */
private[ui] case class RDDOperationEdge(fromId: Int, toId: Int)
/**
 * A cluster that groups nodes together in an RDDOperationGraph.
 *
 * This represents any grouping of RDDs, including operation scopes (e.g. textFile, flatMap),
 * stages, jobs, or any higher level construct. A cluster may be nested inside of other clusters.
 */
private[ui] class RDDOperationCluster(val id: String, private var _name: String) {
  private val _childNodes = new ListBuffer[RDDOperationNode]
  private val _childClusters = new ListBuffer[RDDOperationCluster]

  def name: String = _name
  // Name is mutable so a cluster created with a placeholder can be renamed later.
  def setName(n: String): Unit = { _name = n }

  // NOTE(review): iterator.toSeq over a mutable buffer may be lazy depending
  // on the Scala version — callers presumably consume it before further
  // mutation; confirm before changing the collection conversion.
  def childNodes: Seq[RDDOperationNode] = _childNodes.iterator.toSeq
  def childClusters: Seq[RDDOperationCluster] = _childClusters.iterator.toSeq
  def attachChildNode(childNode: RDDOperationNode): Unit = { _childNodes += childNode }
  def attachChildCluster(childCluster: RDDOperationCluster): Unit = {
    _childClusters += childCluster
  }

  /** Return all the nodes which are cached, recursing into child clusters. */
  def getCachedNodes: Seq[RDDOperationNode] = {
    _childNodes.filter(_.cached) ++ _childClusters.flatMap(_.getCachedNodes)
  }
}
private[ui] object RDDOperationGraph extends Logging {

  val STAGE_CLUSTER_PREFIX = "stage_"

  /**
   * Construct a RDDOperationGraph for a given stage.
   *
   * The root cluster represents the stage, and all children clusters represent RDD operations.
   * Each node represents an RDD, and each edge represents a dependency between two RDDs pointing
   * from the parent to the child.
   *
   * This does not currently merge common operation scopes across stages. This may be worth
   * supporting in the future if we decide to group certain stages within the same job under
   * a common scope (e.g. part of a SQL query).
   */
  def makeOperationGraph(stage: StageInfo): RDDOperationGraph = {
    val edges = new ListBuffer[RDDOperationEdge]
    val nodes = new mutable.HashMap[Int, RDDOperationNode]
    val clusters = new mutable.HashMap[String, RDDOperationCluster] // indexed by cluster ID

    // Root cluster is the stage cluster
    // Use a special prefix here to differentiate this cluster from other operation clusters
    val stageClusterId = STAGE_CLUSTER_PREFIX + stage.stageId
    val stageClusterName = s"Stage ${stage.stageId}" +
      { if (stage.attemptId == 0) "" else s" (attempt ${stage.attemptId})" }
    val rootCluster = new RDDOperationCluster(stageClusterId, stageClusterName)

    // Find nodes, edges, and operation scopes that belong to this stage
    stage.rddInfos.foreach { rdd =>
      edges ++= rdd.parentIds.map { parentId => RDDOperationEdge(parentId, rdd.id) }

      // TODO: differentiate between the intention to cache an RDD and whether it's actually cached
      val node = nodes.getOrElseUpdate(
        rdd.id, RDDOperationNode(rdd.id, rdd.name, rdd.storageLevel != StorageLevel.NONE))

      if (rdd.scope.isEmpty) {
        // This RDD has no encompassing scope, so we put it directly in the root cluster
        // This should happen only if an RDD is instantiated outside of a public RDD API
        rootCluster.attachChildNode(node)
      } else {
        // Otherwise, this RDD belongs to an inner cluster,
        // which may be nested inside of other clusters
        val rddScopes = rdd.scope.map { scope => scope.getAllScopes }.getOrElse(Seq.empty)
        val rddClusters = rddScopes.map { scope =>
          val clusterId = scope.id
          val clusterName = scope.name.replaceAll("\\\\n", "\\\\\\\\n")
          clusters.getOrElseUpdate(clusterId, new RDDOperationCluster(clusterId, clusterName))
        }
        // Build the cluster hierarchy for this RDD
        // (scopes are ordered outermost-first, so each adjacent pair is parent/child)
        rddClusters.sliding(2).foreach { pc =>
          if (pc.size == 2) {
            val parentCluster = pc(0)
            val childCluster = pc(1)
            parentCluster.attachChildCluster(childCluster)
          }
        }
        // Attach the outermost cluster to the root cluster, and the RDD to the innermost cluster
        rddClusters.headOption.foreach { cluster => rootCluster.attachChildCluster(cluster) }
        rddClusters.lastOption.foreach { cluster => cluster.attachChildNode(node) }
      }
    }

    // Classify each edge as internal, outgoing or incoming
    // This information is needed to reason about how stages relate to each other
    val internalEdges = new ListBuffer[RDDOperationEdge]
    val outgoingEdges = new ListBuffer[RDDOperationEdge]
    val incomingEdges = new ListBuffer[RDDOperationEdge]
    edges.foreach { case e: RDDOperationEdge =>
      val fromThisGraph = nodes.contains(e.fromId)
      val toThisGraph = nodes.contains(e.toId)
      (fromThisGraph, toThisGraph) match {
        case (true, true) => internalEdges += e
        case (true, false) => outgoingEdges += e
        case (false, true) => incomingEdges += e
        // should never happen
        case _ => logWarning(s"Found an orphan edge in stage ${stage.stageId}: $e")
      }
    }

    RDDOperationGraph(internalEdges, outgoingEdges, incomingEdges, rootCluster)
  }

  /**
   * Generate the content of a dot file that describes the specified graph.
   *
   * Note that this only uses a minimal subset of features available to the DOT specification.
   * Part of the styling must be done here because the rendering library must take certain
   * attributes into account when arranging the graph elements. More style is added in the
   * visualization later through post-processing in JavaScript.
   *
   * For the complete DOT specification, see http://www.graphviz.org/Documentation/dotguide.pdf.
   */
  def makeDotFile(graph: RDDOperationGraph): String = {
    val dotFile = new StringBuilder
    dotFile.append("digraph G {\\n")
    dotFile.append(makeDotSubgraph(graph.rootCluster, indent = "  "))
    graph.edges.foreach { edge => dotFile.append(s"""  ${edge.fromId}->${edge.toId};\\n""") }
    dotFile.append("}")
    val result = dotFile.toString()
    logDebug(result)
    result
  }

  /** Return the dot representation of a node in an RDDOperationGraph. */
  // NOTE(review): node names are interpolated into a quoted label without
  // escaping — a name containing '"' would break the DOT output; confirm
  // whether RDD names can contain quotes.
  private def makeDotNode(node: RDDOperationNode): String = {
    s"""${node.id} [label="${node.name} [${node.id}]"]"""
  }

  /** Return the dot representation of a subgraph in an RDDOperationGraph. */
  private def makeDotSubgraph(cluster: RDDOperationCluster, indent: String): String = {
    val subgraph = new StringBuilder
    subgraph.append(indent + s"subgraph cluster${cluster.id} {\\n")
    subgraph.append(indent + s"""  label="${cluster.name}";\\n""")
    cluster.childNodes.foreach { node =>
      subgraph.append(indent + s"  ${makeDotNode(node)};\\n")
    }
    cluster.childClusters.foreach { cscope =>
      subgraph.append(makeDotSubgraph(cscope, indent + "  "))
    }
    subgraph.append(indent + "}\\n")
    subgraph.toString()
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/ui/scope/RDDOperationGraph.scala | Scala | apache-2.0 | 8,634 |
package org.yotchang4s.pixiv.user
import org.yotchang4s.pixiv.Identity
import org.yotchang4s.pixiv.Entity
/** Typed identifier for a pixiv user, wrapping the raw string id. */
case class UserId(value: String) extends Identity[String]
/**
 * Immutable pixiv user entity.
 *
 * @param identity        unique identifier of this user
 * @param name            display name of the user
 * @param profileImageUrl URL of the user's profile image
 */
class User(
  val identity: UserId,
  val name: String,
  val profileImageUrl: String) extends Entity[UserId] {
}
package controllers
import javax.inject.Inject
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.libs.json.Json
import play.api.mvc._
import play.api.routing.JavaScriptReverseRouter
import scala.concurrent.Future
/**
 * Main application controller: serves the public pages, the manager
 * (back-office) templates and the JavaScript reverse-router.
 */
class Application @Inject()(val messagesApi: MessagesApi) extends Controller with I18nSupport {

  /** Renders the public landing page. */
  def index = Action {
    Ok(views.html.index())
  }

  /** Renders the landing page of the manager area. */
  def indexManager = Action { implicit request =>
    Ok(views.html.manager.index())
  }

  // Sample payload served by `list`. NOTE: the previous `List(1 to 10)` wrapped
  // the whole Range in a single-element list, which serialised as a nested JSON
  // array `[[1,...,10]]`; `(1 to 10).toList` yields the intended flat `[1,...,10]`.
  val myList: List[Int] = (1 to 10).toList

  /** Returns `myList` serialised as a JSON array. */
  def list = Action {
    Ok(Json.toJson(myList))
  }

  /**
   * Resolves a manager-area template by name.
   * Unknown names answer 404 so bad client-side routes fail loudly.
   */
  def viewManager(template: String) = Action.async { implicit request =>
    Future.successful(
      template match {
        case "index" => Ok("")
        case "home" => Ok(views.html.manager.home())
        case "menu" => Ok(views.html.manager.menu.menu())
        case "aside" => Ok(views.html.manager.menu.aside())
        case "list_user" => Ok(views.html.manager.list.list_user())
        case "info_user" => Ok(views.html.manager.menu.info_user())
        case "info_home" => Ok(views.html.manager.infos.info_home())
        case "login_user" => Ok(views.html.manager.forms.login_user())
        case "confirmation_modal" => Ok(views.html.alerts.ui_confirmation_modal())
        case "list_category" => Ok(views.html.manager.list.lista_category())
        case "list_product" => Ok(views.html.manager.list.list_product())
        case "list_gallery" => Ok(views.html.manager.list.list_gallery())
        case "list_contact" => Ok(views.html.manager.list.list_contacts())
        case _ => NotFound
      }
    )
  }

  /**
   * Resolves a public template by name; unknown names answer 404.
   */
  def view(template: String) = Action.async { implicit request =>
    Future.successful(
      template match {
        case "home" => Ok(views.html.home())
        case "lista_produtos" => Ok(views.html.lista_produtos())
        case "produto" => Ok(views.html.produto())
        case "categorias" => Ok(views.html.categorias())
        case "galeria" => Ok(views.html.galeria())
        case "contato" => Ok(views.html.contato())
        case "saiba" => Ok(views.html.saiba())
        case "ui_alert" => Ok(views.html.alerts.ui_alert())
        case _ => NotFound
      }
    )
  }

  /** Exposes the (currently empty) JavaScript reverse-router as `jsRoutes`. */
  def jsRoutes = Action { implicit request =>
    Ok(
      JavaScriptReverseRouter("jsRoutes")(
      )).as("text/javascript")
  }
}
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.common
import com.intel.analytics.bigdl.utils.RandomGenerator
import org.apache.hadoop.fs.Path
import org.scalatest.{FlatSpec, Matchers}
class UtilsSpec extends FlatSpec with Matchers {

  // Fixture: the `qa` resource directory bundled on the test classpath.
  val path: String = getClass.getClassLoader.getResource("qa").getPath
  val txtRelations: String = path + "/relations.txt"

  "Utils listFiles" should "work properly" in {
    // Non-recursive listing only sees the immediate children...
    val topLevelPaths = Utils.listPaths(path)
    assert(topLevelPaths.size == 3)
    // ...whereas the recursive listing walks the whole tree.
    val allPaths = Utils.listPaths(path, true)
    assert(allPaths.size == 13)
  }

  "Utils readBytes" should "work properly" in {
    // Determine the expected length directly from the stream, then verify
    // that readBytes returns exactly that many bytes.
    val stream = Utils.open(txtRelations)
    val expectedLength = stream.available()
    stream.close()
    val content = Utils.readBytes(txtRelations)
    assert(content.length == expectedLength)
  }

  "Utils saveBytes" should "work properly" in {
    val fs = Utils.getFileSystem(path)
    // Write a uniquely named temporary file (timestamp-based name).
    val tmpName = System.currentTimeMillis()
    val payload = new Array[Byte](1000)
    Utils.saveBytes(payload, path + "/" + tmpName)
    // Schedule the temporary file for deletion when the filesystem closes.
    fs.deleteOnExit(new Path(path + "/" + tmpName))
    fs.close()
  }
}
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/common/UtilsSpec.scala | Scala | apache-2.0 | 1,773 |
package com.itszuvalex.itszulib.api.multiblock
import net.minecraft.world.World
/**
* @author Itszuvalex
* Interface for MultiBlock components for easy implementation.
*/
trait IMultiBlockComponent {
  /**
   * @return True if this is in valid MultiBlock
   */
  def isValidMultiBlock: Boolean

  /**
   * Attempts to form the multiblock this component belongs to.
   *
   * @param world the world containing the blocks
   * @param x x coordinate of the controller block
   * @param y y coordinate of the controller block
   * @param z z coordinate of the controller block
   * @return True if correctly forms, given controller block at x,y,z.
   */
  def formMultiBlock(world: World, x: Int, y: Int, z: Int): Boolean

  /**
   * Attempts to break apart the multiblock this component belongs to.
   *
   * @param world the world containing the blocks
   * @param x x coordinate of the controller block
   * @param y y coordinate of the controller block
   * @param z z coordinate of the controller block
   * @return True if breaks without errors, given controller block at x,y,z.
   */
  def breakMultiBlock(world: World, x: Int, y: Int, z: Int): Boolean

  /**
   * @return MultiBlockInfo associated with this MultiBlockComponent
   */
  def getInfo: MultiBlockInfo
}
| BlockWorker/ItszuLib | src/main/scala/com/itszuvalex/itszulib/api/multiblock/IMultiBlockComponent.scala | Scala | gpl-2.0 | 816 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.ebenezer
package scrooge
import com.twitter.scrooge.ThriftStruct
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf
import parquet.hadoop.api.WriteSupport
import parquet.hadoop.api.WriteSupport.WriteContext
import parquet.hadoop.mapred.DeprecatedParquetOutputFormat
import parquet.io.ColumnIOFactory
import parquet.io.api.RecordConsumer
import parquet.schema.MessageType
import parquet.thrift.ParquetWriteProtocol
import parquet.thrift.ThriftSchemaConverter
import parquet.thrift.struct.ThriftType.StructType
/**
 * Parquet [[WriteSupport]] that serializes Scrooge-generated [[ThriftStruct]]s.
 *
 * Lifecycle (driven by parquet): `init` is called once with the job
 * configuration, then `prepareForWrite` with the record consumer, and finally
 * `write` once per record.
 */
class ScroogeWriteSupport[A <: ThriftStruct] extends WriteSupport[A] {
  /* these rely on the WriteSupport lifecycle, it is horrible and unpleasant,
     but they are left as nullable fields to avoid unpacking on every write
     (with no recourse for failure anyway) */
  // Parquet message schema derived from the thrift class; assigned in init().
  var schema: MessageType = null
  // Thrift struct metadata for the record type; assigned in init().
  var struct: StructType = null
  // Protocol that writes thrift records into parquet; assigned in prepareForWrite().
  var parquetWriteProtocol: ParquetWriteProtocol = null

  // Resolves the thrift class from the configuration and derives both the
  // thrift struct description and the parquet schema from it. The extra
  // metadata entries are embedded in the parquet footer.
  def init(config: Configuration): WriteContext = {
    val thrift = ScroogeReadWriteSupport.getThriftClass[A](config, ScroogeWriteSupport.thriftClass)
    val converter = new ScroogeStructConverter
    struct = converter.convert(thrift)
    schema = ThriftSchemaConverter.convertWithoutProjection(struct)
    val extra = new java.util.HashMap[String, String]
    extra.put("thrift.class", thrift.getName)
    extra.put("thrift.descriptor", struct.toJSON)
    new WriteContext(schema, extra)
  }

  // Binds the record consumer to a write protocol for the schema built in init().
  def prepareForWrite(consumer: RecordConsumer): Unit = {
    val io = new ColumnIOFactory().getColumnIO(schema)
    parquetWriteProtocol = new ParquetWriteProtocol(consumer, io, struct)
  }

  // Writes a single record; requires prepareForWrite() to have been called.
  def write(record: A): Unit =
    record.write(parquetWriteProtocol)
}
object ScroogeWriteSupport {
  /** Configuration key under which the thrift class name is stored. */
  val thriftClass = "parquet.scrooge.write.class"

  /**
   * Registers [[ScroogeWriteSupport]] as the parquet write-support class for the
   * job and records the thrift class `A` in the configuration.
   */
  def setAsParquetSupportClass[A <: ThriftStruct : Manifest](conf: JobConf): Unit = {
    DeprecatedParquetOutputFormat.setWriteSupportClass(conf, classOf[ScroogeWriteSupport[_]])
    ScroogeReadWriteSupport.setThriftClass[A](conf, thriftClass)
  }
}
| CommBank/ebenezer | core/src/main/scala/au/com/cba/omnia/ebenezer/scrooge/ScroogeWriteSupport.scala | Scala | apache-2.0 | 2,677 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.licenses
import org.joda.time.DateTime
import com.normation.rudder.domain.licenses.NovaLicense
import com.normation.inventory.domain.NodeId
/**
* A service that handles Nova licenses files
* @author Nicolas CHARLES
*
*/
trait NovaLicenseService {

  /**
   * Return the license that holds the given policy server node,
   * or None if no license is registered for it.
   *
   * @param nodeId id of the policy server node
   * @return the Nova license for that node, if any
   */
  def findLicenseForNode(nodeId: NodeId) : Option[NovaLicense]

  /**
   * Add a license file
   * @param uuid : the policy server that holds this license
   * @param licenseNumber : number of hosts authorized
   * @param expirationDate : expiration date of the licence
   * @param file : path to the licence file
   */
  // NOTE(review): kept commented out in the original source — not yet part of the API.
  // def saveLicenseFile(uuid : String, licenseNumber : Int, expirationDate : DateTime, file : String)
}
package uk.gov.dvla.vehicles.acquire.gatling
import io.gatling.core.Predef._
import io.gatling.core.feeder._
/**
 * Gatling scenarios for the vehicle-acquisition journey. Each scenario feeds a
 * CSV data set into the shared [[Chains]] steps and aborts the whole block on
 * the first failing step (`exitBlockOnFail`).
 */
object Scenarios {

  /** Smoke test: only checks that the static assets can be fetched. */
  def verifyAssetsAreAccessible = {
    // No CSV data is required here, so the chains get an empty feeder.
    val emptyFeeder = RecordSeqFeederBuilder[String](records = IndexedSeq.empty[Record[String]])
    val steps = new Chains(emptyFeeder)
    scenario("Verify assets are accessible")
      .exec(
        steps.verifyAssetsAreAccessible
      )
  }

  /** Happy path: a business keeper acquires a vehicle from the trade. */
  def newBusinessKeeperBuysAVehicleFromTheTrade = {
    val feeder = csv("data/happy/NewBusinessKeeperBuysAVehicleFromTheTrade.csv").circular
    val steps = new Chains(feeder)
    scenario("New business keeper buys a vehicle from the motor trade from start to finish")
      .exitBlockOnFail(
        exec(
          steps.beforeYouStart,
          steps.traderDetailsPage,
          steps.traderDetailsSubmit,
          steps.businessChooseYourAddress,
          steps.vehicleLookup,
          steps.vehicleLookupSubmitNewBusinessKeeper,
          steps.businessKeeperDetailsSubmit,
          steps.newKeeperChooseYourAddressSubmit,
          steps.taxOrSornVehicle,
          steps.completeAndConfirmSubmit
        )
      )
  }

  /** Same business-keeper journey, with every optional field populated. */
  def newBusinessKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn = {
    val feeder = csv("data/happy/NewBusinessKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn.csv").circular
    val steps = new Chains(feeder)
    val name = "New business keeper buys a vehicle from the motor trade with all optional data filled in " +
      "from start to finish"
    scenario(name)
      .exitBlockOnFail(
        exec(
          steps.beforeYouStart,
          steps.traderDetailsPage,
          steps.traderDetailsSubmit,
          steps.businessChooseYourAddress,
          steps.vehicleLookup,
          steps.vehicleLookupSubmitNewBusinessKeeper,
          steps.businessKeeperDetailsSubmit,
          steps.newKeeperChooseYourAddressSubmit,
          steps.taxOrSornVehicle,
          steps.completeAndConfirmSubmit
        )
      )
  }

  /** Happy path: a private keeper acquires a vehicle from the trade. */
  def newPrivateKeeperBuysAVehicleFromTheTrade = {
    val feeder = csv("data/happy/NewPrivateKeeperBuysAVehicleFromTheTrade.csv").circular
    val steps = new Chains(feeder)
    scenario("New private keeper buys a vehicle from the motor trade from start to finish")
      .exitBlockOnFail(
        exec(
          steps.beforeYouStart,
          steps.traderDetailsPage,
          steps.traderDetailsSubmit,
          steps.businessChooseYourAddress,
          steps.vehicleLookup,
          steps.vehicleLookupSubmitNewPrivateKeeper,
          steps.privateKeeperDetailsSubmit,
          steps.newKeeperChooseYourAddressSubmit,
          steps.taxOrSornVehicle,
          steps.completeAndConfirmSubmit
        )
      )
  }

  /** Same private-keeper journey, with every optional field populated. */
  def newPrivateKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn = {
    val feeder = csv("data/happy/NewPrivateKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn.csv").circular
    val steps = new Chains(feeder)
    val name = "New private keeper buys a vehicle from the motor trade with all optional data filled in " +
      "from start to finish"
    scenario(name)
      .exitBlockOnFail(
        exec(
          steps.beforeYouStart,
          steps.traderDetailsPage,
          steps.traderDetailsSubmit,
          steps.businessChooseYourAddress,
          steps.vehicleLookup,
          steps.vehicleLookupSubmitNewPrivateKeeper,
          steps.privateKeeperDetailsSubmit,
          steps.newKeeperChooseYourAddressSubmit,
          steps.taxOrSornVehicle,
          steps.completeAndConfirmSubmit
        )
      )
  }

  /** Sad path: the vehicle lookup fails and the journey stops there. */
  def vehicleLookupUnsuccessful = {
    val feeder = csv("data/sad/VehicleLookupUnsuccessful.csv").circular
    val steps = new Chains(feeder)
    scenario("Vehicle lookup is unsuccessful")
      .exitBlockOnFail(
        exec(
          steps.beforeYouStart,
          steps.traderDetailsPage,
          steps.traderDetailsSubmit,
          steps.businessChooseYourAddress,
          steps.vehicleLookup,
          steps.vehicleLookupUnsuccessfulSubmit
        )
      )
  }
}
| dvla/vehicles-acquire-online | gatling-tests/src/test/scala/uk/gov/dvla/vehicles/acquire/gatling/Scenarios.scala | Scala | mit | 4,021 |
/** This file is part of TextCompose, a program for producing PDF from text files.
* Copyright 2014 Jesper S Villadsen <jeschvi@gmail.com>
* License: GNU Affero General Public License version 3 or later.
* For full license text see LICENSE.txt or <http://www.gnu.org/licenses/>.
*/
package textcompose.core
/** Parsing and formatting helpers for numbers used by the tag language. */
object NumberFunctions {

  /** Roman numeral values in descending order, aligned index-by-index with `numerals`. */
  val values = List(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
  /** Roman numeral symbols corresponding to `values` (includes subtractive forms). */
  val numerals = List("M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I")

  /**
   * Converts a positive arabic number to Roman numerals using the standard
   * greedy algorithm over the value/numeral pairs.
   */
  private def arabicToRoman(number: Int): String = {
    val result = new StringBuilder
    var remainder = number
    for ((value, numeral) <- values.zip(numerals)) {
      // Greedily consume the largest remaining value as many times as it fits.
      while (remainder >= value) {
        remainder -= value
        result.append(numeral)
      }
    }
    result.toString
  }

  /**
   * Renders `number` as a Roman numeral.
   *
   * @param lowerCase whether to return the numeral in lower case
   * @param number    value in the range 0 to 3999; 0 is rendered as "N"
   * @throws TagError if the number is outside the supported range
   */
  def getRomanNumber(lowerCase: Boolean, number: Int): String = {
    if (number < 0 || number > 3999) {
      throw new TagError("Only supports presenting numbers between 0 and 3999 as Roman numbers")
    }
    val result = if (number == 0) "N" else arabicToRoman(number)
    if (lowerCase) result.toLowerCase else result
  }

  /**
   * Parses `s` as an integer. A leading '#' denotes hexadecimal notation,
   * otherwise decimal notation is assumed.
   *
   * @throws TagError if the string is empty or cannot be parsed in the indicated base
   */
  def getNumber(s: String): Int = {
    // Guard the s(0) access below: report empty input as a domain error
    // instead of an IndexOutOfBoundsException.
    if (s.isEmpty) throw new TagError("Expected a number but got an empty string.")
    if (s(0) == '#') {
      try {
        Integer.valueOf(s.substring(1), 16).intValue // HEX -> INT
      } catch {
        case e: Exception => throw new TagError("'" + s.substring(1) + "' is not a hexadecimal number.")
      }
    } else {
      try {
        s.toInt
      } catch {
        // Fixed grammar of the user-facing message ("a integer" -> "an integer").
        case e: Exception => throw new TagError("'" + s + "' is not an integer (in decimal notation).")
      }
    }
  }

  /**
   * Parses `s` as a float.
   *
   * @param purpose description of the value, used to build the error message
   * @throws TagError if the string is not a valid number
   */
  def getFloat(s: String, purpose: String): Float = {
    try {
      s.toFloat
    } catch {
      case e: Exception => throw new TagError(purpose + " should be a number. You wrote '" + s + "'.")
    }
  }
}
package eu.dnetlib.iis.common.spark.avro
import org.apache.avro.Schema
import org.apache.spark.sql.avro.SchemaConverters
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
* Support for reading avro datastores as dataframes.
*
* @param spark SparkSession instance.
*/
/**
 * Support for reading avro datastores as dataframes.
 *
 * @param spark SparkSession instance.
 */
class AvroDataFrameReader(val spark: SparkSession) extends Serializable {

  /**
   * Reads an avro datastore as a Spark dataframe described by an SQL schema.
   * Delegates to the avro-schema overload after converting the SQL schema.
   *
   * @param path   path to the datastore
   * @param schema SQL schema of the records
   * @return dataframe with the data read from the given path
   */
  def read(path: String, schema: StructType): DataFrame =
    read(path, SchemaConverters.toAvroType(schema))

  /**
   * Reads an avro datastore as a Spark dataframe described by an avro schema.
   *
   * @param path       path to the datastore
   * @param avroSchema avro schema of the records
   * @return dataframe with the data read from the given path
   */
  def read(path: String, avroSchema: Schema): DataFrame = {
    val raw = spark.read
      .format("avro")
      .option("avroSchema", avroSchema.toString)
      .load(path)
    // Re-apply the SQL schema derived from the avro schema so the resulting
    // dataframe carries exactly the declared record structure.
    val sqlType = SchemaConverters.toSqlType(avroSchema).dataType.asInstanceOf[StructType]
    spark.createDataFrame(raw.rdd, sqlType)
  }
}
| openaire/iis | iis-common/src/main/scala/eu/dnetlib/iis/common/spark/avro/AvroDataFrameReader.scala | Scala | apache-2.0 | 1,262 |
package scalaprops
import scala.concurrent.duration.Duration
import sbt.testing.Logger
object TestExecutorImpl {

  // Single shared executor: runs the computation synchronously on the calling
  // thread; the timeout is not enforced and shutdown is a no-op.
  private[this] val synchronousExecutor: TestExecutor = new TestExecutor {
    override def execute[A](timeout: Duration)(f: => A): A = f
    override def shutdown(): Unit = ()
  }

  /** Hands the shared synchronous executor to `f`; the logger is unused here. */
  def withExecutor[A](logger: Logger)(f: TestExecutor => A): A =
    f(synchronousExecutor)
}
| scalaprops/scalaprops | scalaprops/js/src/main/scala/scalaprops/TestExecutorImpl.scala | Scala | mit | 354 |
package fs2
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import org.typelevel.discipline.Laws
import org.scalatest.{ Args, AsyncFreeSpec, FreeSpec, Matchers, Status, Suite }
import org.scalatest.concurrent.{ AsyncTimeLimitedTests, TimeLimitedTests }
import org.scalatest.prop.{ Checkers, GeneratorDrivenPropertyChecks }
import org.scalatest.time.Span
/** Base class for synchronous fs2 specs with a per-test time limit. */
abstract class Fs2Spec extends FreeSpec with Fs2SpecLike with TimeLimitedTests with Checkers {
  val timeLimit: Span = timeout

  /** Registers each property of the given law rule set as an individual test. */
  def checkAll(name: String, ruleSet: Laws#RuleSet): Unit =
    ruleSet.all.properties.foreach { case (id, prop) =>
      s"${name}.${id}" in check(prop)
    }
}
/** Base class for asynchronous fs2 specs; runs tests on the global execution context. */
abstract class AsyncFs2Spec extends AsyncFreeSpec with Fs2SpecLike with AsyncTimeLimitedTests {
  val timeLimit: Span = timeout
  implicit override val executionContext: ExecutionContext = ExecutionContext.Implicits.global
}
/** Shared configuration for fs2 test suites (timeout, verbosity, property-check settings). */
trait Fs2SpecLike extends Suite
  with GeneratorDrivenPropertyChecks
  with Matchers {

  implicit val timeout: FiniteDuration = 60.seconds

  // Enabled by running the tests with -Dfs2.test.verbose.
  lazy val verbose: Boolean = sys.props.get("fs2.test.verbose").isDefined

  implicit override val generatorDrivenConfig: PropertyCheckConfiguration =
    PropertyCheckConfiguration(minSuccessful = 25, workers = 1)

  override def runTest(testName: String, args: Args): Status = {
    if (verbose) println("Starting " + testName)
    try {
      super.runTest(testName, args)
    } finally {
      // Always log completion, even when the test fails or aborts.
      if (verbose) println("Finished " + testName)
    }
  }
}
| zaneli/fs2 | core/shared/src/test/scala/fs2/Fs2Spec.scala | Scala | mit | 1,460 |
package no.skytteren.elasticala.index
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import org.elasticsearch.action.ActionListener
import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder
import org.elasticsearch.action.admin.indices.refresh.{ RefreshResponse => EsRefreshResponse }
import org.elasticsearch.client.{ Client => EsClient }
import no.skytteren.elasticala.Executor
import no.skytteren.elasticala.Index
import no.skytteren.elasticala.Request
import no.skytteren.elasticala.Response
import org.elasticsearch.action.ShardOperationFailedException
/** Request to refresh the given index. */
case class RefreshRequest(index: Index) extends Request

/** Shard-level outcome of a refresh operation, including any per-shard failures. */
case class RefreshResponse(totalShards: Int, successfulShards: Int, failedShards: Int, shardFailures: Seq[ShardOperationFailedException]) extends Response
/** Executes [[RefreshRequest]]s asynchronously against an Elasticsearch client. */
class RefreshExecutor extends Executor[RefreshRequest, RefreshResponse] {

  /**
   * Issues the refresh for the request's index and completes the returned
   * future with the shard-level result, or with the failure reported by
   * the Elasticsearch listener.
   */
  def execute(req: RefreshRequest, client: EsClient)(implicit ec: ExecutionContext): Future[RefreshResponse] = {
    val result = Promise[RefreshResponse]()
    // Bridge Elasticsearch's callback API to the Promise.
    val callback = new ActionListener[EsRefreshResponse] {
      override def onResponse(response: EsRefreshResponse): Unit =
        result.success(RefreshResponse(
          response.getTotalShards,
          response.getSuccessfulShards,
          response.getFailedShards,
          response.getShardFailures.toSeq))
      def onFailure(e: Throwable): Unit = result.failure(e)
    }
    client.admin().indices().prepareRefresh(req.index.value).execute(callback)
    result.future
  }
}
package com.arcusys.learn.liferay.util
import java.security.Key
/**
* User: Yulia.Glushonkova
* Date: 18.08.14
*/
object EncryptorUtilHelper {
  /** Thin wrapper delegating to Liferay's `Encryptor.encrypt(key, plainText)`. */
  def encrypt(key: Key, plainText: String) = com.liferay.util.Encryptor.encrypt(key, plainText)
}
| ViLPy/Valamis | learn-liferay620-services/src/main/scala/com/arcusys/learn/liferay/util/EncryptorUtilHelper.scala | Scala | lgpl-3.0 | 246 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package graph
import scala.runtime.BoxedUnit
import java.util.concurrent.atomic.AtomicLong
import scala.collection.JavaConversions._
import scala.collection.mutable
import org.apache.spark.Partitioner
import org.objectweb.asm.{ Opcodes, Type }
import org.objectweb.asm.signature.SignatureVisitor
import com.asakusafw.lang.compiler.api.reference.DataModelReference
import com.asakusafw.lang.compiler.extension.directio.{ DirectFileOutputModel, OutputPattern }
import com.asakusafw.lang.compiler.model.graph.{ ExternalOutput, Group }
import com.asakusafw.runtime.directio.DataFormat
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.{ StringOption, ValueOption }
import com.asakusafw.runtime.stage.directio.StringTemplate.Format
import com.asakusafw.spark.compiler.directio.OutputPatternGeneratorClassBuilder
import com.asakusafw.spark.compiler.graph.DirectOutputPrepareClassBuilder._
import com.asakusafw.spark.compiler.spi.NodeCompiler
import com.asakusafw.spark.compiler.util.SparkIdioms._
import com.asakusafw.spark.runtime.JobContext
import com.asakusafw.spark.runtime.directio.OutputPatternGenerator
import com.asakusafw.spark.runtime.directio.OutputPatternGenerator.Fragment
import com.asakusafw.spark.runtime.graph.{
Action,
DirectOutputPrepareFlat,
DirectOutputPrepareGroup,
SortOrdering,
Source
}
import com.asakusafw.spark.runtime.rdd.{ BranchKey, ShuffleKey }
import com.asakusafw.spark.tools.asm._
import com.asakusafw.spark.tools.asm.MethodBuilder._
import com.asakusafw.spark.tools.asm4s._
/**
 * Generates (via ASM) a concrete subclass of `DirectOutputPrepareFlat` for the
 * given external output operator: the emitted class carries the operator's
 * name, base path, resource pattern and data format as constant-returning
 * methods, and forwards its constructor arguments to the superclass.
 */
abstract class DirectOutputPrepareFlatClassBuilder(
  operator: ExternalOutput)(
  model: DirectFileOutputModel)(
  val label: String)(
  implicit val context: NodeCompiler.Context)
  extends ClassBuilder(
    Type.getType(
      s"L${GeneratedClassPackageInternalName}/${context.flowId}/graph/DirectOutputPrepareFlat$$${nextId(flat = true)};"), // scalastyle:ignore
    new ClassSignatureBuilder()
      .newSuperclass {
        _.newClassType(classOf[DirectOutputPrepareFlat[_]].asType) {
          _.newTypeArgument(SignatureVisitor.INSTANCEOF, operator.getOperatorPort.dataModelType)
        }
      },
    classOf[DirectOutputPrepareFlat[_]].asType)
  with LabelField {
  self: CacheStrategy =>

  // ASM type of the operator's data model, used throughout the generated code.
  private val dataModelType = operator.getOperatorPort.dataModelType

  // Emits a constructor (setup action, upstream sources, job context) that
  // forwards to the superclass constructor together with the data model manifest.
  override def defConstructors(ctorDef: ConstructorDef): Unit = {
    ctorDef.newInit(Seq(
      classOf[Action[Unit]].asType,
      classOf[Seq[(Source, BranchKey)]].asType,
      classOf[JobContext].asType),
      new MethodSignatureBuilder()
        .newParameterType {
          _.newClassType(classOf[Action[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[BoxedUnit].asType)
          }
        }
        .newParameterType {
          _.newClassType(classOf[Seq[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF) {
              _.newClassType(classOf[(_, _)].asType) {
                _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[Source].asType)
                  .newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[BranchKey].asType)
              }
            }
          }
        }
        .newParameterType(classOf[JobContext].asType)
        .newVoidReturnType()) { implicit mb =>
      val thisVar :: setupVar :: prevsVar :: jobContextVar :: _ = mb.argVars

      thisVar.push().invokeInit(
        superType,
        setupVar.push(),
        prevsVar.push(),
        manifest(dataModelType),
        jobContextVar.push())
      initMixIns()
    }
  }

  // Emits the constant accessor methods consumed by DirectOutputPrepareFlat.
  override def defMethods(methodDef: MethodDef): Unit = {
    super.defMethods(methodDef)

    methodDef.newMethod("name", classOf[String].asType, Seq.empty) { implicit mb =>
      `return`(ldc(operator.getName))
    }

    methodDef.newMethod("basePath", classOf[String].asType, Seq.empty) { implicit mb =>
      `return`(ldc(model.getBasePath))
    }

    methodDef.newMethod("resourcePattern", classOf[String].asType, Seq.empty) { implicit mb =>
      `return`(ldc(model.getResourcePattern))
    }

    // formatType(): Class[_ <: DataFormat[dataModelType]] — requires an explicit
    // generic signature so downstream reflection sees the bounded wildcard.
    methodDef.newMethod("formatType", classOf[Class[_ <: DataFormat[_]]].asType, Seq.empty,
      new MethodSignatureBuilder()
        .newReturnType {
          _.newClassType(classOf[Class[_]].asType) {
            _.newTypeArgument(SignatureVisitor.EXTENDS) {
              _.newClassType(classOf[DataFormat[_]].asType) {
                _.newTypeArgument(SignatureVisitor.INSTANCEOF, dataModelType)
              }
            }
          }
        }) { implicit mb =>
      `return`(ldc(model.getFormatClass.asType).asType(classOf[Class[_]].asType))
    }
  }
}
abstract class DirectOutputPrepareGroupClassBuilder(
operator: ExternalOutput)(
pattern: OutputPattern,
model: DirectFileOutputModel)(
val label: String)(
implicit val context: NodeCompiler.Context)
extends ClassBuilder(
Type.getType(
s"L${GeneratedClassPackageInternalName}/${context.flowId}/graph/DirectOutputPrepareGroup$$${nextId(flat = false)};"), // scalastyle:ignore
new ClassSignatureBuilder()
.newSuperclass {
_.newClassType(classOf[DirectOutputPrepareGroup[_]].asType) {
_.newTypeArgument(SignatureVisitor.INSTANCEOF, operator.getDataType.asType)
}
},
classOf[DirectOutputPrepareGroup[_]].asType)
with LabelField {
self: CacheStrategy =>
private val dataModelRef = operator.getOperatorPort.dataModelRef
private val dataModelType = operator.getOperatorPort.dataModelType
  // Declares the two lazily-initialized helper fields of the generated class.
  // Both are transient: they are not serialized and are recreated on demand by
  // the corresponding accessor methods (see the unlessNotNull pattern below).
  override def defFields(fieldDef: FieldDef): Unit = {
    super.defFields(fieldDef)

    // OutputPatternGenerator[dataModelType]: builds output file paths per record.
    fieldDef.newField(
      Opcodes.ACC_PRIVATE | Opcodes.ACC_TRANSIENT,
      "outputPatternGenerator",
      classOf[OutputPatternGenerator[_]].asType,
      new TypeSignatureBuilder()
        .newClassType(classOf[OutputPatternGenerator[_]].asType) {
          _.newTypeArgument(SignatureVisitor.INSTANCEOF, dataModelType)
        })

    // Ordering[ShuffleKey]: sort order applied to records within each group.
    fieldDef.newField(
      Opcodes.ACC_PRIVATE | Opcodes.ACC_TRANSIENT,
      "sortOrdering",
      classOf[SortOrdering].asType,
      new TypeSignatureBuilder()
        .newClassType(classOf[Ordering[_]].asType) {
          _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[ShuffleKey].asType)
        })
  }
  // Emits a constructor (setup action, upstream sources, partitioner, job
  // context) that forwards to the superclass constructor together with the
  // data model manifest.
  override def defConstructors(ctorDef: ConstructorDef): Unit = {
    ctorDef.newInit(Seq(
      classOf[Action[Unit]].asType,
      classOf[Seq[(Source, BranchKey)]].asType,
      classOf[Partitioner].asType,
      classOf[JobContext].asType),
      new MethodSignatureBuilder()
        .newParameterType {
          _.newClassType(classOf[Action[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[BoxedUnit].asType)
          }
        }
        .newParameterType {
          _.newClassType(classOf[Seq[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF) {
              _.newClassType(classOf[(_, _)].asType) {
                _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[Source].asType)
                  .newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[BranchKey].asType)
              }
            }
          }
        }
        .newParameterType(classOf[Partitioner].asType)
        .newParameterType(classOf[JobContext].asType)
        .newVoidReturnType()) { implicit mb =>
      val thisVar :: setupVar :: prevsVar :: partVar :: jobContextVar :: _ = mb.argVars

      thisVar.push().invokeInit(
        superType,
        setupVar.push(),
        prevsVar.push(),
        partVar.push(),
        manifest(dataModelType),
        jobContextVar.push())
      initMixIns()
    }
  }
  override def defMethods(methodDef: MethodDef): Unit = {
    super.defMethods(methodDef)
    // Logical name of this output stage, taken from the operator definition.
    methodDef.newMethod("name", classOf[String].asType, Seq.empty) { implicit mb =>
      `return`(ldc(operator.getName))
    }
    // Base path of the Direct I/O output, taken from the output model.
    methodDef.newMethod("basePath", classOf[String].asType, Seq.empty) { implicit mb =>
      `return`(ldc(model.getBasePath))
    }
    // Returns the concrete DataFormat class for this output's data type; the generic
    // signature (Class<? extends DataFormat<DataType>>) is built explicitly via ASM.
    methodDef.newMethod("formatType", classOf[Class[_ <: DataFormat[_]]].asType, Seq.empty,
      new MethodSignatureBuilder()
        .newReturnType {
          _.newClassType(classOf[Class[_]].asType) {
            _.newTypeArgument(SignatureVisitor.EXTENDS) {
              _.newClassType(classOf[DataFormat[_]].asType) {
                _.newTypeArgument(SignatureVisitor.INSTANCEOF, operator.getDataType.asType)
              }
            }
          }
        }) { implicit mb =>
      `return`(ldc(model.getFormatClass.asType).asType(classOf[Class[_]].asType))
    }
    // Bridge method: the erased newDataModel() delegates to the typed overload below.
    methodDef.newMethod(
      "newDataModel", classOf[DataModel[_]].asType, Seq.empty) { implicit mb =>
      val thisVar :: _ = mb.argVars
      `return`(thisVar.push().invokeV("newDataModel", dataModelType))
    }
    // Typed factory: instantiates a fresh data model via its no-arg constructor.
    methodDef.newMethod(
      "newDataModel", dataModelType, Seq.empty) { implicit mb =>
      `return`(pushNew0(dataModelType))
    }
    // Lazily builds (and caches in a field) the OutputPatternGenerator that renders
    // output resource paths from the configured resource pattern segments.
    methodDef.newMethod(
      "outputPatternGenerator", classOf[OutputPatternGenerator[_]].asType, Seq.empty,
      new MethodSignatureBuilder()
        .newReturnType {
          _.newClassType(classOf[OutputPatternGenerator[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, dataModelType)
          }
        }) { implicit mb =>
      val thisVar :: _ = mb.argVars
      thisVar.push().getField(
        "outputPatternGenerator", classOf[OutputPatternGenerator[_]].asType).unlessNotNull {
        thisVar.push().putField(
          "outputPatternGenerator", {
            val generator =
              pushNew(OutputPatternGeneratorClassBuilder.getOrCompile(dataModelRef))
            generator.dup().invokeInit(
              buildSeq { builder =>
                // Counts RANDOM segments so each gets a distinct seed below.
                var randoms = 0L
                pattern.getResourcePattern.foreach { segment =>
                  segment.getKind match {
                    // Literal text segment: emitted verbatim.
                    case OutputPattern.SourceKind.NOTHING =>
                      builder +=
                        pushObject(OutputPatternGenerator)
                          .invokeV("constant", classOf[Fragment].asType,
                            ldc(segment.getArgument))
                    // Segment derived from a data model property, optionally formatted.
                    case OutputPattern.SourceKind.PROPERTY =>
                      segment.getFormat match {
                        case Format.NATURAL =>
                          builder +=
                            pushObject(OutputPatternGenerator)
                              .invokeV("natural", classOf[Fragment].asType,
                                ldc(segment.getTarget.getName.toMemberName))
                        case f @ (Format.BYTE | Format.SHORT | Format.INT | Format.LONG
                          | Format.FLOAT | Format.DOUBLE | Format.DECIMAL
                          | Format.DATE | Format.DATETIME) =>
                          // Factory method name matches the lower-cased Format name.
                          builder +=
                            pushObject(OutputPatternGenerator)
                              .invokeV(f.name.toLowerCase, classOf[Fragment].asType,
                                ldc(segment.getTarget.getName.toMemberName),
                                ldc(segment.getArgument))
                        case _ =>
                          throw new AssertionError(
                            s"Unknown StringTemplate.Format: ${segment.getFormat}")
                      }
                    // Random-number segment: seeded per-segment for distinct sequences.
                    case OutputPattern.SourceKind.RANDOM =>
                      builder +=
                        pushObject(OutputPatternGenerator)
                          .invokeV("random", classOf[Fragment].asType,
                            ldc(0xcafebabe + randoms * 31),
                            ldc(segment.getRandomNumber.getLowerBound),
                            ldc(segment.getRandomNumber.getUpperBound))
                      randoms += 1
                    case _ =>
                      throw new AssertionError(
                        s"Unknown OutputPattern.SourceKind: ${segment.getKind}")
                  }
                }
              })
            generator.asType(classOf[OutputPatternGenerator[_]].asType)
          })
      }
      `return`(
        thisVar.push().getField(
          "outputPatternGenerator", classOf[OutputPatternGenerator[_]].asType))
    }
    // Lazily builds (and caches) the shuffle-key ordering derived from the
    // configured order specification (ascending/descending per property).
    methodDef.newMethod(
      "sortOrdering", classOf[SortOrdering].asType, Seq.empty,
      new MethodSignatureBuilder()
        .newReturnType {
          _.newClassType(classOf[Ordering[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, classOf[ShuffleKey].asType)
          }
        }) { implicit mb =>
      val thisVar :: _ = mb.argVars
      thisVar.push().getField("sortOrdering", classOf[SortOrdering].asType).unlessNotNull {
        thisVar.push().putField(
          "sortOrdering",
          sortOrdering(
            Seq(classOf[StringOption].asType),
            dataModelRef.orderingTypes(
              pattern.getOrders.map { order =>
                new Group.Ordering(
                  order.getTarget.getName,
                  if (order.isAscend) Group.Direction.ASCENDANT else Group.Direction.DESCENDANT)
              })))
      }
      `return`(thisVar.push().getField("sortOrdering", classOf[SortOrdering].asType))
    }
    // Bridge method: erased orderings(DataModel<?>) casts and delegates to the typed overload.
    methodDef.newMethod(
      "orderings", classOf[Seq[ValueOption[_]]].asType, Seq(classOf[DataModel[_]].asType),
      new MethodSignatureBuilder()
        .newParameterType {
          _.newClassType(classOf[DataModel[_]].asType) {
            _.newTypeArgument()
          }
        }
        .newReturnType {
          _.newClassType(classOf[Seq[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF) {
              _.newClassType(classOf[ValueOption[_]].asType) {
                _.newTypeArgument()
              }
            }
          }
        }) { implicit mb =>
      val thisVar :: dataModelVar :: _ = mb.argVars
      `return`(
        thisVar.push().invokeV("orderings", classOf[Seq[ValueOption[_]]].asType,
          dataModelVar.push().cast(dataModelType)))
    }
    // Typed orderings: extracts the ordered property values from a data model instance,
    // one getter invocation per configured order entry.
    methodDef.newMethod(
      "orderings", classOf[Seq[ValueOption[_]]].asType, Seq(dataModelType),
      new MethodSignatureBuilder()
        .newParameterType(dataModelType)
        .newReturnType {
          _.newClassType(classOf[Seq[_]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF) {
              _.newClassType(classOf[ValueOption[_]].asType) {
                _.newTypeArgument()
              }
            }
          }
        }) { implicit mb =>
      val thisVar :: dataModelVar :: _ = mb.argVars
      `return`(
        buildSeq { builder =>
          pattern.getOrders.foreach { order =>
            val property = dataModelRef.findProperty(order.getTarget.getName)
            builder +=
              dataModelVar.push().invokeV(
                property.getDeclaration.getName, property.getType.asType)
          }
        })
    }
  }
}
object DirectOutputPrepareClassBuilder {
  // Per-compilation-context counter pair: _1 numbers "flat" outputs, _2 the others.
  // Weakly keyed so entries disappear when a compiler context is collected.
  private[this] val curIds: mutable.Map[NodeCompiler.Context, (AtomicLong, AtomicLong)] =
    mutable.WeakHashMap.empty
  /** Returns the next sequential id for the given context and output kind. */
  def nextId(flat: Boolean)(implicit context: NodeCompiler.Context): Long = {
    val (flatIds, otherIds) =
      curIds.getOrElseUpdate(context, (new AtomicLong(0L), new AtomicLong(0L)))
    val counter = if (flat) flatIds else otherIds
    counter.getAndIncrement()
  }
}
| ashigeru/asakusafw-spark | compiler/src/main/scala/com/asakusafw/spark/compiler/graph/DirectOutputPrepareClassBuilder.scala | Scala | apache-2.0 | 16,162 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen
import org.apache.flink.api.common.io.GenericInputFormat
import org.apache.flink.table.data.GenericRowData
import org.apache.flink.table.planner.codegen.CodeGenUtils.newName
import org.apache.flink.table.planner.codegen.Indenter.toISC
import org.apache.flink.table.runtime.generated.GeneratedInput
import org.apache.flink.table.types.logical.LogicalType
/**
* A code generator for generating Flink [[GenericInputFormat]]s.
*/
object InputFormatCodeGenerator {
  /**
   * Generates a values input format that can be passed to Java compiler.
   *
   * @param ctx The code generator context
   * @param name Class name of the input format. Does not need to be unique,
   *             but has to be a valid Java class identifier.
   * @param records code for creating records
   * @param returnType expected return type
   * @param outRecordTerm term of the output
   * @tparam T Return type of the Flink Function.
   * @return instance of GeneratedFunction
   */
  def generateValuesInputFormat[T](
      ctx: CodeGeneratorContext,
      name: String,
      records: Seq[String],
      returnType: LogicalType,
      outRecordTerm: String = CodeGenUtils.DEFAULT_OUT_RECORD_TERM,
      outRecordWriterTerm: String = CodeGenUtils.DEFAULT_OUT_RECORD_WRITER_TERM)
    : GeneratedInput[GenericInputFormat[T]] = {
    // Suffix the requested name to make the generated class name unique.
    val funcName = newName(name)
    // Registers the reusable output record member referenced by the record snippets.
    ctx.addReusableOutputRecord(returnType, classOf[GenericRowData], outRecordTerm,
      Some(outRecordWriterTerm))
    // Java source of the input format: nextRecord() replays each record snippet once,
    // in order, keyed off the nextIdx cursor.
    val funcCode = j"""
      public class $funcName extends ${classOf[GenericInputFormat[_]].getCanonicalName} {
        private int nextIdx = 0;
        ${ctx.reuseMemberCode()}
        public $funcName(Object[] references) throws Exception {
          ${ctx.reuseInitCode()}
        }
        @Override
        public boolean reachedEnd() throws java.io.IOException {
          return nextIdx >= ${records.length};
        }
        @Override
        public Object nextRecord(Object reuse) {
          ${records.zipWithIndex.map { case (r, i) =>
            s"""
             |if (nextIdx == $i) {
             |  $r
             |  nextIdx++;
             |  return $outRecordTerm;
             |}
             |""".stripMargin
          }.mkString("")}
          throw new IllegalStateException(
            "Invalid nextIdx " + nextIdx + ". This is a bug. Please file an issue");
        }
      }
    """.stripMargin
    new GeneratedInput(
      funcName, funcCode, ctx.references.toArray, ctx.tableConfig.getConfiguration)
  }
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/InputFormatCodeGenerator.scala | Scala | apache-2.0 | 3,397 |
/*
* Copyright (c) 2013 University of Massachusetts Amherst
* Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*/
package edu.umass.cs.iesl.bibmogrify
/** Signals a bibmogrify-specific failure; the message is propagated to [[Exception]]. */
class BibMogrifyException(message: String) extends Exception(message)
| iesl/bibmogrify | src/main/scala/edu/umass/cs/iesl/bibmogrify/BibMogrifyException.scala | Scala | apache-2.0 | 276 |
object Main extends App {
  // Thin launcher: forwards the command-line arguments to the CRRCsimEditor entry point.
  com.abajar.crrcsimeditor.CRRCsimEditor.main(args)
}
| monkeypants/avl-crrcsim-editor | src/main/scala/Main.scala | Scala | gpl-2.0 | 80 |
package org.jetbrains.plugins.scala
package lang
package scaladoc
package psi
package impl
import _root_.org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import api.ScDocParamRef
import com.intellij.lang.ASTNode
/**
 * PSI element implementation for a parameter reference appearing in a Scaladoc comment.
 *
 * User: Alexander Podkhalyuzin
 * Date: 22.07.2008
 */
class ScDocParamRefImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScDocParamRef{
  // Name used in PSI tree dumps and debugging output.
  override def toString: String = "ScDocParamRef"
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/scaladoc/psi/impl/ScDocParamRefImpl.scala | Scala | apache-2.0 | 429 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.actor
object NonPublicClass {
  // Factory so callers can obtain Props for the non-public actor class defined below.
  def createProps(): Props =
    Props(new MyNonPublicActorClass())
}
class MyNonPublicActorClass extends Actor {
  // Echo actor: replies to every message by sending it back to the sender.
  def receive = {
    case msg => sender() ! msg
  }
}
| unicredit/akka.js | akka-js-actor-tests/js/src/test/scala/akka/actor/NonPublicClass.scala | Scala | bsd-3-clause | 292 |
package stocks
import akka.NotUsed
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source
import scala.concurrent.duration._
/**
 * A stock is a source of stock quotes and a symbol.
 */
class Stock(val symbol: StockSymbol) {
  private val stockQuoteGenerator: StockQuoteGenerator = new FakeStockQuoteGenerator(symbol)
  // Unbounded stream of quotes; each emitted quote is derived from the previous one.
  private val source: Source[StockQuote, NotUsed] = {
    Source.unfold(stockQuoteGenerator.seed) { (last: StockQuote) =>
      val next = stockQuoteGenerator.newQuote(last)
      Some(next, next)
    }
  }
  /**
   * Returns a source of stock history, containing a single element
   * built from the first `n` quotes of the stream.
   */
  def history(n: Int): Source[StockHistory, NotUsed] = {
    source.grouped(n).map(sq => new StockHistory(symbol, sq.map(_.price))).take(1)
  }
  /**
   * Provides a source that returns a stock quote every 75 milliseconds.
   */
  def update: Source[StockUpdate, NotUsed] = {
    source
      .throttle(elements = 1, per = 75.millis, maximumBurst = 1, ThrottleMode.shaping)
      .map(sq => new StockUpdate(sq.symbol, sq.price))
  }
  override val toString: String = s"Stock($symbol)"
}
/** Strategy for producing an initial quote and each successive quote from the last one. */
trait StockQuoteGenerator {
  def seed: StockQuote
  def newQuote(lastQuote: StockQuote): StockQuote
}
/**
 * Pseudo-random quote generator: seeds a price in [0, 800) and drifts each
 * subsequent price by a factor in [0.95, 1.05).
 */
class FakeStockQuoteGenerator(symbol: StockSymbol) extends StockQuoteGenerator {
  private def random: Double = scala.util.Random.nextDouble
  def seed: StockQuote =
    StockQuote(symbol, StockPrice(random * 800))
  def newQuote(lastQuote: StockQuote): StockQuote = {
    val driftFactor = 0.95 + (0.1 * random)
    StockQuote(symbol, StockPrice(lastQuote.price.raw * driftFactor))
  }
}
// A single point-in-time price observation for a symbol.
case class StockQuote(symbol: StockSymbol, price: StockPrice)
/** Value class for a stock symbol; renders as the raw ticker string. */
class StockSymbol private (val raw: String) extends AnyVal {
  override def toString: String = raw
}
object StockSymbol {
  import play.api.libs.json._ // Combinator syntax
  def apply(raw: String) = new StockSymbol(raw)
  // Reads a bare JSON string into a StockSymbol.
  implicit val stockSymbolReads: Reads[StockSymbol] = {
    JsPath.read[String].map(StockSymbol(_))
  }
  // Writes the symbol back out as a bare JSON string.
  implicit val stockSymbolWrites: Writes[StockSymbol] = Writes {
    (symbol: StockSymbol) => JsString(symbol.raw)
  }
}
/** Value class for stock price; renders as the raw numeric value. */
class StockPrice private (val raw: Double) extends AnyVal {
  override def toString: String = raw.toString
}
object StockPrice {
  import play.api.libs.json._ // Combinator syntax
  def apply(raw: Double):StockPrice = new StockPrice(raw)
  // Writes the price as a bare JSON number.
  implicit val stockPriceWrites: Writes[StockPrice] = Writes {
    (price: StockPrice) => JsNumber(price.raw)
  }
}
// Used for automatic JSON conversion
// https://www.playframework.com/documentation/2.7.x/ScalaJson
// JSON presentation class for stock history: a symbol plus its recent price series.
case class StockHistory(symbol: StockSymbol, prices: Seq[StockPrice])
object StockHistory {
  import play.api.libs.json._ // Combinator syntax
  // Uses the SAM-style Writes factory for consistency with StockSymbol/StockPrice above.
  // Output shape is unchanged: a "type" discriminator plus symbol and price history.
  implicit val stockHistoryWrites: Writes[StockHistory] = Writes {
    (history: StockHistory) => Json.obj(
      "type" -> "stockhistory",
      "symbol" -> history.symbol,
      "history" -> history.prices
    )
  }
}
// JSON presentation class for stock update: the latest price for a symbol.
case class StockUpdate(symbol: StockSymbol, price: StockPrice)
object StockUpdate {
  import play.api.libs.json._ // Combinator syntax
  // Uses the SAM-style Writes factory for consistency with StockSymbol/StockPrice above.
  // Output shape is unchanged: a "type" discriminator plus symbol and price.
  implicit val stockUpdateWrites: Writes[StockUpdate] = Writes {
    (update: StockUpdate) => Json.obj(
      "type" -> "stockupdate",
      "symbol" -> update.symbol,
      "price" -> update.price
    )
  }
}
| play2-maven-plugin/play2-maven-test-projects | play27/scala/websocket-example-using-webjars-assets/app/stocks/Stock.scala | Scala | apache-2.0 | 3,521 |
package com.github.mdr.graphospasm.grapheditor.part
import com.github.mdr.graphospasm.grapheditor.utils.Utils._
import com.github.mdr.graphospasm.grapheditor.figure._
import com.github.mdr.graphospasm.grapheditor.model.commands._
import org.eclipse.gef.editpolicies.DirectEditPolicy
import org.eclipse.gef.commands.Command
import org.eclipse.gef.editpolicies.DirectEditPolicy
import org.eclipse.gef.requests.DirectEditRequest
import org.eclipse.gef.EditPart
class AttributeValueDirectEditPolicy extends DirectEditPolicy {
  // Builds the undoable command that commits an in-place edit of an attribute value.
  def getDirectEditCommand(edit: DirectEditRequest): Command = {
    val text = edit.getCellEditor.getValue.toString
    val attributeValue = getHost.asInstanceOf[AttributeValueEditPart].getModel
    val newValue = SetAttributeValueCommand.getNewValue(attributeValue.value, text)
    new SetAttributeValueCommand(attributeValue, newValue)
  }
  // Intentionally a no-op: live feedback while typing is disabled (see commented-out code).
  def showCurrentEditValue(request: DirectEditRequest) {
    // val value = request.getCellEditor.getValue.toString
    // getHostFigure.asInstanceOf[AttributeValueFigure].name = value
    // getHostFigure.getUpdateManager.performUpdate
  }
}
| mdr/graphospasm | com.github.mdr.graphospasm.grapheditor/src/main/scala/com/github/mdr/graphospasm/grapheditor/part/AttributeValueDirectEditPolicy.scala | Scala | mit | 1,122 |
/**
* Copyright (c) 2013-2016 Extended Mind Technologies Oy
*
* This file is part of Extended Mind.
*
* Extended Mind is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.extendedmind.test
import scala.Array.canBuildFrom
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfter
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
abstract class SpecBase extends FunSpec
  with BeforeAndAfter with ShouldMatchers
  with MockitoSugar{
  /**
   * Renders `bytes` as lowercase two-digit hex, joined by `sep` when given.
   * Replaces the previous `sep.get` under a wildcard match with `Option.fold`,
   * and computes the hex mapping once instead of in each branch.
   */
  def bytes2hex(bytes: Array[Byte], sep: Option[String] = None): String = {
    val hex = bytes.map("%02x".format(_))
    sep.fold(hex.mkString)(s => hex.mkString(s))
  }
} | ttiurani/extendedmind | backend/src/test/scala/org/extendedmind/test/SpecBase.scala | Scala | agpl-3.0 | 1,423 |
import scala.quoted.*
import math.Numeric.Implicits.infixNumericOps
/** Expands at compile time to an unrolled computation of `x` raised to the constant `n`. */
inline def power[Num](x: Num, inline n: Int)(using num: Numeric[Num]) = ${powerCode('x, 'n)(using 'num)}
private def powerCode[Num: Type](x: Expr[Num], n: Expr[Int])(using Expr[Numeric[Num]])(using Quotes): Expr[Num] =
powerCode(x, n.valueOrAbort)
private def powerCode[Num: Type](x: Expr[Num], n: Int)(using num: Expr[Numeric[Num]])(using Quotes): Expr[Num] =
if (n == 0) '{ $num.one }
else if (n % 2 == 0) '{
given Numeric[Num] = $num
val y = $x * $x
${ powerCode('y, n / 2) }
}
else '{
given Numeric[Num] = $num
$x * ${powerCode(x, n - 1)}
}
| dotty-staging/dotty | tests/pos-macros/power-macro-2/Macro_1.scala | Scala | apache-2.0 | 651 |
package mesosphere.marathon.core.launchqueue.impl
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.base.{ Clock, ShutdownHooks }
import mesosphere.marathon.core.launchqueue.{ LaunchQueueConfig, LaunchQueueModule }
import mesosphere.marathon.core.leadership.AlwaysElectedLeadershipModule
import mesosphere.marathon.core.matcher.DummyOfferMatcherManager
import mesosphere.marathon.core.task.bus.TaskBusModule
import mesosphere.marathon.integration.setup.WaitTestSupport
import mesosphere.marathon.state.{ AppRepository, PathId }
import mesosphere.marathon.tasks.TaskFactory.CreatedTask
import mesosphere.marathon.tasks.{ TaskFactory, TaskIdUtil, TaskTracker }
import mesosphere.marathon.{ MarathonSpec, MarathonTestHelper }
import mesosphere.util.state.PersistentEntity
import org.apache.mesos.Protos.TaskID
import org.mockito.Mockito
import org.mockito.Mockito.{ when => call, _ }
import org.scalatest.{ BeforeAndAfter, GivenWhenThen }
import scala.concurrent.{ Future, Await }
import scala.concurrent.duration._
/**
 * Tests LaunchQueueModule behavior with mocked TaskTracker/TaskFactory/AppRepository:
 * queue listing and counts, purging/re-adding, offer-matcher (un)registration, and
 * offer matching including task-store success and failure paths.
 */
class LaunchQueueModuleTest extends MarathonSpec with BeforeAndAfter with GivenWhenThen {
  test("empty queue returns no results") {
    When("querying queue")
    val apps = taskQueue.list
    Then("no apps are returned")
    assert(apps.isEmpty)
  }
  test("An added queue item is returned in list") {
    Given("a task queue with one item")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("querying its contents")
    val list = taskQueue.list
    Then("we get back the added app")
    assert(list.size == 1)
    assert(list.head.app == app)
    assert(list.head.tasksLeftToLaunch == 1)
    assert(list.head.tasksLaunchedOrRunning == 0)
    assert(list.head.taskLaunchesInFlight == 0)
    verify(taskTracker).get(app.id)
  }
  test("An added queue item is reflected via count") {
    Given("a task queue with one item")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 1")
    assert(count == 1)
    verify(taskTracker).get(app.id)
  }
  test("A purged queue item has a count of 0") {
    Given("a task queue with one item which is purged")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    taskQueue.purge(app.id)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 0")
    assert(count == 0)
    verify(taskTracker).get(app.id)
  }
  test("A re-added queue item has a count of 1") {
    Given("a task queue with one item which is purged")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    taskQueue.purge(app.id)
    taskQueue.add(app)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 1")
    assert(count == 1)
    verify(taskTracker, times(2)).get(app.id)
  }
  test("adding a queue item registers new offer matcher") {
    Given("An empty task tracker")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    When("Adding an app to the taskQueue")
    taskQueue.add(app)
    Then("A new offer matcher gets registered")
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    verify(taskTracker).get(app.id)
  }
  test("purging a queue item UNregisters offer matcher") {
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("The app is purged")
    taskQueue.purge(app.id)
    Then("No offer matchers remain registered")
    assert(offerMatcherManager.offerMatchers.isEmpty)
    verify(taskTracker).get(app.id)
  }
  test("an offer gets unsuccessfully matched against an item in the queue") {
    val offer = MarathonTestHelper.makeBasicOffer().build()
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    When("we ask for matching an offer")
    // taskFactory declines the offer, so no tasks may be launched.
    call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(None)
    val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
    val matchedTasks = Await.result(matchFuture, 3.seconds)
    Then("the offer gets passed to the task factory and respects the answer")
    verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
    assert(matchedTasks.offerId == offer.getId)
    assert(matchedTasks.tasks == Seq.empty)
    verify(taskTracker).get(app.id)
  }
  test("an offer gets successfully matched against an item in the queue") {
    val offer = MarathonTestHelper.makeBasicOffer().build()
    val taskId: TaskID = TaskIdUtil.newTaskId(app.id)
    val mesosTask = MarathonTestHelper.makeOneCPUTask("").setTaskId(taskId).build()
    val marathonTask = MarathonTask.newBuilder().setId(taskId.getValue).build()
    val createdTask = CreatedTask(mesosTask, marathonTask)
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(Some(createdTask))
    call(taskTracker.store(app.id, marathonTask)).thenReturn(Future.successful(mock[PersistentEntity]))
    taskQueue.add(app)
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    When("we ask for matching an offer")
    val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
    val matchedTasks = Await.result(matchFuture, 3.seconds)
    Then("the offer gets passed to the task factory and respects the answer")
    verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
    assert(matchedTasks.offerId == offer.getId)
    assert(matchedTasks.tasks.map(_.taskInfo) == Seq(mesosTask))
    verify(taskTracker).get(app.id)
    verify(taskTracker).created(app.id, marathonTask)
    verify(taskTracker).store(app.id, marathonTask)
  }
  test("an offer gets successfully matched against an item in the queue BUT storing fails") {
    val offer = MarathonTestHelper.makeBasicOffer().build()
    val taskId: TaskID = TaskIdUtil.newTaskId(app.id)
    val mesosTask = MarathonTestHelper.makeOneCPUTask("").setTaskId(taskId).build()
    val marathonTask = MarathonTask.newBuilder().setId(taskId.getValue).build()
    val createdTask = CreatedTask(mesosTask, marathonTask)
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(Some(createdTask))
    // Simulated persistence failure: the created task must then be rolled back (terminated).
    call(taskTracker.store(app.id, marathonTask)).thenReturn(Future.failed(new RuntimeException("storing failed")))
    taskQueue.add(app)
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    When("we ask for matching an offer ")
    val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
    val matchedTasks = Await.result(matchFuture, 3.seconds)
    Then("the offer gets passed to the task factory but not included in the answer")
    verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
    assert(matchedTasks.offerId == offer.getId)
    assert(matchedTasks.tasks.isEmpty)
    verify(taskTracker).get(app.id)
    verify(taskTracker).created(app.id, marathonTask)
    verify(taskTracker).store(app.id, marathonTask)
    verify(taskTracker, Mockito.timeout(1000)).terminated(app.id, marathonTask.getId)
  }
  // --- test fixtures; mutable vars are re-initialized for every test in before() ---
  private[this] val app = MarathonTestHelper.makeBasicApp().copy(id = PathId("/app"))
  private[this] var shutdownHooks: ShutdownHooks = _
  private[this] var clock: Clock = _
  private[this] var taskBusModule: TaskBusModule = _
  private[this] var offerMatcherManager: DummyOfferMatcherManager = _
  private[this] var appRepository: AppRepository = _
  private[this] var taskTracker: TaskTracker = _
  private[this] var taskFactory: TaskFactory = _
  private[this] var module: LaunchQueueModule = _
  private[this] def taskQueue = module.taskQueue
  before {
    shutdownHooks = ShutdownHooks()
    clock = Clock()
    taskBusModule = new TaskBusModule()
    offerMatcherManager = new DummyOfferMatcherManager()
    taskTracker = mock[TaskTracker]("taskTracker")
    taskFactory = mock[TaskFactory]("taskFactory")
    appRepository = mock[AppRepository]("appRepository")
    val config: LaunchQueueConfig = new LaunchQueueConfig {}
    config.afterInit()
    module = new LaunchQueueModule(
      config,
      AlwaysElectedLeadershipModule(shutdownHooks),
      clock,
      subOfferMatcherManager = offerMatcherManager,
      taskStatusObservables = taskBusModule.taskStatusObservables,
      appRepository,
      taskTracker,
      taskFactory
    )
  }
  after {
    // Fails the test if any mock saw calls not explicitly verified above.
    verifyNoMoreInteractions(appRepository)
    verifyNoMoreInteractions(taskTracker)
    verifyNoMoreInteractions(taskFactory)
    shutdownHooks.shutdown()
  }
}
| MrMarvin/marathon | src/test/scala/mesosphere/marathon/core/launchqueue/impl/LaunchQueueModuleTest.scala | Scala | apache-2.0 | 9,326 |
import sbt._
import uk.gov.hmrc.SbtAutoBuildPlugin
import uk.gov.hmrc.sbtdistributables.SbtDistributablesPlugin
import uk.gov.hmrc.versioning.SbtGitVersioning
object MicroServiceBuild extends Build with MicroService {
  // Artifact name used by the HMRC auto-build/versioning/packaging plugins below.
  override val appName = "mdtp-api-microservice"
  override lazy val plugins: Seq[Plugins] = Seq(
    SbtAutoBuildPlugin, SbtGitVersioning, SbtDistributablesPlugin
  )
  override lazy val appDependencies: Seq[ModuleID] = AppDependencies()
}
private object AppDependencies {
  import play.core.PlayVersion
  // Pinned library versions, grouped here for easy upgrades.
  private val microserviceBootstrapVersion = "1.4.0"
  private val playHealthVersion = "1.1.0"
  private val playConfigVersion = "1.2.0"
  private val hmrcTestVersion = "1.2.0"
  private val playReactivemongoVersion = "4.1.0"
  private val simpleReactivemongoVersion = "3.1.2"
  // Runtime (compile-scope) dependencies.
  val compile = Seq(
    "uk.gov.hmrc" %% "play-reactivemongo" % playReactivemongoVersion,
    "uk.gov.hmrc" %% "simple-reactivemongo" % simpleReactivemongoVersion,
    "uk.gov.hmrc" %% "microservice-bootstrap" % microserviceBootstrapVersion,
    "uk.gov.hmrc" %% "play-health" % playHealthVersion,
    "uk.gov.hmrc" %% "play-config" % playConfigVersion,
    "uk.gov.hmrc" %% "play-json-logger" % "1.0.0"
  )
  // Shared test dependencies, parameterized by configuration scope ("test" or "it").
  abstract class TestDependencies(scope: String) {
    lazy val test : Seq[ModuleID] = Seq(
      "uk.gov.hmrc" %% "hmrctest" % hmrcTestVersion % scope,
      "org.scalatest" %% "scalatest" % "2.2.2" % scope,
      "org.pegdown" % "pegdown" % "1.4.2" % scope,
      "com.typesafe.play" %% "play-test" % PlayVersion.current % scope
    )
  }
  object Test extends TestDependencies("test")
  object IntegrationTest extends TestDependencies("it")
  // Full dependency list: compile plus unit- and integration-test scopes.
  def apply() = compile ++ Test.test ++ IntegrationTest.test
}
| trasahin/mdtp-api-microservice | project/MicroServiceBuild.scala | Scala | apache-2.0 | 1,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.SparkException
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
import org.apache.spark.ml.param.shared.HasRawPredictionCol
import org.apache.spark.ml.util.{MetadataUtils, SchemaUtils}
import org.apache.spark.mllib.linalg.{Vector, VectorUDT}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DataType, DoubleType, StructType}
/**
 * (private[spark]) Params for classification.
 */
private[spark] trait ClassifierParams
  extends PredictorParams with HasRawPredictionCol {
  override protected def validateAndTransformSchema(
      schema: StructType,
      fitting: Boolean,
      featuresDataType: DataType): StructType = {
    // Validate label/features via the Predictor contract, then append the
    // rawPrediction output column (vector of per-class confidences).
    val parentSchema = super.validateAndTransformSchema(schema, fitting, featuresDataType)
    SchemaUtils.appendColumn(parentSchema, $(rawPredictionCol), new VectorUDT)
  }
}
/**
 * :: DeveloperApi ::
 *
 * Single-label binary or multiclass classification.
 * Classes are indexed {0, 1, ..., numClasses - 1}.
 *
 * @tparam FeaturesType Type of input features. E.g., [[Vector]]
 * @tparam E Concrete Estimator type
 * @tparam M Concrete Model type
 */
@DeveloperApi
abstract class Classifier[
    FeaturesType,
    E <: Classifier[FeaturesType, E, M],
    M <: ClassificationModel[FeaturesType, M]]
  extends Predictor[FeaturesType, E, M] with ClassifierParams {
  /** @group setParam */
  def setRawPredictionCol(value: String): E = set(rawPredictionCol, value).asInstanceOf[E]
  // TODO: defaultEvaluator (follow-up PR)
  /**
   * Extract [[labelCol]] and [[featuresCol]] from the given dataset,
   * and put it in an RDD with strong types.
   *
   * @param dataset DataFrame with columns for labels ([[org.apache.spark.sql.types.NumericType]])
   *                and features ([[Vector]]). Labels are cast to [[DoubleType]].
   * @param numClasses Number of classes label can take. Labels must be integers in the range
   *                   [0, numClasses).
   * @throws SparkException if any label is not an integer >= 0
   */
  protected def extractLabeledPoints(dataset: Dataset[_], numClasses: Int): RDD[LabeledPoint] = {
    require(numClasses > 0, s"Classifier (in extractLabeledPoints) found numClasses =" +
      s" $numClasses, but requires numClasses > 0.")
    dataset.select(col($(labelCol)).cast(DoubleType), col($(featuresCol))).rdd.map {
      case Row(label: Double, features: Vector) =>
        // label % 1 == 0 rejects fractional labels after the Double cast.
        require(label % 1 == 0 && label >= 0 && label < numClasses, s"Classifier was given" +
          s" dataset with invalid label $label. Labels must be integers in range" +
          s" [0, 1, ..., $numClasses), where numClasses=$numClasses.")
        LabeledPoint(label, features)
    }
  }
  /**
   * Get the number of classes. This looks in column metadata first, and if that is missing,
   * then this assumes classes are indexed 0,1,...,numClasses-1 and computes numClasses
   * by finding the maximum label value.
   *
   * Label validation (ensuring all labels are integers >= 0) needs to be handled elsewhere,
   * such as in [[extractLabeledPoints()]].
   *
   * @param dataset Dataset which contains a column [[labelCol]]
   * @param maxNumClasses Maximum number of classes allowed when inferred from data. If numClasses
   *                      is specified in the metadata, then maxNumClasses is ignored.
   * @return number of classes
   * @throws IllegalArgumentException if metadata does not specify numClasses, and the
   *                                  actual numClasses exceeds maxNumClasses
   */
  protected def getNumClasses(dataset: Dataset[_], maxNumClasses: Int = 100): Int = {
    MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match {
      case Some(n: Int) => n
      case None =>
        // Get number of classes from dataset itself.
        val maxLabelRow: Array[Row] = dataset.select(max($(labelCol))).take(1)
        if (maxLabelRow.isEmpty) {
          throw new SparkException("ML algorithm was given empty dataset.")
        }
        val maxDoubleLabel: Double = maxLabelRow.head.getDouble(0)
        require((maxDoubleLabel + 1).isValidInt, s"Classifier found max label value =" +
          s" $maxDoubleLabel but requires integers in range [0, ... ${Int.MaxValue})")
        // Labels are assumed 0-indexed, so the class count is max label + 1.
        val numClasses = maxDoubleLabel.toInt + 1
        require(numClasses <= maxNumClasses, s"Classifier inferred $numClasses from label values" +
          s" in column $labelCol, but this exceeded the max numClasses ($maxNumClasses) allowed" +
          s" to be inferred from values. To avoid this error for labels with > $maxNumClasses" +
          s" classes, specify numClasses explicitly in the metadata; this can be done by applying" +
          s" StringIndexer to the label column.")
        logInfo(this.getClass.getCanonicalName + s" inferred $numClasses classes for" +
          s" labelCol=$labelCol since numClasses was not specified in the column metadata.")
        numClasses
    }
  }
}
/**
* :: DeveloperApi ::
*
* Model produced by a [[Classifier]].
* Classes are indexed {0, 1, ..., numClasses - 1}.
*
* @tparam FeaturesType Type of input features. E.g., [[Vector]]
* @tparam M Concrete Model type
*/
@DeveloperApi
abstract class ClassificationModel[FeaturesType, M <: ClassificationModel[FeaturesType, M]]
  extends PredictionModel[FeaturesType, M] with ClassifierParams {
  /** @group setParam */
  def setRawPredictionCol(value: String): M = set(rawPredictionCol, value).asInstanceOf[M]
  /** Number of classes (values which the label can take). */
  def numClasses: Int
  /**
   * Transforms dataset by reading from [[featuresCol]], and appending new columns as specified by
   * parameters:
   * - predicted labels as [[predictionCol]] of type [[Double]]
   * - raw predictions (confidences) as [[rawPredictionCol]] of type [[Vector]].
   *
   * @param dataset input dataset
   * @return transformed dataset
   */
  override def transform(dataset: Dataset[_]): DataFrame = {
    transformSchema(dataset.schema, logging = true)
    // Output selected columns only.
    // This is a bit complicated since it tries to avoid repeated computation.
    var outputData = dataset
    var numColsOutput = 0
    // Append the raw-prediction column when one is configured.
    if (getRawPredictionCol != "") {
      val predictRawUDF = udf { (features: Any) =>
        predictRaw(features.asInstanceOf[FeaturesType])
      }
      outputData = outputData.withColumn(getRawPredictionCol, predictRawUDF(col(getFeaturesCol)))
      numColsOutput += 1
    }
    // Append the prediction column. When the raw-prediction column was just
    // produced, derive the label from it via raw2prediction instead of
    // invoking the full predict() path on the features a second time.
    if (getPredictionCol != "") {
      val predUDF = if (getRawPredictionCol != "") {
        udf(raw2prediction _).apply(col(getRawPredictionCol))
      } else {
        val predictUDF = udf { (features: Any) =>
          predict(features.asInstanceOf[FeaturesType])
        }
        predictUDF(col(getFeaturesCol))
      }
      outputData = outputData.withColumn(getPredictionCol, predUDF)
      numColsOutput += 1
    }
    // Neither output column was requested, so this call changed nothing.
    if (numColsOutput == 0) {
      logWarning(s"$uid: ClassificationModel.transform() was called as NOOP" +
        " since no output columns were set.")
    }
    outputData.toDF
  }
  /**
   * Predict label for the given features.
   * This internal method is used to implement [[transform()]] and output [[predictionCol]].
   *
   * This default implementation for classification predicts the index of the maximum value
   * from [[predictRaw()]].
   */
  override protected def predict(features: FeaturesType): Double = {
    raw2prediction(predictRaw(features))
  }
  /**
   * Raw prediction for each possible label.
   * The meaning of a "raw" prediction may vary between algorithms, but it intuitively gives
   * a measure of confidence in each possible label (where larger = more confident).
   * This internal method is used to implement [[transform()]] and output [[rawPredictionCol]].
   *
   * @return vector where element i is the raw prediction for label i.
   *         This raw prediction may be any real number, where a larger value indicates greater
   *         confidence for that label.
   */
  protected def predictRaw(features: FeaturesType): Vector
  /**
   * Given a vector of raw predictions, select the predicted label.
   * This may be overridden to support thresholds which favor particular labels.
   * @return predicted label (default: index of the maximum raw prediction)
   */
  protected def raw2prediction(rawPrediction: Vector): Double = rawPrediction.argmax
}
| xieguobin/Spark_2.0.0_cn1 | ml/classification/Classifier.scala | Scala | apache-2.0 | 9,405 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.tcsp
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import javax.inject.Inject
import models.tcsp.{CompanyFormationAgent, RegisteredOfficeEtc, Tcsp, TcspTypes}
import play.api.mvc.MessagesControllerComponents
import utils.AuthAction
import views.html.tcsp.service_provider_types
import scala.concurrent.Future
class TcspTypesController @Inject()(val dataCacheConnector: DataCacheConnector,
                                    val authAction: AuthAction,
                                    val ds: CommonPlayDependencies,
                                    val cc: MessagesControllerComponents,
                                    service_provider_types: service_provider_types) extends AmlsBaseController(ds, cc) {

  /** Renders the service-provider-types form, pre-populated from the cache when present. */
  def get(edit: Boolean = false) = authAction.async {
    implicit request =>
      dataCacheConnector.fetch[Tcsp](request.credId, Tcsp.key) map { maybeTcsp =>
        val form: Form2[TcspTypes] = (for {
          tcsp      <- maybeTcsp
          tcspTypes <- tcsp.tcspTypes
        } yield Form2[TcspTypes](tcspTypes)).getOrElse(EmptyForm)
        Ok(service_provider_types(form, edit))
      }
  }

  /**
   * Saves the selected provider types, clearing out answers that only apply to
   * company-formation agents / registered-office providers when those options
   * were not selected, then routes to the next relevant question.
   */
  def post(edit: Boolean = false) = authAction.async {
    implicit request =>
      Form2[TcspTypes](request.body) match {
        case f: InvalidForm =>
          Future.successful(BadRequest(service_provider_types(f, edit)))
        case ValidForm(_, data) =>
          val hasCompanyFormation = data.serviceProviders.contains(CompanyFormationAgent)
          val hasRegisteredOffice = data.serviceProviders.contains(RegisteredOfficeEtc)
          val result = for {
            tcsp  <- dataCacheConnector.fetch[Tcsp](request.credId, Tcsp.key)
            cache <- dataCacheConnector.save[Tcsp](request.credId, Tcsp.key, {
              val updated = tcsp.tcspTypes(data)
              if (hasCompanyFormation && hasRegisteredOffice) {
                updated
              } else if (hasCompanyFormation) {
                updated.copy(providedServices = None)
              } else if (hasRegisteredOffice) {
                updated.copy(onlyOffTheShelfCompsSold = None, complexCorpStructureCreation = None)
              } else {
                updated.copy(onlyOffTheShelfCompsSold = None, complexCorpStructureCreation = None, providedServices = None)
              }
            })
          } yield cache
          result map { _ =>
            if (hasCompanyFormation) {
              Redirect(routes.OnlyOffTheShelfCompsSoldController.get(edit))
            } else if (hasRegisteredOffice) {
              Redirect(routes.ProvidedServicesController.get(edit))
            } else if (edit) {
              Redirect(routes.SummaryController.get)
            } else {
              Redirect(routes.ServicesOfAnotherTCSPController.get())
            }
          }
      }
  }
}
| hmrc/amls-frontend | app/controllers/tcsp/TcspTypesController.scala | Scala | apache-2.0 | 3,571 |
import com.myltsev.raw.{ CountingSort, Bulk, BulkType }
import java.util.{ List ⇒ JList }
import org.apache.hadoop.io.{ NullWritable, LongWritable, Text }
import org.apache.hadoop.mapred.Mapper
import org.apache.hadoop.mrunit.MapDriver
import org.apache.hadoop.mrunit.types.{ Pair ⇒ MRPair }
package object ops {

  /** Enriches a list of raw input lines with an MRUnit-backed mapper assertion. */
  implicit class ListCompanionOps(val input: List[String]) extends AnyVal {

    /**
     * Runs the [[CountingSort.Map]] mapper over `input` through MRUnit and
     * verifies it emits exactly `expectedOutput` (bulk-type / count pairs).
     */
    def mustBeMappedTo(expectedOutput: List[(BulkType.BulkType, Int)]) = {
      val mapper: Mapper[LongWritable, Text, Bulk, NullWritable] = new CountingSort.Map()
      val mapDriver: MapDriver[LongWritable, Text, Bulk, NullWritable] =
        new MapDriver[LongWritable, Text, Bulk, NullWritable](mapper)

      // Explicit .asJava conversions (JavaConverters) instead of the
      // deprecated implicit JavaConversions wrappers.
      import scala.collection.JavaConverters._
      val jInput: JList[MRPair[LongWritable, Text]] =
        input
          .zipWithIndex
          .map { case (line, offset) => new MRPair(new LongWritable(offset), new Text(line)) }
          .asJava
      val jOutput: JList[MRPair[Bulk, NullWritable]] =
        expectedOutput.map(x => new MRPair(Bulk(x._1, x._2), NullWritable.get)).asJava

      mapDriver.withAll(jInput).withAllOutput(jOutput).runTest(false)
    }
  }
}
import scala.quoted.*
class Foo {
  // Macro splice inside a member of a (non-top-level) class.
  inline def foo: Unit = ${Foo.impl}
  // Macro splice inside an object that is itself nested in a class.
  object Bar {
    inline def foo: Unit = ${Foo.impl}
  }
}
object Foo {
  // Macro splice inside a class nested in an object; note `impl` is referenced
  // without the `Foo.` prefix here.
  class Baz {
    inline def foo: Unit = ${impl}
  }
  // Macro splice inside an object nested in an object.
  object Quox {
    inline def foo: Unit = ${Foo.impl}
  }
  // Shared macro implementation: every `foo` above expands to the unit literal.
  def impl(using Quotes): Expr[Unit] = '{}
}
| dotty-staging/dotty | tests/pos-macros/quote-non-static-macro.scala | Scala | apache-2.0 | 303 |
// Fixture exercising boolean equality comparisons whose results are all
// constant-foldable (presumably input for a constant-propagation analysis —
// TODO confirm against the test harness).
class BCPBooleanEqTest {
  def m(): Unit = {
    val a = true
    val b = a == true   // true
    val c = a == false  // false
    val d = false
    val e = d == true   // false
    val f = d == false  // true
    val g = d == 10     // Boolean == Int: unrelated types, always false
    val h = d == 10.0   // Boolean == Double: unrelated types, always false
    val i = d == "test" // Boolean == String: unrelated types, always false
  }
}
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.codecs.laws.discipline
import kantan.codecs.strings.{codecs, DecodeError}
// Re-exports the law-checking test kits as aliases so downstream modules can
// reach them through a single package-like trait without extra imports.
trait DisciplinePackage extends PlatformSpecificDisciplinePackage {
  // We're using non-standard field names (since they're basically companion objects). Calm scalastyle down.
  // scalastyle:off
  // Generic codec/decoder/encoder law suites.
  type CodecTests[E, D, F, T] = kantan.codecs.laws.discipline.CodecTests[E, D, F, T]
  val CodecTests = kantan.codecs.laws.discipline.CodecTests
  type DecoderTests[E, D, F, T] = kantan.codecs.laws.discipline.DecoderTests[E, D, F, T]
  val DecoderTests = kantan.codecs.laws.discipline.DecoderTests
  type EncoderTests[E, D, T] = kantan.codecs.laws.discipline.EncoderTests[E, D, T]
  val EncoderTests = kantan.codecs.laws.discipline.EncoderTests
  // String-specialised variants (String-encoded values, string codecs tag).
  type StringEncoderTests[A] = EncoderTests[String, A, codecs.type]
  val StringEncoderTests = kantan.codecs.laws.discipline.StringEncoderTests
  type StringDecoderTests[A] = DecoderTests[String, A, DecodeError, codecs.type]
  val StringDecoderTests = kantan.codecs.laws.discipline.StringDecoderTests
  type StringCodecTests[A] = CodecTests[String, A, DecodeError, codecs.type]
  val StringCodecTests = kantan.codecs.laws.discipline.StringCodecTests
  // Base suite type for discipline-style tests.
  type DisciplineSuite = kantan.codecs.laws.discipline.DisciplineSuite
  // scalastyle:on
}
| nrinaudo/kantan.codecs | laws/shared/src/main/scala/kantan/codecs/laws/discipline/DisciplinePackage.scala | Scala | apache-2.0 | 1,881 |
/*
* Copyright 2014 Claude Mamo
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package controllers
import play.api.mvc.{Controller, Action}
import play.api.libs.json._
object Group extends Controller {

  /**
   * Returns the Zookeeper instances of the "ALL" group as JSON.
   *
   * Previously this called `.get` on the lookup result, which would throw
   * NoSuchElementException (an HTTP 500) if the group was missing; a missing
   * group now yields a 404 instead.
   */
  def index() = Action {
    models.Group.findByName("ALL") match {
      case Some(group) => Ok(Json.toJson(group.zookeepers))
      case None => NotFound
    }
  }
}
| stealthly/kafka-web-console | app/controllers/Group.scala | Scala | apache-2.0 | 850 |
package com.nashsoftware.Controller
/**
* Created with IntelliJ IDEA.
* User: jlong
* Date: 8/18/13
* Time: 5:20 PM
* To change this template use File | Settings | File Templates.
*/
/** Console-logging drone controller: every command is simulated via println. */
class StdOutController extends BaseController {

  // Most recent movement command; starts out neutral on every axis.
  var controlStatus: ControlStatus = new ControlStatus(0, 0, 0, 0)

  /** Lands the drone unless it is already on the ground. */
  def land() = {
    if (state != DroneState.LANDED) {
      println("Landing")
      state = DroneState.LANDED
    }
  }

  /** Takes off into a hover unless the drone is already hovering. */
  def takeOff() = {
    if (state != DroneState.HOVERING) {
      println("Launching")
      state = DroneState.HOVERING
    }
  }

  /** Transitions from hover to flight if needed, then records the attitude command. */
  def move(pitch: Float, roll: Float, gaz: Float, yaw: Float) = {
    if (state == DroneState.HOVERING) {
      println("Moving")
      state = DroneState.FLYING
    }
    if (state == DroneState.FLYING) {
      controlStatus = new ControlStatus(pitch, roll, yaw, gaz)
    }
  }

  /** Simulated emergency landing: only logs. */
  def emergencyLand() = {
    println("Crashing")
  }

  /** Puts the drone into a stationary hover unless it is already hovering. */
  def hover() = {
    if (state != DroneState.HOVERING) {
      println("HOVERING")
      state = DroneState.HOVERING
    }
  }

  /** Resets the flat-trim reference (simulated). */
  def trim() = {
    println("Reset Trim")
  }
}
| longjos/Leap-ARDrone | src/com/nashsoftware/Controller/StdOutController.scala | Scala | mit | 1,075 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.io.vega
import edu.latrobe._
import org.json4s.JsonAST._
/**
 * Two dimensional scatter/line chart rendered through a Vega specification.
 *
 * Each series targets one of two y axes (`yAxisNo` 0 = left, 1 = right); the
 * corresponding scales, axes and legends are only emitted when a series
 * actually uses them.
 */
final class ScatterPlot2D
  extends Chart2D[ScatterPlot2D] {

  override def repr
  : ScatterPlot2D = this

  // ---------------------------------------------------------------------------
  //  X axis configuration.
  // ---------------------------------------------------------------------------
  private var _xAxisFormat
  : String = "linear"

  def xAxisFormat
  : String = _xAxisFormat

  def xAxisFormat_=(value: String)
  : Unit = {
    require(value != null)
    _xAxisFormat = value
  }

  def setXAxisFormat(value: String)
  : ScatterPlot2D = {
    xAxisFormat_=(value)
    this
  }

  private var _xAxisLabel
  : Option[String] = None

  def xAxisLabel
  : Option[String] = _xAxisLabel

  def xAxisLabel_=(value: String)
  : Unit = xAxisLabel_=(Option(value))

  def xAxisLabel_=(value: Option[String])
  : Unit = {
    require(value != null)
    _xAxisLabel = value
  }

  def setXAxisLabel(value: String)
  : ScatterPlot2D = {
    xAxisLabel_=(value)
    this
  }

  def setXAxisLabel(value: Option[String])
  : ScatterPlot2D = {
    xAxisLabel_=(value)
    this
  }

  // Defaults to zero so the x axis starts at the origin unless overridden.
  private var _xAxisMinValue
  : Option[Real] = Some(Real.zero)

  def xAxisMinValue
  : Option[Real] = _xAxisMinValue

  def xAxisMinValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _xAxisMinValue = value
  }

  def setXAxisMinValue(value: Option[Real])
  : ScatterPlot2D = {
    xAxisMinValue_=(value)
    this
  }

  private var _xAxisMaxValue
  : Option[Real] = None

  def xAxisMaxValue
  : Option[Real] = _xAxisMaxValue

  def xAxisMaxValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _xAxisMaxValue = value
  }

  def setXAxisMaxValue(value: Option[Real])
  : ScatterPlot2D = {
    xAxisMaxValue_=(value)
    this
  }

  // ---------------------------------------------------------------------------
  //  Left (y0) axis configuration.
  // ---------------------------------------------------------------------------
  private var _y0AxisFormat
  : String = "linear"

  def y0AxisFormat
  : String = _y0AxisFormat

  def y0AxisFormat_=(value: String)
  : Unit = {
    require(value != null)
    _y0AxisFormat = value
  }

  def setY0AxisFormat(value: String)
  : ScatterPlot2D = {
    y0AxisFormat_=(value)
    this
  }

  private var _y0AxisLabel
  : Option[String] = None

  def y0AxisLabel
  : Option[String] = _y0AxisLabel

  def y0AxisLabel_=(value: String)
  : Unit = y0AxisLabel_=(Option(value))

  def y0AxisLabel_=(value: Option[String])
  : Unit = {
    require(value != null)
    _y0AxisLabel = value
  }

  def setY0AxisLabel(value: String)
  : ScatterPlot2D = {
    y0AxisLabel_=(value)
    this
  }

  def setY0AxisLabel(value: Option[String])
  : ScatterPlot2D = {
    y0AxisLabel_=(value)
    this
  }

  private var _y0AxisMinValue
  : Option[Real] = None

  def y0AxisMinValue
  : Option[Real] = _y0AxisMinValue

  def y0AxisMinValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _y0AxisMinValue = value
  }

  def setY0AxisMinValue(value: Option[Real])
  : ScatterPlot2D = {
    y0AxisMinValue_=(value)
    this
  }

  private var _y0AxisMaxValue
  : Option[Real] = None

  def y0AxisMaxValue
  : Option[Real] = _y0AxisMaxValue

  def y0AxisMaxValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _y0AxisMaxValue = value
  }

  def setY0AxisMaxValue(value: Option[Real])
  : ScatterPlot2D = {
    y0AxisMaxValue_=(value)
    this
  }

  // ---------------------------------------------------------------------------
  //  Right (y1) axis configuration.
  // ---------------------------------------------------------------------------
  private var _y1AxisFormat
  : String = "linear"

  def y1AxisFormat
  : String = _y1AxisFormat

  def y1AxisFormat_=(value: String)
  : Unit = {
    require(value != null)
    _y1AxisFormat = value
  }

  def setY1AxisFormat(value: String)
  : ScatterPlot2D = {
    // Fixed: previously delegated to y0AxisFormat_= (copy/paste bug), which
    // silently changed the left axis format instead of the right one.
    y1AxisFormat_=(value)
    this
  }

  private var _y1AxisLabel
  : Option[String] = None

  def y1AxisLabel
  : Option[String] = _y1AxisLabel

  def y1AxisLabel_=(value: String)
  : Unit = y1AxisLabel_=(Option(value))

  def y1AxisLabel_=(value: Option[String])
  : Unit = {
    require(value != null)
    _y1AxisLabel = value
  }

  def setY1AxisLabel(value: String)
  : ScatterPlot2D = {
    y1AxisLabel_=(value)
    this
  }

  def setY1AxisLabel(value: Option[String])
  : ScatterPlot2D = {
    y1AxisLabel_=(value)
    this
  }

  private var _y1AxisMinValue
  : Option[Real] = None

  def y1AxisMinValue
  : Option[Real] = _y1AxisMinValue

  def y1AxisMinValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _y1AxisMinValue = value
  }

  def setY1AxisMinValue(value: Option[Real])
  : ScatterPlot2D = {
    y1AxisMinValue_=(value)
    this
  }

  private var _y1AxisMaxValue
  : Option[Real] = None

  def y1AxisMaxValue
  : Option[Real] = _y1AxisMaxValue

  def y1AxisMaxValue_=(value: Option[Real])
  : Unit = {
    require(value != null)
    _y1AxisMaxValue = value
  }

  def setY1AxisMaxValue(value: Option[Real])
  : ScatterPlot2D = {
    y1AxisMaxValue_=(value)
    this
  }

  /**
   * Builds the fields of the Vega specification: data tables, scales, axes,
   * legends and marks.
   */
  override protected def doToJson()
  : List[JField] = {
    val y0Used = series.exists(_.yAxisNo == 0)
    val y1Used = series.exists(_.yAxisNo == 1)

    // One data table per series.
    val data = Json.field("data", {
      val builder = List.newBuilder[JObject]
      series.foreach(
        builder += _.toJson
      )
      builder.result()
    })

    val scales = {
      val builder = List.newBuilder[JObject]
      // Shared x scale over the "x" field of every series.
      locally {
        val fields = List.newBuilder[JField]
        fields += Json.field("name", "x")
        fields += Json.field("type", _xAxisFormat)
        fields += Json.field("range", "width")
        fields += Json.field(
          "domain",
          Json.field(
            "fields", {
              val builder = List.newBuilder[JObject]
              series.foreach(s => {
                builder += JObject(
                  Json.field("data", s.label),
                  Json.field("field", "x")
                )
              })
              builder.result()
            }
          )
        )
        fields += Json.field("zero", value = false)
        _xAxisMinValue.foreach(
          fields += Json.field("domainMin", _)
        )
        _xAxisMaxValue.foreach(
          fields += Json.field("domainMax", _)
        )
        builder += JObject(fields.result())
      }
      // Left y scale, only when some series renders against axis 0.
      if (y0Used) {
        val fields = List.newBuilder[JField]
        fields += Json.field("name", "y0")
        fields += Json.field("type", _y0AxisFormat)
        fields += Json.field("range", "height")
        fields += Json.field(
          "domain",
          Json.field(
            "fields", {
              val builder = List.newBuilder[JObject]
              series.foreach(s => {
                if (s.yAxisNo == 0) {
                  builder += JObject(
                    Json.field("data", s.label),
                    Json.field("field", "y")
                  )
                }
              })
              builder.result()
            }
          )
        )
        fields += Json.field("zero", value = false)
        _y0AxisMinValue.foreach(
          fields += Json.field("domainMin", _)
        )
        _y0AxisMaxValue.foreach(
          fields += Json.field("domainMax", _)
        )
        builder += JObject(fields.result())
      }
      // Right y scale, only when some series renders against axis 1.
      if (y1Used) {
        val fields = List.newBuilder[JField]
        fields += Json.field("name", "y1")
        fields += Json.field("type", _y1AxisFormat)
        fields += Json.field("range", "height")
        fields += Json.field(
          "domain",
          Json.field(
            "fields", {
              val builder = List.newBuilder[JObject]
              series.foreach(s => {
                if (s.yAxisNo == 1) {
                  builder += JObject(
                    Json.field("data", s.label),
                    Json.field("field", "y")
                  )
                }
              })
              builder.result()
            }
          )
        )
        // Fixed: this field was previously emitted twice for the y1 scale.
        fields += Json.field("zero", value = false)
        _y1AxisMinValue.foreach(
          fields += Json.field("domainMin", _)
        )
        _y1AxisMaxValue.foreach(
          fields += Json.field("domainMax", _)
        )
        builder += JObject(fields.result())
      }
      // Ordinal color scale mapping each series label to its configured color.
      locally {
        val fields = List.newBuilder[JField]
        fields += Json.field("name", "g")
        fields += Json.field("type", "ordinal")
        fields += Json.field(
          "range", {
            val builder = List.newBuilder[JString]
            series.foreach(s => {
              val c = s.color
              builder += Json(
                f"#${c.getRed}%02x${c.getGreen}%02x${c.getBlue}%02x"
              )
            })
            builder.result()
          }
        )
        fields += Json.field(
          "domain", {
            val builder = List.newBuilder[JString]
            series.foreach(s => {
              builder += Json(s.label)
            })
            builder.result()
          }
        )
        builder += JObject(fields.result())
      }
      Json.field("scales", builder.result())
    }

    val axes = {
      val builder = List.newBuilder[JObject]
      builder += JObject(
        Json.field("type", "x"),
        Json.field("scale", "x"),
        Json.field("title", _xAxisLabel.getOrElse("")),
        Json.field("grid", value = true)
      )
      if (y0Used) {
        builder += JObject(
          Json.field("type", "y"),
          Json.field("scale", "y0"),
          Json.field("title", _y0AxisLabel.getOrElse("")),
          Json.field("grid", value = true)
        )
      }
      if (y1Used) {
        builder += JObject(
          Json.field("type", "y"),
          Json.field("scale", "y1"),
          Json.field("title", _y1AxisLabel.getOrElse("")),
          Json.field("grid", value = true),
          Json.field("orient", "right")
        )
      }
      Json.field("axes", builder.result())
    }

    val legends = {
      // Shared legend styling: hide the stroke of the legend symbols.
      val properties = Json.field(
        "properties",
        Json.field(
          "symbols",
          Json.field(
            "stroke",
            Json.field("value", "transparent")
          )
        )
      )
      val builder = List.newBuilder[JObject]
      if (y0Used) {
        builder += JObject(
          Json.field("fill", "g"),
          Json.field("title", _y0AxisLabel.getOrElse("Left")),
          Json.field(
            "values", {
              val builder = List.newBuilder[JString]
              series.foreach(s => {
                if (s.yAxisNo == 0) {
                  builder += Json(s.label)
                }
              })
              builder.result()
            }
          ),
          properties
        )
      }
      if (y1Used) {
        builder += JObject(
          Json.field("fill", "g"),
          Json.field("title", _y1AxisLabel.getOrElse("Right")),
          Json.field(
            "values", {
              val builder = List.newBuilder[JString]
              series.foreach(s => {
                if (s.yAxisNo == 1) {
                  builder += Json(s.label)
                }
              })
              builder.result()
            }
          ),
          properties
        )
      }
      Json.field("legends", builder.result())
    }

    val marks = {
      val builder = List.newBuilder[JObject]
      series.foreach(s => {
        builder += JObject(
          // A series with visible symbols renders as points, otherwise as a line.
          Json.field("type", if (s.symbolSize > 0) "symbol" else "line"),
          Json.field(
            "from",
            Json.field("data", s.label)
          ),
          Json.field(
            "properties",
            Json.field(
              "enter", {
                val fields = List.newBuilder[JField]
                fields += Json.field(
                  "x",
                  Json.field("scale", "x"),
                  Json.field("field", "x")
                )
                fields += Json.field(
                  "y",
                  Json.field("scale", s"y${s.yAxisNo}"),
                  Json.field("field", "y")
                )
                fields += Json.field(
                  "stroke",
                  Json.field("scale", "g"),
                  Json.field("value", s.label)
                )
                fields += Json.field(
                  "strokeWidth",
                  Json.field("value", s.lineWidth)
                )
                if (s.symbolSize > 0) {
                  fields += Json.field(
                    "fill",
                    Json.field("scale", "g"),
                    Json.field("value", s.label)
                  )
                }
                JObject(fields.result())
              }
            ),
            Json.field("update", JObject()),
            Json.field("hover", JObject())
          )
        )
      })
      Json.field("marks", builder.result())
    }

    List(
      Json.field("width", width),
      Json.field("height", height),
      data,
      scales,
      axes,
      legends,
      marks
    )
  }

  override protected def doCopy()
  : ScatterPlot2D = ScatterPlot2D()

  override protected def doCopyTo(other: Chart)
  : Unit = other match {
    case other: ScatterPlot2D =>
      other._xAxisFormat    = _xAxisFormat
      other._xAxisLabel     = _xAxisLabel
      other._xAxisMinValue  = _xAxisMinValue
      other._xAxisMaxValue  = _xAxisMaxValue
      other._y0AxisFormat   = _y0AxisFormat
      other._y0AxisLabel    = _y0AxisLabel
      other._y0AxisMinValue = _y0AxisMinValue
      other._y0AxisMaxValue = _y0AxisMaxValue
      other._y1AxisFormat   = _y1AxisFormat
      other._y1AxisLabel    = _y1AxisLabel
      other._y1AxisMinValue = _y1AxisMinValue
      other._y1AxisMaxValue = _y1AxisMaxValue
    case _ =>
      throw new MatchError(other)
  }

}
object ScatterPlot2D {

  /** Creates an empty 2D scatter plot with default axis settings. */
  final def apply()
  : ScatterPlot2D = new ScatterPlot2D

}
| bashimao/ltudl | base/src/main/scala/edu/latrobe/io/vega/ScatterPlot2D.scala | Scala | apache-2.0 | 14,339 |
package services
import play.api.libs.json.{Format, Json}
import scala.concurrent.Future
/**
* This is the interface of the GitHub service which returns the list of repositories that a user can
* access in Github.
*/
trait GitHubService {

  /**
   * Fetches the repositories accessible to the authenticated GitHub account.
   *
   * @param oauthToken OAuth access token previously obtained from GitHub
   * @return future list of repository identifiers
   */
  def getGitHubProjects(oauthToken: String): Future[List[String]]
}
/**
* Companion object including case class model for GitHub related entities (Owner, Permission, Repo),
* and Reads/Writes to convert them from/to json format.
*/
object GitHubService {

  /**
   * Repository owner as returned by the GitHub REST API.
   *
   * Field names deliberately mirror the snake_case JSON keys of the API
   * payload so the derived [[Format]] below can bind them directly.
   */
  case class Owner(
                    login: String,
                    id: Int,
                    avatar_url: String,
                    gravatar_id: String,
                    url: String,
                    html_url: String,
                    followers_url: String,
                    following_url: String,
                    gists_url: String,
                    starred_url: String,
                    subscriptions_url: String,
                    organizations_url: String,
                    repos_url: String,
                    events_url: String,
                    received_events_url: String,
                    `type`:String,
                    site_admin:Boolean
                  )

  /**
   * Access rights of the authenticated user on a repository.
   *
   * @param admin whether the user has admin rights
   * @param push  whether the user may push
   * @param pull  whether the user may pull
   */
  case class Permission(admin: Boolean, push: Boolean, pull: Boolean)

  /**
   * GitHub repository as returned by the GitHub REST API, again mirroring the
   * snake_case JSON keys. `description` is optional because the API returns
   * null for repositories without one.
   */
  case class Repo(
                   id: Int,
                   name:String,
                   full_name:String,
                   owner: Owner,
                   `private` : Boolean,
                   html_url :String,
                   description: Option[String],
                   fork: Boolean,
                   url: String,
                   forks_url: String,
                   keys_url: String,
                   collaborators_url:String,
                   teams_url:String,
                   hooks_url:String,
                   issue_events_url:String,
                   events_url:String,
                   assignees_url:String,
                   branches_url:String,
                   tags_url:String,
                   blobs_url:String,
                   permissions: Permission
                  )

  // Macro-derived JSON (de)serializers. repoFormat relies on the Owner and
  // Permission formats declared just above being in implicit scope.
  implicit val ownerFormat: Format[Owner] = Json.format[Owner]
  implicit val permissionFormat: Format[Permission] = Json.format[Permission]
  implicit val repoFormat: Format[Repo] = Json.format[Repo]
}
| dcharoulis/teahub | app/services/GitHubService.scala | Scala | apache-2.0 | 3,704 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset down to code snippets that meet specific criteria and retrieves a sample of them, giving a basic overview of the dataset's contents without deeper analysis.