// sss.openstar.test.MultiTxCounterTest.scala

package sss.openstar.test
import akka.actor.Status.{Failure => FutureFailure}
import akka.actor.{Actor, ActorContext, ActorLogging, ActorRef, ActorSystem, Cancellable, Props, ReceiveTimeout}
import akka.pattern.pipe
import sss.db
import sss.openstar.StartupHooks.HookDone
import sss.openstar.chains.TxWriterActor._
import sss.openstar.counterledger.{CounterLedger, CounterLedgerTx, FailOnTheseNodes, IncrementFailure}
import sss.openstar.ledger.{LedgerId, LedgerItem, SignedTxEntry}
import sss.openstar.network.MessageEventBus
import sss.openstar.test.MultiIdentityTestTransactionSender.FireTxs
import sss.openstar.tools.SendTxSupport.SendTx
import scala.concurrent.duration._
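
/** Configuration for a single test run: the assertion evaluated once processing stops,
  * the chaos and failing-processing node lists, the per-node probability of a tx failing,
  * and the limits (total committed and rejected tx counts) that end the run.
  */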
case class TestCaseConfig(assertCallback: AssertionContext => Unit,
                          chaosNodes: Seq[String],
                          probabilityOfTxFailingOnNode: Double = 0,
                          failingProcessingNodes: Seq[String] = Seq.empty,
                          maxTotalTxs: Option[Int] = Some(5000),
                          maxRejectedTxs: Option[Int] = Some(500))
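
/** Snapshot passed to the assertion callback: the number of txs this actor saw committed,
  * the counter ledger's success and failure counters, and the running ActorSystem.
  */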
case class AssertionContext(totalProcessed: Long, counter: Long, failures: Long, as: ActorSystem)
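
/** Self-message that triggers a periodic progress log line. */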
case object Printout
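
/** Factory for the test actor. A minimal usage sketch (the value names below are
  * placeholders for whatever the surrounding test provides, and the required implicits
  * - ActorContext, SendTx, MessageEventBus, SyncRunContext - are assumed to be in scope):
  *
  * {{{
  *   val ref = MultiTxCounterTest(ledger, ledgerId, maxInProgress = 10, config)
  * }}}
  */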
object MultiTxCounterTest {

  def apply(counterLedger: CounterLedger,
            counterLedgerId: LedgerId,
            maxInProgress: Int,
            testCaseConfig: TestCaseConfig)(implicit context: ActorContext,
                                            sendTx: SendTx,
                                            events: MessageEventBus,
                                            runContext: db.SyncRunContext): ActorRef = {
    context.actorOf(Props(new MultiTxCounterTest(counterLedger, counterLedgerId, maxInProgress, testCaseConfig)), "MultiTx")
  }
}
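
/** Test driver actor: once startup hooks complete it keeps up to `maxInProgress`
  * counter-ledger txs in flight, counts commits and rejections, retries on transient
  * failures, and, when a configured limit is reached or activity stops, reads the
  * ledger counters and invokes the configured assertion callback.
  */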
class MultiTxCounterTest(counterLedger: CounterLedger,
                         counterLedgerId: LedgerId,
                         maxInProgress: Int,
                         testCaseConfig: TestCaseConfig)(implicit actorContext: ActorContext,
                                                         sendTx: SendTx,
                                                         events: MessageEventBus,
                                                         runContext: db.SyncRunContext)
  extends Actor with ActorLogging {
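
  // Subscribe to HookDone so the run only starts once startup hooks have completed.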
  override def preStart(): Unit = {
    super.preStart()
    events.subscribe(HookDone.getClass)
  }
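
  // Coin toss built from the configured per-node failure probability, plus helpers
  // that wrap counter-ledger commands as signed LedgerItems for the counter ledger.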
  private lazy val failsOnNodeId = CoinToss(testCaseConfig.probabilityOfTxFailingOnNode)

  private def createLedgerItem(cmd: CounterLedgerTx) =
    LedgerItem(counterLedgerId, cmd.txId, SignedTxEntry(cmd.toBytes).toBytes)

  private def createIncrementFailure = createLedgerItem(IncrementFailure())

  private def createFailOnNodes = {
    createLedgerItem(FailOnTheseNodes(Seq.empty))
  }
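
  // If no message arrives for 120 seconds the actor receives ReceiveTimeout,
  // which ends the run and triggers the assertions.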
  import actorContext.dispatcher

  actorContext.setReceiveTimeout(120.seconds)

  val processingFinishedTimeout = 30.seconds
  val printoutTimeout = 60.seconds

  val as = actorContext.system
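
  // Mutable bookkeeping: txs currently in flight, committed and rejected totals,
  // and the currently scheduled FireTxs message, if any.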
  private var inProgress = 0
  private var totalTxsProcessed: Int = 0
  private var rejectedTxs: Int = 0
  private var schedFire: Option[Cancellable] = None
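
  // Schedule a FireTxs self-message after the given delay, unless one is already pending.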
  private def scheduleFireTxs(delayInSeconds: Int) = schedFire = schedFire match {
    case None => Option(as.scheduler.scheduleOnce(delayInSeconds.seconds, self, FireTxs))
    case x    => x
  }
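
  // Main loop: fire batches of txs, account for commits, rejections and transient
  // failures, and finish by running the assertions when a limit is hit or activity stops.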
  override def receive: Receive = {

    // Startup hooks done: start firing txs and schedule the periodic printout.
    case HookDone =>
      log.info("Starting!")
      scheduleFireTxs(1)
      as.scheduler.scheduleOnce(printoutTimeout, self, Printout)

    // A piped future failed: free the slot and retry shortly.
    case FutureFailure(e) =>
      log.debug(s"Failed, retry, $e")
      inProgress -= 1
      scheduleFireTxs(2)

    // Tx rejected outright: count it; wind down if over the rejection limit,
    // otherwise record the failure on the counter ledger.
    case r: InternalNack =>
      log.warning(s"Rejected tx $r")
      rejectedTxs += 1
      if (testCaseConfig.maxRejectedTxs.fold(false)(_ < rejectedTxs)) {
        as.scheduler.scheduleOnce(processingFinishedTimeout, self, ReceiveTimeout)
      } else {
        sendTx(createIncrementFailure).whenAvailableLocally.map(_.commitTxResult.internalCommit) pipeTo self
      }

    // Run finished (limit reached or no recent activity): read the ledger counters
    // and hand them to the configured assertion callback.
    case ReceiveTimeout =>
      val counter = counterLedger.successCounter().runSyncAndGet
      val failures = counterLedger.failureCounter().runSyncAndGet
      log.info(s"totalProcessed = $totalTxsProcessed counter = $counter failures = $failures")
      testCaseConfig.assertCallback(AssertionContext(totalTxsProcessed, counter, failures, as))

    // Tx committed: free the slot; wind down if the total limit is reached, otherwise fire more.
    case com: InternalCommit =>
      log.debug(s"Got commit: $com, going again")
      inProgress -= 1
      totalTxsProcessed += 1
      if (testCaseConfig.maxTotalTxs.fold(false)(_ < totalTxsProcessed)) {
        as.scheduler.scheduleOnce(processingFinishedTimeout, self, ReceiveTimeout)
      } else {
        self ! FireTxs
      }

    // Temporary rejection: free the slot and retry after a short delay.
    case r: InternalTempNack =>
      log.debug("TEMP NACK")
      inProgress -= 1
      if (inProgress % 20 == 0) log.debug(r.toString + s" TxResult $r reschedule")
      scheduleFireTxs(2)

    // Capacity available: top up to maxInProgress in-flight txs.
    case FireTxs if inProgress < maxInProgress =>
      log.debug("FireTxs!")
      schedFire = None
      (0 until (maxInProgress - inProgress)) foreach { _ =>
        inProgress += 1
        sendTx(createFailOnNodes).whenAvailableLocally.map(_.commitTxResult.internalCommit) pipeTo self
      }

    // At capacity: cancel any pending schedule and try again in a second.
    case FireTxs =>
      log.debug("FireTxs ")
      schedFire.foreach(_.cancel())
      schedFire = None
      if (inProgress % 2 == 0) log.debug(s"$inProgress in progress, wait a few.... (total ${totalTxsProcessed})")
      scheduleFireTxs(1)

    // Periodic progress report, rescheduled each time it fires.
    case Printout =>
      log.info(s"Printout total=$totalTxsProcessed and rejected=$rejectedTxs")
      as.scheduler.scheduleOnce(printoutTimeout, self, Printout)
  }
}