Skip to content

Commit

Permalink
Add a basic ThreadNet test for pipelining of forged blocks
Browse files Browse the repository at this point in the history
  • Loading branch information
amesgen authored Apr 19, 2022
1 parent a50c3f5 commit d5af480
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 6 deletions.
35 changes: 35 additions & 0 deletions ouroboros-consensus-test/src/Test/ThreadNet/General.hs
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ import Ouroboros.Consensus.Node.Run
import Ouroboros.Consensus.NodeId
import Ouroboros.Consensus.Protocol.Abstract (LedgerView)
import Ouroboros.Consensus.Protocol.LeaderSchedule
import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB
import Ouroboros.Consensus.TypeFamilyWrappers

import Ouroboros.Consensus.Util.Condense
Expand Down Expand Up @@ -478,6 +479,7 @@ prop_general_internal syncity pga testOutput =
prop_no_BlockRejections .&&.
prop_no_unexpected_CannotForges .&&.
prop_no_invalid_blocks .&&.
prop_pipelining .&&.
propSync
( prop_all_common_prefix maxForkLength (Map.elems nodeChains) .&&.
prop_all_growth .&&.
Expand Down Expand Up @@ -854,6 +856,39 @@ prop_general_internal syncity pga testOutput =
, (s, blk) <- Map.toAscList nodeOutputForges
]

-- Check that all self-issued blocks are pipelined.
--
-- For every core node, every block it forged in a slot where it was the
-- sole leader (per the oracular leader schedule) must also have been
-- announced as a tentative header via diffusion pipelining, i.e. appear
-- in a 'ChainDB.SetTentativeHeader' trace event of that same node.
prop_pipelining :: Property
prop_pipelining = conjoin
    [ counterexample ("Node " <> condense nid <> " did not pipeline") $
      counterexample ("some of its blocks forged as the sole slot leader:") $
      counterexample (condense forgedButNotPipelined) $
      Set.null forgedButNotPipelined
    | (nid, NodeOutput
        { nodeOutputForges
        , nodePipeliningEvents
        }) <- Map.toList testOutputNodes
      -- Pattern guard: only core nodes forge, so skip relay nodes.
    , CoreId cnid <- [nid]
      -- Points of all headers this node ever set as tentative header.
    , let tentativePoints = Set.fromList
            [ headerPoint hdr
            | ChainDB.SetTentativeHeader hdr <- nodePipeliningEvents
            ]
          -- Points of the blocks this node forged in slots where the
          -- leader schedule names it as the *only* leader (and in which
          -- it was not restarted, see the note below).
          forgedAsSoleLeaderPoints = Set.fromList $
            [ blockPoint blk
            | blk <- Map.elems nodeOutputForges
            , let s = blockSlot blk
                  NodeRestarts nrs = nodeRestarts
            , getLeaderSchedule actualLeaderSchedule Map.! s == [cnid]
              -- When the node is restarted while it is a slot
              -- leader, this property is often not satisfied in
              -- the Byron ThreadNet tests. As diffusion
              -- pipelining is concerned with up-to-date,
              -- long-running nodes, we ignore this edge case.
            , cnid `Map.notMember` Map.findWithDefault mempty s nrs
            ]
          -- Non-empty iff some sole-leader forge was never pipelined.
          forgedButNotPipelined =
            forgedAsSoleLeaderPoints Set.\\ tentativePoints
    ]

{-------------------------------------------------------------------------------
Final chains properties
-------------------------------------------------------------------------------}
Expand Down
25 changes: 19 additions & 6 deletions ouroboros-consensus-test/src/Test/ThreadNet/Network.hs
Original file line number Diff line number Diff line change
Expand Up @@ -692,13 +692,14 @@ runThreadNetwork systemTime ThreadNetworkArgs
-- ^ block selection tracer
-> Tracer m (LedgerUpdate blk)
-- ^ ledger updates tracer
-> Tracer m (ChainDB.TracePipeliningEvent blk)
-> NodeDBs (StrictTVar m MockFS)
-> CoreNodeId
-> ChainDbArgs Identity m blk
mkArgs
clock registry
cfg initLedger
invalidTracer addTracer selTracer updatesTracer
invalidTracer addTracer selTracer updatesTracer pipeliningTracer
nodeDBs _coreNodeId = ChainDbArgs {
-- HasFS instances
cdbHasFSImmutableDB = SomeHasFS $ simHasFS (nodeDBsImm nodeDBs)
Expand Down Expand Up @@ -754,6 +755,10 @@ runThreadNetwork systemTime ThreadNetworkArgs
mapM_ (traceWith updatesTracer) updates
traceWith selTracer (ChainDB.newTipPoint p, prj new)

ChainDB.TraceAddBlockEvent
(ChainDB.PipeliningEvent e)
-> traceWith pipeliningTracer e

_ -> pure ()

-- We don't expect any ledger warnings
Expand Down Expand Up @@ -794,16 +799,18 @@ runThreadNetwork systemTime ThreadNetworkArgs
wrapTracer tr = Tracer $ \(p, bno) -> do
s <- OracularClock.getCurrentSlot clock
traceWith tr (s, p, bno)
addTracer = wrapTracer $ nodeEventsAdds nodeInfoEvents
selTracer = wrapTracer $ nodeEventsSelects nodeInfoEvents
headerAddTracer = wrapTracer $ nodeEventsHeaderAdds nodeInfoEvents
addTracer = wrapTracer $ nodeEventsAdds nodeInfoEvents
selTracer = wrapTracer $ nodeEventsSelects nodeInfoEvents
headerAddTracer = wrapTracer $ nodeEventsHeaderAdds nodeInfoEvents
pipeliningTracer = nodeEventsPipelining nodeInfoEvents
let chainDbArgs = mkArgs
clock registry
pInfoConfig pInfoInitLedger
invalidTracer
addTracer
selTracer
updatesTracer
pipeliningTracer
nodeInfoDBs
coreNodeId
chainDB <- snd <$>
Expand Down Expand Up @@ -1396,6 +1403,8 @@ data NodeEvents blk ev = NodeEvents
-- ^ 'ChainDB.getTipBlockNo' for each node at the onset of each slot
, nodeEventsUpdates :: ev (LedgerUpdate blk)
-- ^ Ledger updates every time we adopt a block/switch to a fork
, nodeEventsPipelining :: ev (ChainDB.TracePipeliningEvent blk)
-- ^ Pipelining events tracking the tentative header
}

-- | A vector with an element for each database of a node
Expand Down Expand Up @@ -1423,9 +1432,10 @@ newNodeInfo = do
(t5, m5) <- recordingTracerTVar
(t6, m6) <- recordingTracerTVar
(t7, m7) <- recordingTracerTVar
(t8, m8) <- recordingTracerTVar
pure
( NodeEvents t1 t2 t3 t4 t5 t6 t7
, NodeEvents <$> m1 <*> m2 <*> m3 <*> m4 <*> m5 <*> m6 <*> m7
( NodeEvents t1 t2 t3 t4 t5 t6 t7 t8
, NodeEvents <$> m1 <*> m2 <*> m3 <*> m4 <*> m5 <*> m6 <*> m7 <*> m8
)

(nodeInfoDBs, readDBs) <- do
Expand Down Expand Up @@ -1461,6 +1471,7 @@ data NodeOutput blk = NodeOutput
, nodeOutputNodeDBs :: NodeDBs MockFS
, nodeOutputSelects :: Map SlotNo [(RealPoint blk, BlockNo)]
, nodeOutputUpdates :: [LedgerUpdate blk]
, nodePipeliningEvents :: [ChainDB.TracePipeliningEvent blk]
}

data TestOutput blk = TestOutput
Expand Down Expand Up @@ -1494,6 +1505,7 @@ mkTestOutput vertexInfos = do
, nodeEventsSelects
, nodeEventsTipBlockNos
, nodeEventsUpdates
, nodeEventsPipelining
} = nodeInfoEvents
let nodeOutput = NodeOutput
{ nodeOutputAdds =
Expand All @@ -1520,6 +1532,7 @@ mkTestOutput vertexInfos = do
, nodeOutputInvalids = (:[]) <$> Map.fromList nodeEventsInvalids
, nodeOutputNodeDBs = nodeInfoDBs
, nodeOutputUpdates = nodeEventsUpdates
, nodePipeliningEvents = nodeEventsPipelining
}

pure
Expand Down

0 comments on commit d5af480

Please sign in to comment.