From a969b377cac85634681b4a6e872a91ce0150b598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Mon, 6 May 2024 09:52:23 +0000 Subject: [PATCH] Allow to specify tests with multiple honest peers * Rewrite `Peers` to accept arbitrary number of peers * Actually generate honest peers in CSJ happy path * Support a field for extra honest peers in `GenesisTest` * Allow `uniformPoints` to generate schedules with multiple honest peers * Adapt CSJ test to use native multiple honest peers generation * Share partial accessor functions used in tests * Use partial accessor to retrieve the only honest peer --- .../ouroboros-consensus-diffusion.cabal | 1 + .../Consensus/Genesis/Setup/Classifiers.hs | 18 +- .../Test/Consensus/Genesis/Setup/GenChains.hs | 12 +- .../Test/Consensus/Genesis/Tests/CSJ.hs | 32 +-- .../Genesis/Tests/DensityDisconnect.hs | 49 ++-- .../Test/Consensus/Genesis/Tests/LoE.hs | 22 +- .../Test/Consensus/Genesis/Tests/LoP.hs | 22 +- .../Test/Consensus/Genesis/Tests/Uniform.hs | 39 +-- .../Test/Consensus/PeerSimulator/Run.hs | 3 +- .../Consensus/PeerSimulator/StateDiagram.hs | 9 +- .../Test/Consensus/PointSchedule.hs | 93 +++++-- .../Test/Consensus/PointSchedule/Peers.hs | 234 +++++++++++------- .../Test/Consensus/PointSchedule/Shrinking.hs | 128 ++++++---- .../PointSchedule/Shrinking/Tests.hs | 8 +- .../Test/Util/PartialAccessors.hs | 42 ++++ 15 files changed, 431 insertions(+), 281 deletions(-) create mode 100644 ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal index 26c461887c..39d63d5e69 100644 --- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal +++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal @@ -266,6 +266,7 @@ test-suite consensus-test Test.Consensus.PointSchedule.SinglePeer Test.Consensus.PointSchedule.SinglePeer.Indices Test.Consensus.PointSchedule.Tests + Test.Util.PartialAccessors Test.Util.TersePrinting build-depends: diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs index 559b6f1712..a7eb599293 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs @@ -38,8 +38,7 @@ import Test.Consensus.Network.AnchoredFragment.Extras (slotLength) import Test.Consensus.PeerSimulator.StateView (PeerSimulatorResult (..), StateView (..), pscrToException) import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId (..), - Peers (..)) +import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..)) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.Util.Orphans.IOLike () import Test.Util.TestBlock (TestBlock, TestHash (TestHash), @@ -165,15 +164,15 @@ resultClassifiers GenesisTest{gtSchedule} RunGenesisTestResult{rgtrStateView} = StateView{svPeerSimulatorResults} = rgtrStateView adversaries :: [PeerId] - adversaries = Map.keys $ others gtSchedule + adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers gtSchedule adversariesCount = fromIntegral $ length adversaries adversariesExceptions :: [(PeerId, SomeException)] adversariesExceptions = mapMaybe 
(\PeerSimulatorResult{psePeerId, pseResult} -> case psePeerId of - HonestPeer -> Nothing - pid -> (pid,) <$> pscrToException pseResult + HonestPeer _ -> Nothing + pid -> (pid,) <$> pscrToException pseResult ) svPeerSimulatorResults @@ -251,18 +250,17 @@ scheduleClassifiers GenesisTest{gtSchedule = schedule} = rollbacks :: Peers Bool rollbacks = hasRollback <$> schedule - adversaryRollback = any value $ others rollbacks + adversaryRollback = any id $ adversarialPeers rollbacks + honestRollback = any id $ honestPeers rollbacks - honestRollback = value $ honest rollbacks - - allAdversariesEmpty = all value $ others $ null <$> schedule + allAdversariesEmpty = all id $ adversarialPeers $ null <$> schedule isTrivial :: PeerSchedule TestBlock -> Bool isTrivial = \case [] -> True (t0, _):points -> all ((== t0) . fst) points - allAdversariesTrivial = all value $ others $ isTrivial <$> schedule + allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> schedule simpleHash :: HeaderHash block ~ TestHash => diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs index 70bbceb8fa..e65772275c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs @@ -8,6 +8,7 @@ module Test.Consensus.Genesis.Setup.GenChains ( GenesisTest (..) , genChains + , genChainsWithExtraHonestPeers ) where import Cardano.Slotting.Time (SlotLength, getSlotLength, @@ -94,6 +95,9 @@ genAlternativeChainSchema (testRecipeH, arHonest) = let H.ChainSchema _ v = A.uniformAdversarialChain (Just alternativeAsc) testRecipeA'' seed pure $ Just (prefixCount, Vector.toList (getVector v)) +genChains :: QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) +genChains = genChainsWithExtraHonestPeers (pure 0) + -- | Random generator for a block tree. The block tree contains one trunk (the -- “honest” chain) and as many branches as given as a parameter (the -- “alternative” chains or “bad” chains). For instance, one such tree could be @@ -103,8 +107,10 @@ genAlternativeChainSchema (testRecipeH, arHonest) = -- trunk: O─────1──2──3──4─────5──6──7 -- │ ╰─────6 -- ╰─────3──4─────5 -genChains :: QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) -genChains genNumForks = do +-- For now, the @extraHonestPeers@ generator is only used to fill the GenesisTest field. +-- However, in the future it could also be used to generate "short forks" near the tip of the trunk. +genChainsWithExtraHonestPeers :: QC.Gen Word -> QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) +genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do (asc, honestRecipe, someHonestChainSchema) <- genHonestChainSchema H.SomeHonestChainSchema _ _ honestChainSchema <- pure someHonestChainSchema @@ -116,6 +122,7 @@ genChains genNumForks = do HonestRecipe (Kcp kcp) (Scg scg) delta _len = honestRecipe numForks <- genNumForks + gtExtraHonestPeers <- genNumExtraHonest alternativeChainSchemas <- replicateM (fromIntegral numForks) (genAlternativeChainSchema (honestRecipe, honestChainSchema)) pure $ GenesisTest { gtSecurityParam = SecurityParam (fromIntegral kcp), @@ -131,6 +138,7 @@ genChains genNumForks = do -- would make for interesting tests. gtCSJParams = CSJParams $ fromIntegral scg, gtBlockTree = foldl' (flip BT.addBranch') (BT.mkTrunk goodChain) $ zipWith (genAdversarialFragment goodBlocks) [1..] 
alternativeChainSchemas, + gtExtraHonestPeers, gtSchedule = () } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index 25da0a1dce..d0b8489952 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -3,9 +3,7 @@ module Test.Consensus.Genesis.Tests.CSJ (tests) where -import Control.Monad (replicateM) import Data.Containers.ListUtils (nubOrd) -import Data.Functor (($>)) import Data.List (nub) import Data.Maybe (mapMaybe) import Ouroboros.Consensus.Block (blockSlot, succWithOrigin) @@ -22,11 +20,11 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), import Test.Consensus.PeerSimulator.StateView (StateView (..)) import Test.Consensus.PeerSimulator.Trace (TraceEvent (..)) import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..), - mkPeers) +import Test.Consensus.PointSchedule.Peers (Peers (..), peers') import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestBlock (Header, TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize) @@ -63,14 +61,11 @@ tests = prop_happyPath :: Bool -> Property prop_happyPath synchronized = forAllGenesisTest - ( do - gt <- genChains $ pure 0 - honest <- genHonestSchedule gt - numOthers <- choose (1, 3) - otherHonests <- if synchronized - then pure $ replicate numOthers honest - else replicateM numOthers (genHonestSchedule gt) - pure $ gt $> mkPeers honest otherHonests + ( if synchronized + then genChainsWithExtraHonestPeers (choose (2, 4)) (pure 0) + `enrichedWith` genUniformSchedulePoints + else genChains (pure 0) + `enrichedWith` genDuplicatedHonestSchedule ) ( defaultSchedulerConfig { scEnableCSJ = True @@ -119,13 +114,12 @@ prop_happyPath synchronized = (receivedHeadersOnlyOnce && receivedHeadersFromOnlyOnePeer) ) where - -- | This might seem wasteful, as we discard generated adversarial schedules. - -- It actually isn't, since we call it on trees that have no branches besides - -- the trunk, so no adversaries are generated. 
- genHonestSchedule :: GenesisTest TestBlock () -> Gen (PeerSchedule TestBlock) - genHonestSchedule gt = do - ps <- genUniformSchedulePoints gt - pure $ value $ honest ps + genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PeersSchedule TestBlock) + genDuplicatedHonestSchedule gt@GenesisTest{gtExtraHonestPeers} = do + Peers {honestPeers} <- genUniformSchedulePoints gt + pure $ peers' + (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) + [] isNewerThanJumpSizeFromTip :: GenesisTestFull TestBlock -> Header TestBlock -> Bool isNewerThanJumpSizeFromTip gt hdr = diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index 87c85f54b9..7c6d9e75c8 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -59,6 +59,7 @@ import Test.QuickCheck.Extras (unsafeMapSuchThatJust) import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TersePrinting (terseHFragment, terseHeader) import Test.Util.TestBlock (TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize, @@ -120,7 +121,7 @@ staticCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = tips = branchTip <$> candidates candidates :: Map PeerId (AnchoredFragment TestBlock) - candidates = Map.fromList (zip (HonestPeer : enumerateAdversaries) chains) + candidates = Map.fromList (zip (HonestPeer 1 : enumerateAdversaries) chains) chains = btTrunk gtBlockTree : (btbFull <$> branches) @@ -134,8 +135,8 @@ prop_densityDisconnectStatic = let (disconnect, _) = densityDisconnect sgen k (mkState <$> suffixes) suffixes loeFrag counterexample "it should disconnect some node" (not (null disconnect)) .&&. - counterexample "it should not disconnect the honest peer" - (HonestPeer `notElem` disconnect) + counterexample "it should not disconnect the honest peers" + (not $ any isHonestPeerId disconnect) where mkState :: AnchoredFragment (Header TestBlock) -> ChainSyncState TestBlock mkState frag = @@ -193,7 +194,7 @@ initCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = fullTree = gtBlockTree } where - peers = mkPeers (peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)) (branchPeer <$> branches) + peers = peers' [peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)] (branchPeer <$> branches) branchPeer branch = peer (btbPrefix branch) (btbSuffix branch) (btbFull branch) @@ -230,8 +231,8 @@ data UpdateEvent = UpdateEvent { } snapshotTree :: Peers EvolvingPeer -> BlockTree (Header TestBlock) -snapshotTree Peers {honest, others} = - foldr addBranch' (mkTrunk (candidate (value honest))) (candidate . 
value <$> others) +snapshotTree Peers {honestPeers, adversarialPeers} = + foldr addBranch' (mkTrunk (candidate (getHonestPeer honestPeers))) (candidate <$> adversarialPeers) prettyUpdateEvent :: UpdateEvent -> [String] prettyUpdateEvent UpdateEvent {target, added, killed, bounds, tree, loeFrag, curChain} = @@ -274,7 +275,7 @@ updatePeers :: UpdateEvent -> Either (MonotonicityResult, Peers EvolvingPeer) Evolution updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, killed = killedNow} - | HonestPeer `Set.member` killedNow + | HonestPeer 1 `Set.member` killedNow = Left (HonestKilled, peers) | not (null violations) = Left (Nonmonotonic event, peers) @@ -287,12 +288,12 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k violations = killedBefore \\ killedNow -- The new state if no violations were detected - evo@Evolution {peers = Peers {others = remaining}} + evo@Evolution {peers = Peers {adversarialPeers = remaining}} | targetExhausted -- If the target is done, reset the set of killed peers, since other peers -- may have lost only against the target. -- Remove the target from the active peers. - = Evolution {peers = peers {others = Map.delete target (others peers)}, killed = mempty} + = Evolution {peers = deletePeer target peers, killed = mempty} | otherwise -- Otherwise replace the killed peers with the current set = Evolution {peers, killed = killedNow} @@ -312,11 +313,11 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k -- The selection will then be computed by taking up to k blocks after the immutable tip -- on this peer's candidate fragment. firstBranch :: Peers EvolvingPeer -> Peer EvolvingPeer -firstBranch Peers {honest, others} = +firstBranch peers = fromMaybe newest $ - minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList others)) + minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList (adversarialPeers'' peers))) where - newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (honest : toList others) + newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (toList (honestPeers'' peers) ++ toList (adversarialPeers'' peers)) forkAnchor = fromWithOrigin 0 . AF.anchorToSlotNo . AF.anchor . forkSuffix . value hasForked Peer {value = EvolvingPeer {candidate, forkSlot}} = AF.headSlot candidate >= forkSlot @@ -325,7 +326,7 @@ firstBranch Peers {honest, others} = -- for all peers, and then taking the earliest among the results. 
immutableTip :: Peers EvolvingPeer -> AF.Point (Header TestBlock) immutableTip peers = - minimum (lastHonest <$> toList (others peers)) + minimum (lastHonest <$> toList (adversarialPeers'' peers)) where lastHonest Peer {value = EvolvingPeer {candidate, forkSlot = NotOrigin forkSlot}} = AF.headPoint $ @@ -470,7 +471,7 @@ prop_densityDisconnectTriggersChainSel = ( \GenesisTest {gtBlockTree, gtSchedule} stateView@StateView {svTipBlock} -> let - othersCount = Map.size (others gtSchedule) + othersCount = Map.size (adversarialPeers gtSchedule) exnCorrect = case exceptionsByComponent ChainSyncClient stateView of [fromException -> Just DensityTooLow] -> True [] | othersCount == 0 -> True @@ -482,16 +483,6 @@ prop_densityDisconnectTriggersChainSel = ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - - getTrunkTip :: HasHeader blk => BlockTree blk -> blk - getTrunkTip tree = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - -- 1. The adversary advertises blocks up to the intersection. -- 2. The honest node advertises all its chain, which is -- long enough to be blocked by the LoE. @@ -506,16 +497,14 @@ prop_densityDisconnectTriggersChainSel = intersect = case btbPrefix branch of (AF.Empty _) -> Origin (_ AF.:> tipBlock) -> At tipBlock - advTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + advTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain up to the intersection. - [ (Time 0, scheduleTipPoint trunkTip), + [[(Time 0, scheduleTipPoint trunkTip), (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) - ] + ]] -- Advertise the alternate branch early, but wait for the honest -- node to have served its chain before disclosing the alternate -- branch is not dense enough. 
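
Usage sketch (illustrative; not part of the diff): the hunks above replace the old `mkPeers`/`honest`/`others` interface with the reworked `Peers` record from `Test.Consensus.PointSchedule.Peers` (changed further down in this patch), which indexes honest and adversarial peers by `Int` and is built with `peers'`. A minimal example of building and querying such a value, using only names introduced in this patch; `examplePeers` itself is a hypothetical name:

    import           Test.Consensus.PointSchedule.Peers
                       (PeerId (..), Peers (..), getPeerIds, peers')

    -- One honest peer and two adversaries; ids are assigned from 1 upwards.
    examplePeers :: Peers Char
    examplePeers = peers' ['h'] ['a', 'b']

    -- getPeerIds examplePeers
    --   == [HonestPeer 1, AdversarialPeer 1, AdversarialPeer 2]
    -- honestPeers      examplePeers == Map.fromList [(1, 'h')]
    -- adversarialPeers examplePeers == Map.fromList [(1, 'a'), (2, 'b')]
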
diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index fe6b842b54..c3cd74a925 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -19,13 +19,14 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, mkPeers) +import Test.Consensus.PointSchedule.Peers (Peers, peers') import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, scheduleHeaderPoint, scheduleTipPoint) import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestEnv (adjustQuickCheckTests) tests :: TestTree @@ -76,27 +77,18 @@ prop_adversaryHitsTimeouts timeoutsEnabled = in selectedCorrect && exceptionsCorrect ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - delaySchedule :: HasHeader blk => BlockTree blk -> Peers (PeerSchedule blk) delaySchedule tree = - let trunkTip = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock + let trunkTip = getTrunkTip tree branch = getOnlyBranch tree intersectM = case btbPrefix branch of (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock - branchTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + branchTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. - ( (Time 0, scheduleTipPoint trunkTip) : case intersectM of + [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of Nothing -> [ (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) @@ -107,7 +99,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled = (Time 5, scheduleHeaderPoint trunkTip), (Time 5, scheduleBlockPoint trunkTip) ] - ) + ] -- The one adversarial peer advertises and serves up to the -- intersection early, then waits more than the short wait timeout. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index 4c97bac307..0c861ebe9f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -22,7 +22,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, mkPeers, +import Test.Consensus.PointSchedule.Peers (Peers, peers', peersOnlyHonest) import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, @@ -30,6 +30,7 @@ import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestEnv (adjustQuickCheckTests) tests :: TestTree @@ -212,27 +213,18 @@ prop_delayAttack lopEnabled = in selectedCorrect && exceptionsCorrect ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - delaySchedule :: (HasHeader blk) => BlockTree blk -> Peers (PeerSchedule blk) delaySchedule tree = - let trunkTip = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock + let trunkTip = getTrunkTip tree branch = getOnlyBranch tree intersectM = case btbPrefix branch of (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock - branchTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + branchTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. - ( (Time 0, scheduleTipPoint trunkTip) : case intersectM of + [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of Nothing -> [ (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) @@ -243,7 +235,7 @@ prop_delayAttack lopEnabled = (Time 5, scheduleHeaderPoint trunkTip), (Time 5, scheduleBlockPoint trunkTip) ] - ) + ] -- Advertise the alternate branch early, but don't serve it -- past the intersection, and wait for LoP bucket. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index 98e4c8a6dc..ebde97568c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -40,8 +40,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..), - value) +import Test.Consensus.PointSchedule.Peers (Peers (..), isHonestPeerId) import Test.Consensus.PointSchedule.Shrinking (shrinkByRemovingAdversaries, shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer @@ -52,6 +51,7 @@ import Test.QuickCheck import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.QuickCheck (le) import Test.Util.TestBlock (TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize, @@ -89,13 +89,13 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = -- to the governor that the density is too low. longerThanGenesisWindow ==> conjoin [ - counterexample "The honest peer was disconnected" (HonestPeer `notElem` disconnected), + counterexample "An honest peer was disconnected" (not $ any isHonestPeerId disconnected), counterexample ("The immutable tip is not honest: " ++ show immutableTip) $ property (isHonest immutableTipHash), immutableTipIsRecent ] where - advCount = Map.size (others (gtSchedule genesisTest)) + advCount = Map.size (adversarialPeers (gtSchedule genesisTest)) immutableTipIsRecent = counterexample ("Age of the immutable tip: " ++ show immutableTipAge) $ @@ -129,7 +129,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = [] -> "No peers were disconnected" peers -> "Some peers were disconnected: " ++ intercalate ", " (condense <$> peers) - honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ value $ honest $ gtSchedule genesisTest + honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ gtSchedule genesisTest GenesisTest {gtBlockTree, gtGenesisWindow = GenesisWindow s, gtDelay = Delta d} = genesisTest @@ -161,7 +161,12 @@ prop_serveAdversarialBranches = forAllGenesisTest theProperty genUniformSchedulePoints :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) -genUniformSchedulePoints gt = stToGen (uniformPoints (gtBlockTree gt)) +genUniformSchedulePoints gt = stToGen (uniformPoints pointsGeneratorParams (gtBlockTree gt)) + where + pointsGeneratorParams = PointsGeneratorParams + { pgpExtraHonestPeers = fromIntegral $ gtExtraHonestPeers gt + , pgpDowntime = NoDowntime + } -- Note [Leashing attacks] -- @@ -212,7 +217,7 @@ prop_leashingAttackStalling = genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) genLeashingSchedule genesisTest = do Peers honest advs0 <- ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest - advs <- mapM (mapM dropRandomPoints) advs0 + advs <- mapM dropRandomPoints advs0 pure $ Peers honest advs disableBoringTimeouts gt = @@ -266,15 +271,15 @@ prop_leashingAttackTimeLimited = -- | A schedule which doesn't run past the last event of the honest peer 
genTimeLimitedSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) genTimeLimitedSchedule genesisTest = do - Peers honest advs0 <- genUniformSchedulePoints genesisTest + Peers honests advs0 <- genUniformSchedulePoints genesisTest let timeLimit = estimateTimeBound (gtChainSyncTimeouts genesisTest) (gtLoPBucketParams genesisTest) - (value honest) - (map value $ Map.elems advs0) - advs = fmap (fmap (takePointsUntil timeLimit)) advs0 - extendedHonest = extendScheduleUntil timeLimit <$> honest - pure $ Peers extendedHonest advs + (getHonestPeer honests) + (Map.elems advs0) + advs = fmap (takePointsUntil timeLimit) advs0 + extendedHonests = extendScheduleUntil timeLimit <$> honests + pure $ Peers extendedHonests advs takePointsUntil limit = takeWhile ((<= limit) . fst) @@ -390,7 +395,7 @@ prop_downtime :: Property prop_downtime = forAllGenesisTest (genChains (QC.choose (1, 4)) `enrichedWith` \ gt -> - ensureScheduleDuration gt <$> stToGen (uniformPointsWithDowntime (gtSecurityParam gt) (gtBlockTree gt))) + ensureScheduleDuration gt <$> stToGen (uniformPoints (pointsGeneratorParams gt) (gtBlockTree gt))) defaultSchedulerConfig { scEnableLoE = True @@ -402,3 +407,9 @@ prop_downtime = forAllGenesisTest shrinkPeerSchedules theProperty + + where + pointsGeneratorParams gt = PointsGeneratorParams + { pgpExtraHonestPeers = fromIntegral (gtExtraHonestPeers gt) + , pgpDowntime = DowntimeWithSecurityParam (gtSecurityParam gt) + } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index 2954a6af94..c3c6298f43 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -16,6 +16,7 @@ import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) import Data.Foldable (for_) import Data.Functor (void) +import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Ouroboros.Consensus.Block @@ -460,7 +461,7 @@ runPointSchedule :: m (StateView TestBlock) runPointSchedule schedulerConfig genesisTest tracer0 = withRegistry $ \registry -> do - peerSim <- makePeerSimulatorResources tracer gtBlockTree (getPeerIds gtSchedule) + peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds gtSchedule) lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim (chainDb, stateViewTracers) <- runScheduler (Tracer $ traceWith tracer . TraceSchedulerEvent) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs index 937f4830ce..61b1e7914b 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs @@ -598,11 +598,6 @@ instance Condense RenderCell where CellEllipsis -> " .. " RenderCell _ cell -> condense cell -renderPeerId :: PeerId -> String -renderPeerId = \case - HonestPeer -> "honest" - PeerId p -> p - slotWidth :: NonEmpty Cell -> SlotWidth slotWidth = maximum . 
fmap cellWidth @@ -612,7 +607,7 @@ slotWidth = CellPeers peerIds -> SlotWidth (sum (labelWidth <$> peerIds)) _ -> 1 - labelWidth pid = 2 + length (renderPeerId pid) + labelWidth pid = 2 + length (show pid) sortWidth = \case CellHere as -> sum (pointWidth <$> as) @@ -773,7 +768,7 @@ renderSlotNo config width num = renderPeers :: [PeerId] -> Col renderPeers peers = - ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ renderPeerId p)) | p <- peers] + ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ show p)) | p <- peers] renderCell :: RenderConfig -> RenderCell -> Col renderCell config@RenderConfig {ellipsis} = \case diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs index a883a4fc84..3171bbf826 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs @@ -23,6 +23,7 @@ module Test.Consensus.PointSchedule ( BlockFetchTimeout (..) , CSJParams (..) + , DowntimeParams (..) , ForecastRange (..) , GenesisTest (..) , GenesisTestFull @@ -30,6 +31,7 @@ module Test.Consensus.PointSchedule ( , LoPBucketParams (..) , PeerSchedule , PeersSchedule + , PointsGeneratorParams (..) , RunGenesisTestResult (..) , enrichedWith , ensureScheduleDuration @@ -43,16 +45,17 @@ module Test.Consensus.PointSchedule ( , prettyPeersSchedule , stToGen , uniformPoints - , uniformPointsWithDowntime ) where import Cardano.Slotting.Time (SlotLength) +import Control.Monad (replicateM) import Control.Monad.Class.MonadTime.SI (Time (Time), addTime, diffTime) import Control.Monad.ST (ST) import Data.Foldable (toList) import Data.Functor (($>)) import Data.List (mapAccumL, partition, scanl') +import qualified Data.Map.Strict as Map import Data.Maybe (catMaybes, fromMaybe, mapMaybe) import Data.Time (DiffTime) import Data.Word (Word64) @@ -76,7 +79,7 @@ import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule.NodeState (NodeState (..), genesisNodeState) import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..), - mkPeers, peersList) + peers', peersList) import Test.Consensus.PointSchedule.SinglePeer (IsTrunk (IsBranch, IsTrunk), PeerScheduleParams (..), SchedulePoint (..), defaultPeerScheduleParams, mergeOn, @@ -96,6 +99,9 @@ prettyPeersSchedule :: PeersSchedule blk -> [String] prettyPeersSchedule peers = + [ "honest peers: " ++ show (Map.size (honestPeers peers)) + , "adversaries: " ++ show (Map.size (adversarialPeers peers)) + ] ++ zipWith3 (\number time peerState -> number ++ ": " ++ peerState ++ " @ " ++ time @@ -191,7 +197,7 @@ longRangeAttack :: longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do honest <- peerScheduleFromTipPoints g honParams [(IsTrunk, [AF.length btTrunk - 1])] btTrunk [] adv <- peerScheduleFromTipPoints g advParams [(IsBranch, [AF.length (btbFull branch) - 1])] btTrunk [btbFull branch] - pure (mkPeers honest [adv]) + pure (peers' [honest] [adv]) where honParams = defaultPeerScheduleParams {pspHeaderDelayInterval = (0.3, 0.4)} advParams = defaultPeerScheduleParams {pspTipDelayInterval = (0, 0.1)} @@ -199,22 +205,45 @@ longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do longRangeAttack _ _ = error "longRangeAttack can only deal with single adversary" --- | Generate a schedule in which the trunk and branches are served by one peer each, using --- a single tip point, without 
specifically assigned delay intervals like in --- 'newLongRangeAttack'. +data PointsGeneratorParams = PointsGeneratorParams { + pgpExtraHonestPeers :: Int, + pgpDowntime :: DowntimeParams +} + +data DowntimeParams = NoDowntime | DowntimeWithSecurityParam SecurityParam + +uniformPoints :: + (StatefulGen g m, AF.HasHeader blk) => + PointsGeneratorParams -> + BlockTree blk -> + g -> + m (PeersSchedule blk) +uniformPoints PointsGeneratorParams {pgpExtraHonestPeers, pgpDowntime} = case pgpDowntime of + NoDowntime -> uniformPointsWithExtraHonestPeers pgpExtraHonestPeers + DowntimeWithSecurityParam k -> uniformPointsWithExtraHonestPeersAndDowntime pgpExtraHonestPeers k + +-- | Generate a schedule in which the trunk is served by @pgpExtraHonestPeers + 1@ peers, +-- and extra branches are served by one peer each, using a single tip point, +-- without specifically assigned delay intervals like in 'newLongRangeAttack'. -- -- Include rollbacks in a percentage of adversaries, in which case that peer uses two branchs. -- -uniformPoints :: +uniformPointsWithExtraHonestPeers :: (StatefulGen g m, AF.HasHeader blk) => + Int -> BlockTree blk -> g -> m (PeersSchedule blk) -uniformPoints BlockTree {btTrunk, btBranches} g = do +uniformPointsWithExtraHonestPeers + extraHonestPeers + BlockTree {btTrunk, btBranches} + g + = do honestTip0 <- firstTip btTrunk - honest <- mkSchedule [(IsTrunk, [honestTip0 .. AF.length btTrunk - 1])] [] + honests <- replicateM (extraHonestPeers + 1) $ + mkSchedule [(IsTrunk, [honestTip0 .. AF.length btTrunk - 1])] [] advs <- takeBranches btBranches - pure (mkPeers honest advs) + pure (peers' honests advs) where takeBranches = \case [] -> pure [] @@ -301,16 +330,16 @@ bumpTips tips = = (tn, (t0, p)) step ts a = (ts, a) -syncTips :: [(Time, SchedulePoint blk)] -> [[(Time, SchedulePoint blk)]] -> ([(Time, SchedulePoint blk)], [[(Time, SchedulePoint blk)]]) -syncTips honest advs = - (bump honest, bump <$> advs) +syncTips :: [[(Time, SchedulePoint blk)]] -> [[(Time, SchedulePoint blk)]] -> ([[(Time, SchedulePoint blk)]], [[(Time, SchedulePoint blk)]]) +syncTips honests advs = + (bump <$> honests, bump <$> advs) where bump = bumpTips earliestTips earliestTips = chooseEarliest <$> zipPadN (tipTimes <$> scheds) - scheds = honest : advs + scheds = honests <> advs chooseEarliest times = minimum (fromMaybe (Time 0) <$> times) --- | This is a variant of 'uniformPoints' that uses multiple tip points, used to simulate node downtimes. +-- | This is a variant of 'uniformPointsWithExtraHonestPeers' that uses multiple tip points, used to simulate node downtimes. -- Ultimately, this should be replaced by a redesign of the peer schedule generator that is aware of node liveness -- intervals. -- @@ -320,23 +349,30 @@ syncTips honest advs = -- The second tip is the last block of each branch. -- -- Includes rollbacks in some schedules. -uniformPointsWithDowntime :: +uniformPointsWithExtraHonestPeersAndDowntime :: (StatefulGen g m, AF.HasHeader blk) => + Int -> SecurityParam -> BlockTree blk -> g -> m (PeersSchedule blk) -uniformPointsWithDowntime (SecurityParam k) BlockTree {btTrunk, btBranches} g = do +uniformPointsWithExtraHonestPeersAndDowntime + extraHonestPeers + (SecurityParam k) + BlockTree {btTrunk, btBranches} + g + = do let kSlot = withOrigin 0 (fromIntegral . unSlotNo) (AF.headSlot (AF.takeOldest (fromIntegral k) btTrunk)) midSlot = (AF.length btTrunk) `div` 2 lowerBound = max kSlot midSlot pauseSlot <- SlotNo . 
fromIntegral <$> Random.uniformRM (lowerBound, AF.length btTrunk - 1) g honestTip0 <- firstTip pauseSlot btTrunk - honest <- mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] + honests <- replicateM (extraHonestPeers + 1) $ + mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] advs <- takeBranches pauseSlot btBranches - let (honest', advs') = syncTips honest advs - pure (mkPeers honest' advs') + let (honests', advs') = syncTips honests advs + pure (peers' honests' advs') where takeBranches pause = \case [] -> pure [] @@ -391,7 +427,6 @@ uniformPointsWithDowntime (SecurityParam k) BlockTree {btTrunk, btBranches} g = rollbackProb = 0.2 - newtype ForecastRange = ForecastRange { unForecastRange :: Word64 } deriving (Show) @@ -425,6 +460,13 @@ data GenesisTest blk schedule = GenesisTest gtLoPBucketParams :: LoPBucketParams, gtCSJParams :: CSJParams, gtSlotLength :: SlotLength, + -- | The number of extra honest peers we want in the test. + -- It is stored here for convenience, and because it may affect schedule and block tree generation. + -- + -- There will be at most one adversarial peer per alternative branch in the block tree + -- (exactly one per branch if no adversary does a rollback), + -- and `1 + gtExtraHonestPeers` honest peers. + gtExtraHonestPeers :: Word, gtSchedule :: schedule } @@ -499,11 +541,9 @@ duplicateLastPoint d xs = in xs ++ [(addTime d t, p)] ensureScheduleDuration :: GenesisTest blk a -> PeersSchedule blk -> PeersSchedule blk -ensureScheduleDuration gt Peers {honest, others} = - Peers {honest = extendHonest, others} +ensureScheduleDuration gt peers = + duplicateLastPoint endingDelay <$> peers where - extendHonest = duplicateLastPoint endingDelay <$> honest - endingDelay = let cst = gtChainSyncTimeouts gt bft = gtBlockFetchTimeouts gt @@ -513,5 +553,4 @@ ensureScheduleDuration gt Peers {honest, others} = , busyTimeout bft , streamingTimeout bft ]) - - peerCount = 1 + length others + peerCount = length (peersList peers) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs index 9d0a084c08..13742d1d65 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs @@ -15,13 +15,19 @@ module Test.Consensus.PointSchedule.Peers ( Peer (..) , PeerId (..) , Peers (..) + , adversarialPeers' + , adversarialPeers'' + , deletePeer , enumerateAdversaries , fromMap , fromMap' , getPeer , getPeerIds - , mkPeers - , mkPeers' + , honestPeers' + , honestPeers'' + , isAdversarialPeerId + , isHonestPeerId + , peers' , peersFromPeerIdList , peersFromPeerIdList' , peersFromPeerList @@ -33,8 +39,6 @@ module Test.Consensus.PointSchedule.Peers ( ) where import Data.Hashable (Hashable) -import Data.List.NonEmpty (NonEmpty ((:|))) -import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.String (IsString (fromString)) @@ -45,20 +49,25 @@ import Ouroboros.Consensus.Util.Condense (Condense (..), condenseListWithPadding) -- | Identifier used to index maps and specify which peer is active during a tick. 
-data PeerId = - HonestPeer - | - PeerId String +data PeerId + = HonestPeer Int + | AdversarialPeer Int deriving (Eq, Generic, Show, Ord, NoThunks) instance IsString PeerId where - fromString "honest" = HonestPeer - fromString i = PeerId i + fromString s = case words s of + ["honest"] -> HonestPeer 1 + ["honest", n] -> HonestPeer (read n) + ["adversary"] -> AdversarialPeer 1 + ["adversary", n] -> AdversarialPeer (read n) + _ -> error $ "fromString: invalid PeerId: " ++ s instance Condense PeerId where condense = \case - HonestPeer -> "honest" - PeerId name -> name + HonestPeer 1 -> "honest" + HonestPeer n -> "honest " ++ show n + AdversarialPeer 1 -> "adversary" + AdversarialPeer n -> "adversary " ++ show n instance CondenseList PeerId where condenseList = condenseListWithPadding PadRight @@ -94,119 +103,158 @@ instance CondenseList a => CondenseList (Peer a) where (condenseList $ value <$> peers) -- | General-purpose functor for a set of peers. --- --- REVIEW: There is a duplicate entry for the honest peer, here. We should --- probably either have only the 'Map' or have the keys of the map be 'String'? --- --- Alternatively, we could just have 'newtype PeerId = PeerId String' with an --- alias for 'HonestPeer = PeerId "honest"'? -data Peers a = - Peers { - honest :: Peer a, - others :: Map PeerId (Peer a) +data Peers a = Peers + { honestPeers :: Map Int a, + adversarialPeers :: Map Int a } deriving (Eq, Show) +-- | Variant of 'honestPeers' that returns a map with 'PeerId's as keys. +honestPeers' :: Peers a -> Map PeerId a +honestPeers' = Map.mapKeysMonotonic HonestPeer . honestPeers + +-- | Variant of 'honestPeers' that returns a map with 'PeerId's as keys and +-- values as 'Peer's. +honestPeers'' :: Peers a -> Map PeerId (Peer a) +honestPeers'' = Map.mapWithKey Peer . honestPeers' + +-- | Variant of 'adversarialPeers' that returns a map with 'PeerId's as keys. +adversarialPeers' :: Peers a -> Map PeerId a +adversarialPeers' peers = Map.mapKeysMonotonic AdversarialPeer $ adversarialPeers peers + +-- | Variant of 'adversarialPeers' that returns a map with 'PeerId's as keys and +-- values as 'Peer's. +adversarialPeers'' :: Peers a -> Map PeerId (Peer a) +adversarialPeers'' = Map.mapWithKey Peer . adversarialPeers' + instance Functor Peers where - fmap f Peers {honest, others} = Peers {honest = f <$> honest, others = fmap f <$> others} + fmap f Peers {honestPeers, adversarialPeers} = + Peers + { honestPeers = f <$> honestPeers, + adversarialPeers = f <$> adversarialPeers + } instance Foldable Peers where - foldMap f Peers {honest, others} = (f . value) honest <> foldMap (f . value) others + foldMap f Peers {honestPeers, adversarialPeers} = + foldMap f honestPeers <> foldMap f adversarialPeers -- | A set of peers with only one honest peer carrying the given value. peersOnlyHonest :: a -> Peers a peersOnlyHonest value = - Peers { - honest = Peer {name = HonestPeer, value}, - others = Map.empty + Peers + { honestPeers = Map.singleton 1 value, + adversarialPeers = Map.empty } -- | Extract all 'PeerId's. -getPeerIds :: Peers a -> NonEmpty PeerId -getPeerIds peers = HonestPeer :| Map.keys (others peers) +getPeerIds :: Peers a -> [PeerId] +getPeerIds Peers {honestPeers, adversarialPeers} = + (HonestPeer <$> Map.keys honestPeers) ++ (AdversarialPeer <$> Map.keys adversarialPeers) getPeer :: PeerId -> Peers a -> Peer a -getPeer pid peers - | HonestPeer <- pid - = honest peers - | otherwise - = others peers Map.! pid +getPeer (HonestPeer n) Peers {honestPeers} = Peer (HonestPeer n) (honestPeers Map.! 
n) +getPeer (AdversarialPeer n) Peers {adversarialPeers} = Peer (AdversarialPeer n) (adversarialPeers Map.! n) updatePeer :: (a -> (a, b)) -> PeerId -> Peers a -> (Peers a, b) -updatePeer f pid Peers {honest, others} - | HonestPeer <- pid - , let (a, b) = f (value honest) - = (Peers {honest = a <$ honest, others}, b) - | otherwise - , let p = others Map.! pid - (a, b) = f (value p) - = (Peers {honest, others = Map.adjust (a <$) pid others}, b) +updatePeer f (HonestPeer n) Peers {honestPeers, adversarialPeers} = + let (a, b) = f (honestPeers Map.! n) + in (Peers {honestPeers = Map.insert n a honestPeers, adversarialPeers}, b) +updatePeer f (AdversarialPeer n) Peers {honestPeers, adversarialPeers} = + let (a, b) = f (adversarialPeers Map.! n) + in (Peers {honestPeers, adversarialPeers = Map.insert n a adversarialPeers}, b) -- | Convert 'Peers' to a list of 'Peer'. -peersList :: Peers a -> NonEmpty (Peer a) -peersList Peers {honest, others} = - honest :| Map.elems others +peersList :: Peers a -> [Peer a] +peersList Peers {honestPeers, adversarialPeers} = + Map.foldrWithKey + (\k v -> (Peer (HonestPeer k) v :)) + ( Map.foldrWithKey + (\k v -> (Peer (AdversarialPeer k) v :)) + [] + adversarialPeers + ) + honestPeers enumerateAdversaries :: [PeerId] -enumerateAdversaries = - (\ n -> PeerId ("adversary " ++ show n)) <$> [1 :: Int ..] +enumerateAdversaries = AdversarialPeer <$> [1 ..] -- | Construct 'Peers' from values, adding adversary names based on the default schema. --- A single adversary gets the ID @adversary@, multiple get enumerated as @adversary N@. -mkPeers :: a -> [a] -> Peers a -mkPeers h as = - Peers (Peer HonestPeer h) (Map.fromList (mkPeer <$> advs as)) - where - mkPeer (pid, a) = (pid, Peer pid a) - advs [a] = [("adversary", a)] - advs _ = zip enumerateAdversaries as - --- | Make a 'Peers' structure from the honest value and the other peers. Fail if --- one of the other peers is the 'HonestPeer'. -mkPeers' :: a -> [Peer a] -> Peers a -mkPeers' value prs = - Peers (Peer HonestPeer value) (Map.fromList $ dupAdvPeerId <$> prs) - where - -- | Duplicate an adversarial peer id; fail if honest. - dupAdvPeerId :: Peer a -> (PeerId, Peer a) - dupAdvPeerId (Peer HonestPeer _) = error "cannot be the honest peer" - dupAdvPeerId peer@(Peer pid _) = (pid, peer) - --- | Make a 'Peers' structure from a non-empty list of peers. Fail if the honest --- peer is not exactly once in the list. -peersFromPeerList :: NonEmpty (Peer a) -> Peers a -peersFromPeerList = - uncurry mkPeers' . extractHonestPeer . NonEmpty.toList +peers' :: [a] -> [a] -> Peers a +peers' hs as = + Peers + { honestPeers = Map.fromList $ zip [1 ..] hs, + adversarialPeers = Map.fromList $ zip [1 ..] as + } + +-- | Make a 'Peers' structure from individual 'Peer's. +peersFromPeerList :: [Peer a] -> Peers a +peersFromPeerList peers = + let (hs, as) = partitionPeers peers + in Peers + { honestPeers = Map.fromList hs, + adversarialPeers = Map.fromList as + } where - -- | Return the value associated with the honest peer and the list of peers - -- excluding the honest one. - extractHonestPeer :: [Peer a] -> (a, [Peer a]) - extractHonestPeer [] = error "could not find honest peer" - extractHonestPeer (Peer HonestPeer value : peers) = (value, peers) - extractHonestPeer (peer : peers) = (peer :) <$> extractHonestPeer peers - --- | Make a 'Peers' structure from a non-empty list of peer ids and a default --- value. Fails if the honest peer is not exactly once in the list. 
-peersFromPeerIdList :: NonEmpty PeerId -> a -> Peers a + partitionPeers :: [Peer a] -> ([(Int, a)], [(Int, a)]) + partitionPeers = + foldl + ( \(hs, as) (Peer pid v) -> case pid of + HonestPeer n -> ((n, v) : hs, as) + AdversarialPeer n -> (hs, (n, v) : as) + ) + ([], []) + +-- | Make a 'Peers' structure from a list of peer ids and a default value. +peersFromPeerIdList :: [PeerId] -> a -> Peers a peersFromPeerIdList = flip $ \val -> peersFromPeerList . fmap (flip Peer val) -- | Like 'peersFromPeerIdList' with @()@. -peersFromPeerIdList' :: NonEmpty PeerId -> Peers () +peersFromPeerIdList' :: [PeerId] -> Peers () peersFromPeerIdList' = flip peersFromPeerIdList () -toMap :: Peers a -> Map PeerId (Peer a) -toMap Peers{honest, others} = Map.insert HonestPeer honest others - -- | Same as 'toMap' but the map contains unwrapped values. toMap' :: Peers a -> Map PeerId a -toMap' = fmap (\(Peer _ v) -> v) . toMap +toMap' Peers {honestPeers, adversarialPeers} = + Map.union + (Map.mapKeysMonotonic HonestPeer honestPeers) + (Map.mapKeysMonotonic AdversarialPeer adversarialPeers) -fromMap :: Map PeerId (Peer a) -> Peers a -fromMap peers = Peers{ - honest = peers Map.! HonestPeer, - others = Map.delete HonestPeer peers - } +toMap :: Peers a -> Map PeerId (Peer a) +toMap = Map.mapWithKey Peer . toMap' -- | Same as 'fromMap' but the map contains unwrapped values. fromMap' :: Map PeerId a -> Peers a -fromMap' = fromMap . Map.mapWithKey Peer +fromMap' peers = + let (honestPeers, adversarialPeers) = + Map.mapEitherWithKey + ( \case + HonestPeer _ -> Left + AdversarialPeer _ -> Right + ) + peers + in Peers + { honestPeers = Map.mapKeysMonotonic unHonestPeer honestPeers, + adversarialPeers = Map.mapKeysMonotonic unAdversarialPeer adversarialPeers + } + where + unHonestPeer (HonestPeer n) = n + unHonestPeer _ = error "unHonestPeer: not a honest peer" + unAdversarialPeer (AdversarialPeer n) = n + unAdversarialPeer _ = error "unAdversarialPeer: not an adversarial peer" + +fromMap :: Map PeerId (Peer a) -> Peers a +fromMap = fromMap' . 
Map.map value + +deletePeer :: PeerId -> Peers a -> Peers a +deletePeer (HonestPeer n) Peers {honestPeers, adversarialPeers} = + Peers {honestPeers = Map.delete n honestPeers, adversarialPeers} +deletePeer (AdversarialPeer n) Peers {honestPeers, adversarialPeers} = + Peers {honestPeers, adversarialPeers = Map.delete n adversarialPeers} + +isHonestPeerId :: PeerId -> Bool +isHonestPeerId (HonestPeer _) = True +isHonestPeerId _ = False + +isAdversarialPeerId :: PeerId -> Bool +isAdversarialPeerId (AdversarialPeer _) = True +isAdversarialPeerId _ = False diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs index 8332e62021..a574077634 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs @@ -2,8 +2,8 @@ {-# LANGUAGE NamedFieldPuns #-} module Test.Consensus.PointSchedule.Shrinking ( - shrinkByRemovingAdversaries -- | Exported only for testing (that is, checking the properties of the function) + shrinkByRemovingAdversaries , shrinkHonestPeer , shrinkPeerSchedules ) where @@ -11,7 +11,9 @@ module Test.Consensus.PointSchedule.Shrinking ( import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) import Data.Containers.ListUtils (nubOrd) +import Data.Function ((&)) import Data.Functor ((<&>)) +import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (mapMaybe, maybeToList) import Ouroboros.Network.AnchoredFragment (AnchoredFragment, @@ -22,9 +24,10 @@ import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule (GenesisTest (..), GenesisTestFull, PeerSchedule, PeersSchedule, peerSchedulesBlocks) -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..)) +import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.QuickCheck (shrinkList) +import Test.Util.PartialAccessors import Test.Util.TestBlock (TestBlock, isAncestorOf, isStrictAncestorOf) @@ -38,16 +41,19 @@ shrinkPeerSchedules :: [GenesisTestFull TestBlock] shrinkPeerSchedules genesisTest _stateView = let trimmedBlockTree sch = trimBlockTree' sch (gtBlockTree genesisTest) - shrunkOthers = shrinkOtherPeers shrinkPeerSchedule (gtSchedule genesisTest) <&> - \shrunkSchedule -> genesisTest - { gtSchedule = shrunkSchedule - , gtBlockTree = trimmedBlockTree shrunkSchedule - } - shrunkHonest = shrinkHonestPeer - (gtSchedule genesisTest) - -- No need to update the tree here, shrinking the honest peer never discards blocks - <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} - in shrunkOthers ++ shrunkHonest + shrunkAdversarialPeers = + shrinkAdversarialPeers shrinkPeerSchedule (gtSchedule genesisTest) + <&> \shrunkSchedule -> + genesisTest + { gtSchedule = shrunkSchedule, + gtBlockTree = trimmedBlockTree shrunkSchedule + } + shrunkHonestPeers = + shrinkHonestPeers + (gtSchedule genesisTest) + -- No need to update the tree here, shrinking the honest peers never discards blocks + <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} + in shrunkAdversarialPeers ++ shrunkHonestPeers -- | Shrink a 'Peers PeerSchedule' by removing adversaries. 
This does not affect -- the honest peer; and it does not remove ticks from the schedules of the @@ -57,7 +63,7 @@ shrinkByRemovingAdversaries :: StateView TestBlock -> [GenesisTestFull TestBlock] shrinkByRemovingAdversaries genesisTest _stateView = - shrinkOtherPeers (const []) (gtSchedule genesisTest) <&> \shrunkSchedule -> + shrinkAdversarialPeers (const []) (gtSchedule genesisTest) <&> \shrunkSchedule -> let trimmedBlockTree = trimBlockTree' shrunkSchedule (gtBlockTree genesisTest) in (genesisTest{gtSchedule = shrunkSchedule, gtBlockTree = trimmedBlockTree}) @@ -68,25 +74,45 @@ shrinkPeerSchedule = shrinkList (const []) -- | Shrink the 'others' field of a 'Peers' structure by attempting to remove -- peers or by shrinking their values using the given shrinking function. -shrinkOtherPeers :: (a -> [a]) -> Peers a -> [Peers a] -shrinkOtherPeers shrink Peers{honest, others} = - map (Peers honest . Map.fromList) $ - shrinkList (traverse (traverse shrink)) $ Map.toList others +shrinkAdversarialPeers :: (a -> [a]) -> Peers a -> [Peers a] +shrinkAdversarialPeers shrink Peers {honestPeers, adversarialPeers} = + map (Peers honestPeers . Map.fromList) $ + shrinkList (traverse shrink) $ + Map.toList adversarialPeers + +-- | Shrinks honest peers by removing ticks. Because we are manipulating +-- 'PeerSchedule' at this point, there is no proper notion of a tick. Instead, +-- we remove points from the honest 'PeerSchedule', and move all other points +-- sooner, including those on the other schedules. We check that this operation +-- neither changes the final state of the honest peer, nor removes points from +-- the other schedules. +shrinkHonestPeers :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] +shrinkHonestPeers Peers {honestPeers, adversarialPeers} = + Map.toList honestPeers + & concatMap + ( \(n, schedule) -> + shrinkTheHonestPeer schedule (Map.delete n honestPeers) adversarialPeers + & map + ( \(schedule', otherHonestPeers', otherAdversarialPeers') -> + Peers + { honestPeers = Map.insert n schedule' otherHonestPeers', + adversarialPeers = otherAdversarialPeers' + } + ) + ) --- | Shrinks an honest peer by removing ticks. --- Because we are manipulating 'PeerSchedule' at that point, there is no proper --- notion of a tick. Instead, we remove points of the honest 'PeerSchedule', --- and move all other points sooner, including those on the adversarial schedule. --- We check that this operation neither changes the final state of the honest peer, --- nor that it removes points from the adversarial schedules. 
-shrinkHonestPeer :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] -shrinkHonestPeer Peers{honest, others} = do +shrinkTheHonestPeer :: + PeerSchedule blk -> + Map Int (PeerSchedule blk) -> + Map Int (PeerSchedule blk) -> + [(PeerSchedule blk, Map Int (PeerSchedule blk), Map Int (PeerSchedule blk))] +shrinkTheHonestPeer theSchedule otherHonestPeers otherAdversarialPeers = do (at, speedUpBy) <- splits - (honest', others') <- maybeToList $ do - honest' <- traverse (speedUpHonestSchedule at speedUpBy) honest - others' <- mapM (traverse (speedUpAdversarialSchedule at speedUpBy)) others - pure (honest', others') - pure $ Peers honest' others' + maybeToList $ do + theSchedule' <- speedUpTheSchedule at speedUpBy theSchedule + otherHonestPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherHonestPeers + otherAdversarialPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherAdversarialPeers + pure (theSchedule', otherHonestPeers', otherAdversarialPeers') where -- | A list of non-zero time intervals between successive points of the honest schedule splits :: [(Time, DiffTime)] @@ -96,15 +122,29 @@ shrinkHonestPeer Peers{honest, others} = do then Nothing else Just (t1, diffTime t2 t1) ) - (zip (value honest) (drop 1 $ value honest)) + (zip theSchedule (drop 1 theSchedule)) + +-- | For testing purposes only. Assumes there is exactly one honest peer and +-- shrinks it. +shrinkHonestPeer :: PeersSchedule blk -> [PeersSchedule blk] +shrinkHonestPeer Peers {honestPeers, adversarialPeers} = + shrinkTheHonestPeer (getHonestPeer honestPeers) Map.empty adversarialPeers + & map + ( \(schedule', _, otherAdversarialPeers') -> + Peers + { honestPeers = Map.singleton 1 schedule', + adversarialPeers = otherAdversarialPeers' + } + ) --- | Speeds up an honest schedule after `at` time, by `speedUpBy`. --- This “speeding up” is done by subtracting @speedUpBy@ to all points after @at@, --- and removing those points if they fall before `at`. We check that the operation --- doesn't change the final state of the peer, i.e. it doesn't remove all TP, HP, and BP --- in the sped up part. -speedUpHonestSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpHonestSchedule at speedUpBy sch = +-- | Speeds up _the_ schedule (that is, the one that we are actually trying to +-- speed up) after `at` time, by `speedUpBy`. This "speeding up" is done by +-- removing `speedUpBy` to all points after `at`, and removing those points if +-- they fall before `at`. We check that the operation doesn't change the final +-- state of the peer, i.e. it doesn't remove all TP, HP, and BP in the sped up +-- part. +speedUpTheSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) +speedUpTheSchedule at speedUpBy sch = if stillValid then Just $ beforeSplit ++ spedUpSchedule else Nothing where (beforeSplit, afterSplit) = span ((< at) . fst) sch @@ -120,12 +160,12 @@ speedUpHonestSchedule at speedUpBy sch = hasHP = any (\case (_, ScheduleHeaderPoint _) -> True; _ -> False) hasBP = any (\case (_, ScheduleBlockPoint _) -> True; _ -> False) --- | Speeds up an adversarial schedule after `at` time, by `speedUpBy`. --- This "speeding up" is done by removing `speedUpBy` to all points after `at`. --- We check that the schedule had no points between `at` and `at + speedUpBy`. --- We also keep the last point where it is, so that the end time stays the same. 
-speedUpAdversarialSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpAdversarialSchedule at speedUpBy sch = +-- | Speeds up the other schedules after `at` time, by `speedUpBy`. This +-- "speeding up" is done by removing `speedUpBy` to all points after `at`. We +-- check that the schedule had no points between `at` and `at + speedUpBy`. We +-- also keep the last point where it is, so that the end time stays the same. +speedUpOtherSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) +speedUpOtherSchedule at speedUpBy sch = if losesPoint then Nothing else Just $ beforeSplit ++ spedUpSchedule ++ lastPoint where (beforeSplit, afterSplit) = span ((< at) . fst) sch diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index 8332f7be83..ed3b1a0003 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -11,7 +11,7 @@ import Test.Consensus.Genesis.Setup (genChains) import Test.Consensus.Genesis.Tests.Uniform (genUniformSchedulePoints) import Test.Consensus.PointSchedule (PeerSchedule, PeersSchedule, prettyPeersSchedule) -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..)) +import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.Shrinking (shrinkHonestPeer) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.QuickCheck (Property, conjoin, counterexample) @@ -45,7 +45,7 @@ lastM [a] = Just a lastM (_:ps) = lastM ps samePeers :: PeersSchedule blk -> PeersSchedule blk -> Bool -samePeers sch1 sch2 = (keys $ others sch1) == (keys $ others sch2) +samePeers sch1 sch2 = (keys $ adversarialPeers sch1) == (keys $ adversarialPeers sch2) -- | Checks whether at least one peer schedule in the second given peers schedule -- is shorter than its corresponding one in the fist given peers schedule. “Shorter” @@ -84,8 +84,8 @@ doesNotRemoveAdversarialPoints original shrunk = samePeers original shrunk && (and $ zipWith (\oldSch newSch -> fmap snd oldSch == fmap snd newSch) - (toList $ (fmap value) $ others original) - (toList $ (fmap value) $ others shrunk) + (toList $ adversarialPeers original) + (toList $ adversarialPeers shrunk) ) checkShrinkProperty :: (PeersSchedule TestBlock -> PeersSchedule TestBlock -> Bool) -> Property diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs new file mode 100644 index 0000000000..470e3f43df --- /dev/null +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs @@ -0,0 +1,42 @@ +{-# LANGUAGE NamedFieldPuns #-} + +-- | Helpers to access particular parts of trees and schedules +-- Those functions are partial, and are designed to only be used in tests. +-- We know they won't fail there, because we generated the structures +-- with the correct properties. 
+module Test.Util.PartialAccessors ( + getHonestPeer + , getOnlyBranch + , getOnlyBranchTip + , getTrunkTip + ) where + +import qualified Data.Map as Map +import qualified Ouroboros.Network.AnchoredFragment as AF +import Ouroboros.Network.Block (HasHeader) +import Test.Consensus.BlockTree + +getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk +getOnlyBranch BlockTree {btBranches} = case btBranches of + [branch] -> branch + _ -> error "tree must have exactly one alternate branch" + +getTrunkTip :: HasHeader blk => BlockTree blk -> blk +getTrunkTip tree = case btTrunk tree of + (AF.Empty _) -> error "tree must have at least one block" + (_ AF.:> tipBlock) -> tipBlock + +getOnlyBranchTip :: HasHeader blk => BlockTree blk -> blk +getOnlyBranchTip BlockTree {btBranches} = case btBranches of + [branch] -> case btbFull branch of + (AF.Empty _) -> error "alternate branch must have at least one block" + (_ AF.:> tipBlock) -> tipBlock + _ -> error "tree must have exactly one alternate branch" + +getHonestPeer :: Map.Map Int a -> a +getHonestPeer honests = + if Map.size honests /= 1 + then error "there must be exactly one honest peer" + else case Map.lookup 1 honests of + Nothing -> error "the only honest peer must have id 1" + Just p -> p
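
Usage sketch (illustrative; not part of the diff): with `gtExtraHonestPeers` stored in `GenesisTest`, schedule generation is driven by `PointsGeneratorParams`, as in `genUniformSchedulePoints` and `prop_downtime` above. The helper below only restates that wiring; the name `mkParams` is hypothetical:

    import Test.Consensus.PointSchedule
             (DowntimeParams (..), GenesisTest (..), PointsGeneratorParams (..))

    -- One schedule is generated per honest peer (gtExtraHonestPeers + 1 of
    -- them); downtime simulation is off here, whereas 'prop_downtime' passes
    -- 'DowntimeWithSecurityParam (gtSecurityParam gt)' instead.
    mkParams :: GenesisTest blk schedule -> PointsGeneratorParams
    mkParams gt = PointsGeneratorParams
      { pgpExtraHonestPeers = fromIntegral (gtExtraHonestPeers gt)
      , pgpDowntime         = NoDowntime
      }

    -- Used as in 'genUniformSchedulePoints':
    --   stToGen (uniformPoints (mkParams gt) (gtBlockTree gt))

Tests that still assume a single honest peer fetch its schedule with `getHonestPeer` from `Test.Util.PartialAccessors`, which fails loudly if the `honestPeers` map does not contain exactly the peer with id 1.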