diff --git a/bench/Chainweb/Pact/Backend/ForkingBench.hs b/bench/Chainweb/Pact/Backend/ForkingBench.hs index fc5d8b819b..df780f3292 100644 --- a/bench/Chainweb/Pact/Backend/ForkingBench.hs +++ b/bench/Chainweb/Pact/Backend/ForkingBench.hs @@ -1,10 +1,12 @@ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedRecordDot #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE RecordWildCards #-} @@ -33,6 +35,7 @@ import Data.FileEmbed import Data.Foldable (toList) import Data.IORef import Data.List (uncons) +import Data.List qualified as List import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as NEL import Data.Map.Strict (Map) @@ -49,6 +52,7 @@ import qualified Data.Yaml as Y import GHC.Generics hiding (from, to) import System.Environment +import System.Logger.Types qualified import System.LogLevel import System.Random @@ -74,11 +78,13 @@ import Chainweb.BlockCreationTime import Chainweb.BlockHeader import Chainweb.BlockHeaderDB import Chainweb.BlockHeaderDB.Internal +import Chainweb.BlockHeight (BlockHeight(..)) import Chainweb.ChainId import Chainweb.Graph import Chainweb.Logger import Chainweb.Mempool.Mempool (BlockFill(..)) import Chainweb.Miner.Pact +import Chainweb.Pact.Backend.Compaction qualified as C import Chainweb.Pact.Backend.Types import Chainweb.Pact.Backend.Utils import Chainweb.Pact.PactService @@ -90,7 +96,7 @@ import Chainweb.Pact.Utils (toTxCreationTime) import Chainweb.Payload import Chainweb.Payload.PayloadStore import Chainweb.Payload.PayloadStore.InMemory -import Chainweb.Test.TestVersions +import Chainweb.Test.TestVersions (slowForkingCpmTestVersion) import Chainweb.Time import Chainweb.Transaction import Chainweb.Utils @@ -111,28 +117,48 @@ _run args = withTempRocksDb "forkingbench" $ \rdb -> -- -------------------------------------------------------------------------- -- -- Benchmarks +data BenchConfig = BenchConfig + { numPriorBlocks :: Word64 + -- ^ number of blocks to create prior to benchmarking + , validate :: Validate + -- ^ whether or not to validate the blocks as part of the benchmark + , compact :: Compact + -- ^ whether or not to compact the pact database prior to benchmarking + } + +defBenchConfig :: BenchConfig +defBenchConfig = BenchConfig + { numPriorBlocks = 100 + , validate = DontValidate + , compact = DontCompact + } + +data Compact = DoCompact | DontCompact + deriving stock (Eq) + +data Validate = DoValidate | DontValidate + deriving stock (Eq) + bench :: RocksDb -> C.Benchmark -bench rdb = C.bgroup "PactService" +bench rdb = C.bgroup "PactService" $ [ forkingBench , doubleForkingBench - , oneBlock True 1 - , oneBlock True 10 - , oneBlock True 50 - , oneBlock True 100 - , oneBlock False 0 - , oneBlock False 1 - , oneBlock False 10 - , oneBlock False 50 - , oneBlock False 100 - ] + ] ++ map (oneBlock defBenchConfig) [1, 10, 50, 100] + ++ map (oneBlock validateCfg) [0, 1, 10, 50, 100] + ++ map (oneBlock compactCfg) [0, 1, 10, 50, 100] + ++ map (oneBlock compactValidateCfg) [1, 10, 50, 100] where - forkingBench = withResources rdb 10 Quiet + validateCfg = defBenchConfig { validate = DoValidate } + compactCfg = defBenchConfig { compact = DoCompact } + compactValidateCfg = compactCfg { validate = DoValidate } + + forkingBench = withResources rdb 10 Quiet DontCompact $ 
\mainLineBlocks pdb bhdb nonceCounter pactQueue _ -> C.bench "forkingBench" $ C.whnfIO $ do let (T3 _ join1 _) = mainLineBlocks !! 5 void $ playLine pdb bhdb 5 join1 pactQueue nonceCounter - doubleForkingBench = withResources rdb 10 Quiet + doubleForkingBench = withResources rdb 10 Quiet DontCompact $ \mainLineBlocks pdb bhdb nonceCounter pactQueue _ -> C.bench "doubleForkingBench" $ C.whnfIO $ do let (T3 _ join1 _) = mainLineBlocks !! 5 @@ -141,15 +167,21 @@ bench rdb = C.bgroup "PactService" void $ playLine pdb bhdb forkLength1 join1 pactQueue nonceCounter void $ playLine pdb bhdb forkLength2 join1 pactQueue nonceCounter - oneBlock validate txCount = withResources rdb 3 Error go + oneBlock :: BenchConfig -> Int -> C.Benchmark + oneBlock cfg txCount = withResources rdb cfg.numPriorBlocks Error cfg.compact go where - go mainLineBlocks _pdb _bhdb _nonceCounter pactQueue txsPerBlock = + go mainLineBlocks _pdb _bhdb _nonceCounter pactQueue txsPerBlock = do C.bench name $ C.whnfIO $ do writeIORef txsPerBlock txCount let (T3 _ join1 _) = last mainLineBlocks - createBlock validate (ParentHeader join1) (Nonce 1234) pactQueue - name = "block-new" ++ (if validate then "-validated" else "") ++ - "[" ++ show txCount ++ "]" + createBlock cfg.validate (ParentHeader join1) (Nonce 1234) pactQueue + name = "block-new [" + ++ List.intercalate "," + [ "txCount=" ++ show txCount + , "validate=" ++ show (cfg.validate == DoValidate) + , "compact=" ++ show (cfg.compact == DoCompact) + ] + ++ "]" -- -------------------------------------------------------------------------- -- -- Benchmark Function @@ -188,14 +220,14 @@ mineBlock -> PactQueue -> IO (T3 ParentHeader BlockHeader PayloadWithOutputs) mineBlock parent nonce pdb bhdb pact = do - !r@(T3 _ newHeader payload) <- createBlock True parent nonce pact + r@(T3 _ newHeader payload) <- createBlock DoValidate parent nonce pact addNewPayload pdb payload -- NOTE: this doesn't validate the block header, which is fine in this test case unsafeInsertBlockHeaderDb bhdb newHeader return r createBlock - :: Bool + :: Validate -> ParentHeader -> Nonce -> PactQueue @@ -216,7 +248,7 @@ createBlock validate parent nonce pact = do creationTime parent - when validate $ do + when (validate == DoValidate) $ do mv' <- validateBlock bh (payloadWithOutputsToPayloadData payload) pact void $ assertNotLeft =<< takeMVar mv' @@ -250,9 +282,10 @@ withResources :: () => RocksDb -> Word64 -> LogLevel + -> Compact -> RunPactService -> C.Benchmark -withResources rdb trunkLength logLevel f = C.envWithCleanup create destroy unwrap +withResources rdb trunkLength logLevel compact f = C.envWithCleanup create destroy unwrap where unwrap ~(NoopNFData (Resources {..})) = @@ -270,6 +303,13 @@ withResources rdb trunkLength logLevel f = C.envWithCleanup create destroy unwra startPact testVer logger blockHeaderDb payloadDb mp sqlEnv mainTrunkBlocks <- playLine payloadDb blockHeaderDb trunkLength genesisBlock (snd pactService) nonceCounter + when (compact == DoCompact) $ do + C.withDefaultLogger System.Logger.Types.Error $ \lgr -> do + let flags = [C.NoGrandHash] + let db = _sConn sqlEnv + let bh = BlockHeight trunkLength + void $ C.compact (C.Target bh) lgr db flags + return $ NoopNFData $ Resources {..} destroy (NoopNFData (Resources {..})) = do diff --git a/cabal.project b/cabal.project index 39554ed52e..b358111d00 100644 --- a/cabal.project +++ b/cabal.project @@ -125,6 +125,12 @@ source-repository-package tag: 174af3523616c8fe01449da5ccbb9f16df097ac3 --sha256: 
sha256-kVFIy+Aj3TNJpsM1Cs/5uGmzeWwHKYWjjCQ+L1/XOj8= +source-repository-package + type: git + location: https://github.com/chessai/patience + tag: 2f67d546ea6608fc6ebe5f2f6976503cbf340442 + --sha256: 0x137akvbh4kr3qagksw74xdj2xz5vjnx1fbr41bb54a0lkcb8mm + -- -------------------------------------------------------------------------- -- -- Relaxed Bounds diff --git a/chainweb.cabal b/chainweb.cabal index 72cd8399ec..fbbbc916ac 100644 --- a/chainweb.cabal +++ b/chainweb.cabal @@ -274,6 +274,8 @@ library -- pact , Chainweb.Pact.Backend.ChainwebPactDb , Chainweb.Pact.Backend.DbCache + , Chainweb.Pact.Backend.Compaction + , Chainweb.Pact.Backend.PactState , Chainweb.Pact.Backend.RelationalCheckpointer , Chainweb.Pact.Backend.SQLite.DirectV2 , Chainweb.Pact.Backend.SQLite.V2 @@ -403,6 +405,7 @@ library , tls-session-manager >= 0.0 , token-bucket >= 0.1 , transformers >= 0.5 + , unliftio ^>= 0.2 , unordered-containers >= 0.2.16 , uuid >= 1.3.15 , wai >= 3.2.2.1 @@ -506,7 +509,7 @@ test-suite chainweb-tests build-depends: -- internal - chainweb + , chainweb -- external , Decimal >= 0.4.2 @@ -540,6 +543,7 @@ test-suite chainweb-tests , merkle-log >=0.2 , mtl >= 2.3 , network >= 3.1.2 + , patience >= 0.3 , http-client-tls >=0.3 , pact , pact-json >= 0.1 diff --git a/src/Chainweb/Chainweb.hs b/src/Chainweb/Chainweb.hs index 6033adddef..c8e018507e 100644 --- a/src/Chainweb/Chainweb.hs +++ b/src/Chainweb/Chainweb.hs @@ -328,9 +328,9 @@ validatingMempoolConfig cid v gl gp mv = Mempool.InMemConfig f (This _) = Left (T2 (Mempool.TransactionHash "") (Mempool.InsertErrorOther "preInsertBatch: align mismatch 1")) -data StartedChainweb logger - = forall cas. (CanReadablePayloadCas cas, Logger logger) => StartedChainweb !(Chainweb logger cas) - | Replayed !Cut !Cut +data StartedChainweb logger where + StartedChainweb :: (CanReadablePayloadCas cas, Logger logger) => !(Chainweb logger cas) -> StartedChainweb logger + Replayed :: !Cut -> !Cut -> StartedChainweb logger data ChainwebStatus = ProcessStarted @@ -427,6 +427,7 @@ withChainwebInternal conf logger peer serviceSock rocksDb pactDbDir backupDir re , _pactBlockGasLimit = maybe id min maxGasLimit (_configBlockGasLimit conf) , _pactLogGas = _configLogGas conf , _pactModuleCacheLimit = _configModuleCacheLimit conf + , _pactFullHistoryRequired = _configRosetta conf -- this could be OR'd with other things that require full history } pruningLogger :: T.Text -> logger diff --git a/src/Chainweb/Chainweb/Configuration.hs b/src/Chainweb/Chainweb/Configuration.hs index a0d1fbc0e0..ecfaaceb58 100644 --- a/src/Chainweb/Chainweb/Configuration.hs +++ b/src/Chainweb/Chainweb/Configuration.hs @@ -448,7 +448,7 @@ defaultChainwebConfiguration v = ChainwebConfiguration , _configP2p = defaultP2pConfiguration , _configThrottling = defaultThrottlingConfig , _configMempoolP2p = defaultEnableConfig defaultMempoolP2pConfig - , _configBlockGasLimit = 150000 + , _configBlockGasLimit = 150_000 , _configLogGas = False , _configMinGasPrice = 1e-8 , _configPactQueueSize = 2000 diff --git a/src/Chainweb/Pact/Backend/Compaction.hs b/src/Chainweb/Pact/Backend/Compaction.hs new file mode 100644 index 0000000000..9df0dbf12a --- /dev/null +++ b/src/Chainweb/Pact/Backend/Compaction.hs @@ -0,0 +1,713 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE 
MultiParamTypeClasses #-} +{-# LANGUAGE MultiWayIf #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} + +-- | +-- Module: Chainweb.Pact.Backend.Compaction +-- Copyright: Copyright © 2023 Kadena LLC. +-- License: see LICENSE.md +-- +-- Compact Checkpointer PactDbs by culling old journal rows. +-- + +module Chainweb.Pact.Backend.Compaction + ( CompactFlag(..) + , TargetBlockHeight(..) + , CompactM + , compact + , compactAll + , main + , withDefaultLogger + , withPerChainFileLogger + ) where + +import UnliftIO.Async (pooledMapConcurrentlyN_) +import Control.Concurrent (forkIO, threadDelay) +import Control.Concurrent.MVar (swapMVar, readMVar, newMVar) +import Control.Exception (Exception, SomeException(..)) +import Control.Lens (makeLenses, set, over, view, (^.)) +import Control.Monad (forM_, unless, void, when) +import Control.Monad.Catch (MonadCatch(catch), MonadThrow(throwM)) +import Control.Monad.IO.Class (MonadIO(liftIO)) +import Control.Monad.Reader (MonadReader, ReaderT, runReaderT, local) +import Control.Monad.Trans.Control (MonadBaseControl, liftBaseOp) +import Data.ByteString (ByteString) +import Data.Foldable qualified as F +import Data.Function (fix) +import Data.List qualified as List +import Data.Map.Strict qualified as M +import Data.Set (Set) +import Data.Set qualified as Set +import Data.String (IsString) +import Data.Text (Text) +import Data.Text qualified as Text +import Data.Text.Encoding qualified as Text +import Data.Vector qualified as V +import Data.Vector (Vector) +import Database.SQLite3.Direct (Utf8(..), Database) +import GHC.Stack (HasCallStack) +import Options.Applicative +import System.Directory (createDirectoryIfMissing) +import System.FilePath ((</>)) +import System.IO qualified as IO +import System.IO (Handle) + +import Chainweb.BlockHeight (BlockHeight) +import Chainweb.Logger (setComponent) +import Chainweb.Utils (sshow, HasTextRepresentation, fromText, toText, int) +import Chainweb.Version (ChainId, ChainwebVersion(..), ChainwebVersionName, unsafeChainId, chainIdToText) +import Chainweb.Version.Mainnet (mainnet) +import Chainweb.Version.Registry (lookupVersionByName) +import Chainweb.Version.Utils (chainIdsAt) +import Chainweb.Pact.Backend.Types (SQLiteEnv(..)) +import Chainweb.Pact.Backend.Utils (fromUtf8, withSqliteDb) +import Chainweb.Utils (encodeB64Text) + +import System.Logger +import System.Logger.Backend.ColorOption (useColor) +import Data.LogMessage + +import Pact.Types.Persistence (TxId(..)) +import Pact.Types.SQLite (SType(..), RType(..)) +import Pact.Types.SQLite qualified as Pact + +newtype TableName = TableName { getTableName :: Utf8 } + deriving stock (Show) + deriving newtype (Eq, IsString) + +data CompactException + = CompactExceptionInternal !Text + | CompactExceptionDb !SomeException + | CompactExceptionInvalidBlockHeight !BlockHeight + | CompactExceptionTableVerificationFailure !TableName + | CompactExceptionNoLatestBlockHeight + deriving stock (Show) + deriving anyclass (Exception) + +data CompactFlag + = KeepCompactTables + -- ^ Keep compaction tables post-compaction for inspection. + | NoVacuum + -- ^ Don't VACUUM database + | NoDropNewTables + -- ^ Don't drop new tables created after the compaction height. + | NoGrandHash + -- ^ Don't compute the grand hash. + deriving stock (Eq,Show,Read,Enum,Bounded) + +internalError :: MonadThrow m => Text -> m a +internalError = throwM . 
CompactExceptionInternal + +data CompactEnv = CompactEnv + { _ceLogger :: !(Logger SomeLogMessage) + , _ceDb :: !Database + , _ceFlags :: ![CompactFlag] + } +makeLenses ''CompactEnv + +withDefaultLogger :: LogLevel -> (Logger SomeLogMessage -> IO a) -> IO a +withDefaultLogger ll f = withHandleBackend_ logText defaultHandleBackendConfig $ \b -> + withLogger defaultLoggerConfig b $ \l -> f (set setLoggerLevel ll l) + +withPerChainFileLogger :: FilePath -> ChainId -> LogLevel -> (Logger SomeLogMessage -> IO a) -> IO a +withPerChainFileLogger logDir chainId ll f = do + createDirectoryIfMissing False {- don't create parents -} logDir + let logFile = logDir </> ("chain-" <> cid <> ".log") + !_ <- writeFile logFile "" + let handleConfig = defaultHandleBackendConfig + { _handleBackendConfigHandle = FileHandle logFile + } + withHandleBackend_' logText handleConfig $ \h b -> do + + done <- newMVar False + void $ forkIO $ fix $ \go -> do + doneYet <- readMVar done + when (not doneYet) $ do + IO.hFlush h + threadDelay 5_000_000 + go + IO.hFlush h + + withLogger defaultLoggerConfig b $ \l -> do + let logger = setComponent "compaction" + $ over setLoggerScope (("chain", chainIdToText chainId) :) + $ set setLoggerLevel ll l + a <- f logger + void $ swapMVar done True + pure a + where + cid = Text.unpack (chainIdToText chainId) + +withHandleBackend_' :: (MonadIO m, MonadBaseControl IO m) + => (msg -> Text) + -> HandleBackendConfig + -> (Handle -> LoggerBackend msg -> m a) + -> m a +withHandleBackend_' format conf inner = + case conf ^. handleBackendConfigHandle of + StdErr -> run IO.stderr + StdOut -> run IO.stdout + FileHandle file -> liftBaseOp (IO.withFile file IO.AppendMode) run + where + run h = do + colored <- liftIO $ useColor (conf ^. handleBackendConfigColor) h + inner h (handleBackend_ format h colored) + +newtype CompactM a = CompactM { unCompactM :: ReaderT CompactEnv IO a } + deriving newtype (Functor,Applicative,Monad,MonadReader CompactEnv,MonadIO,MonadThrow,MonadCatch) + +instance MonadLog Text CompactM where + localScope :: (LogScope -> LogScope) -> CompactM x -> CompactM x + localScope f = local (over (ceLogger . setLoggerScope) f) + + logg :: LogLevel -> Text -> CompactM () + logg ll m = do + l <- view ceLogger + liftIO $ loggerFunIO l ll $ toLogMessage $ TextLog m + + withLevel :: LogLevel -> CompactM x -> CompactM x + withLevel l = local (set (ceLogger.setLoggerLevel) l) + + withPolicy :: LogPolicy -> CompactM x -> CompactM x + withPolicy p = local (set (ceLogger.setLoggerPolicy) p) + +-- | Run compaction monad +runCompactM :: CompactEnv -> CompactM a -> IO a +runCompactM e a = runReaderT (unCompactM a) e + +-- | Prepare/Execute a "$VTABLE$"-templated query. +execM_ :: () + => Text -- ^ query name (for logging purposes) + -> TableName -- ^ table name + -> Text -- ^ "$VTABLE$"-templated query + -> CompactM () +execM_ msg tbl q = do + db <- view ceDb + logQueryDebug msg + q' <- templateStmt tbl q + liftIO $ Pact.exec_ db q' + +execNoTemplateM_ :: () + => Text -- ^ query name (for logging purposes) + -> Utf8 -- ^ query + -> CompactM () +execNoTemplateM_ msg qry = do + db <- view ceDb + logQueryDebug msg + liftIO $ Pact.exec_ db qry + +-- | Prepare/Execute a "$VTABLE$"-templated, parameterised query. +-- The parameters are the results of the 'CompactM' 'SType' computations. 
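+--
+-- A hypothetical call (the query text and parameter below are illustrative
+-- only, not taken from this patch):
+--
+-- > execM' "deleteOldRows" tbl "DELETE FROM $VTABLE$ WHERE txid < ?1" [SInt 100]
+--
+-- Here "$VTABLE$" is replaced with the bracketed name of @tbl@ before the
+-- parameter is bound; see 'templateStmt' below.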
+execM' :: () + => Text -- ^ query name (for logging purposes) + -> TableName -- ^ table name + -> Text -- ^ "$VTABLE$"-templated query + -> [SType] -- ^ parameters + -> CompactM () +execM' msg tbl stmt ps = do + db <- view ceDb + logQueryDebug msg + stmt' <- templateStmt tbl stmt + liftIO $ Pact.exec' db stmt' ps + +exec_ :: () + => Text + -> Utf8 + -> CompactM () +exec_ msg qry = do + db <- view ceDb + logQueryDebug msg + liftIO $ Pact.exec_ db qry + +qry_ :: () + => Text + -> Utf8 + -> [RType] + -> CompactM [[SType]] +qry_ msg qry rs = do + db <- view ceDb + logQueryDebug msg + liftIO $ Pact.qry_ db qry rs + +-- | Prepare/Execute a "$VTABLE$"-templated, parameterised query. +-- 'RType's are the expected results. +qryM :: () + => Text -- ^ query name (for logging purposes) + -> TableName -- ^ table name + -> Text -- ^ "$VTABLE$"-templated query + -> [SType] -- ^ parameters + -> [RType] -- ^ result types + -> CompactM [[SType]] +qryM msg tbl q ins outs = do + db <- view ceDb + logQueryDebug msg + q' <- templateStmt tbl q + liftIO $ Pact.qry db q' ins outs + +qryNoTemplateM :: () + => Text -- ^ query name (for logging purposes) + -> Utf8 -- ^ query + -> [SType] -- ^ parameters + -> [RType] -- ^ results + -> CompactM [[SType]] +qryNoTemplateM msg q ins outs = do + db <- view ceDb + logQueryDebug msg + liftIO $ Pact.qry db q ins outs + +logQueryDebug :: Text -> CompactM () +logQueryDebug msg = do + logg Info ("Query: " <> msg) + +-- | Statements are templated with "$VTABLE$" substituted +-- with the currently-focused versioned table. +templateStmt :: TableName -> Text -> CompactM Utf8 +templateStmt (TableName (Utf8 tblName)) s = + pure $ Utf8 $ Text.encodeUtf8 $ + Text.replace "$VTABLE$" ("[" <> Text.decodeUtf8 tblName <> "]") s + +-- | Execute a SQLite transaction, rolling back on failure. +-- Throws a 'CompactExceptionDb' on failure. +withTx :: HasCallStack => CompactM a -> CompactM a +withTx a = do + exec_ "withTx.0" "SAVEPOINT compact_tx" + catch (a >>= \r -> exec_ "withTx.1" "RELEASE SAVEPOINT compact_tx" >> pure r) $ + \e@SomeException {} -> do + exec_ "withTx.2" "ROLLBACK TRANSACTION TO SAVEPOINT compact_tx" + throwM $ CompactExceptionDb e + +unlessFlagSet :: CompactFlag -> CompactM () -> CompactM () +unlessFlagSet f x = do + yeahItIs <- isFlagSet f + unless yeahItIs x + +isFlagSet :: CompactFlag -> CompactM Bool +isFlagSet f = view ceFlags >>= \fs -> pure (f `elem` fs) + +isFlagNotSet :: CompactFlag -> CompactM Bool +isFlagNotSet f = not <$> isFlagSet f + +withTables :: Vector TableName -> (TableName -> CompactM a) -> CompactM () +withTables ts a = do + V.iforM_ ts $ \i u@(TableName (Utf8 t')) -> do + let lbl = Text.decodeUtf8 t' <> " (" <> sshow (i + 1) <> " of " <> sshow (V.length ts) <> ")" + localScope (("table",lbl):) $ a u + +-- | Takes a bunch of singleton tablename rows, sorts them, returns them as +-- @TableName@ +sortedTableNames :: [[SType]] -> [TableName] +sortedTableNames rows = M.elems $ M.fromListWith const $ flip List.map rows $ \case + [SText n@(Utf8 s)] -> (Text.toLower (Text.decodeUtf8 s), TableName n) + _ -> error "sortedTableNames: expected text" + +-- | CompactGrandHash associates table name with grand hash of its versioned rows, +-- and NULL with grand hash of all table hashes. 
+createCompactGrandHash :: CompactM () +createCompactGrandHash = do + logg Info "createTables" + execNoTemplateM_ "createTable: CompactGrandHash" + " CREATE TABLE IF NOT EXISTS CompactGrandHash \ + \ ( tablename TEXT \ + \ , hash BLOB \ + \ , UNIQUE (tablename) ); " + + execNoTemplateM_ "deleteFrom: CompactGrandHash" + "DELETE FROM CompactGrandHash" + +-- | CompactActiveRow collects all active rows from all tables. +createCompactActiveRow :: CompactM () +createCompactActiveRow = do + execNoTemplateM_ "createTable: CompactActiveRow" + " CREATE TABLE IF NOT EXISTS CompactActiveRow \ + \ ( tablename TEXT NOT NULL \ + \ , rowkey TEXT NOT NULL \ + \ , vrowid INTEGER NOT NULL \ + \ , hash BLOB \ + \ , UNIQUE (tablename,rowkey) ); " + + execNoTemplateM_ "deleteFrom: CompactActiveRow" + "DELETE FROM CompactActiveRow" + +locateTarget :: TargetBlockHeight -> CompactM BlockHeight +locateTarget = \case + Target bh -> do + ensureBlockHeightExists bh + pure bh + Latest -> do + getLatestBlockHeight + +ensureBlockHeightExists :: BlockHeight -> CompactM () +ensureBlockHeightExists bh = do + r <- qryNoTemplateM + "ensureBlockHeightExists.0" + "SELECT blockheight FROM BlockHistory WHERE blockheight = ?1" + [bhToSType bh] + [RInt] + case r of + [[SInt rBH]] -> do + when (fromIntegral bh /= rBH) $ do + throwM $ CompactExceptionInvalidBlockHeight bh + _ -> do + error "ensureBlockHeightExists.0: impossible" + +getLatestBlockHeight :: CompactM BlockHeight +getLatestBlockHeight = do + r <- qryNoTemplateM + "getLatestBlockHeight.0" + "SELECT blockheight FROM BlockHistory ORDER BY blockheight DESC LIMIT 1" + [] + [RInt] + case r of + [[SInt bh]] -> do + pure (fromIntegral bh) + _ -> do + throwM CompactExceptionNoLatestBlockHeight + +getEndingTxId :: BlockHeight -> CompactM TxId +getEndingTxId bh = do + r <- qryNoTemplateM + "getTxId.0" + "SELECT endingtxid FROM BlockHistory WHERE blockheight=?" + [bhToSType bh] + [RInt] + case r of + [] -> do + throwM (CompactExceptionInvalidBlockHeight bh) + [[SInt t]] -> do + pure (TxId (fromIntegral t)) + _ -> do + internalError "initialize: expected single-row int" + +getVersionedTables :: BlockHeight -> CompactM (Vector TableName) +getVersionedTables bh = do + logg Info "getVersionedTables" + rs <- qryNoTemplateM + "getVersionedTables.0" + " SELECT DISTINCT tablename FROM VersionedTableMutation \ + \ WHERE blockheight <= ? ORDER BY blockheight; " + [bhToSType bh] + [RText] + pure (V.fromList (sortedTableNames rs)) + +tableRowCount :: TableName -> Text -> CompactM () +tableRowCount tbl label = + qryM "tableRowCount.0" tbl "SELECT COUNT(*) FROM $VTABLE$" [] [RInt] >>= \case + [[SInt r]] -> logg Info $ label <> ":rowcount=" <> sshow r + _ -> internalError "count(*) failure" + +-- | For a given table, collect all active rows into CompactActiveRow, +-- and compute+store table grand hash in CompactGrandHash. 
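+--
+-- Here an "active" row is, for each rowkey, the version with the greatest
+-- txid below the ending txid derived from the target blockheight (see
+-- 'getEndingTxId' above); every older version of that rowkey is a superseded
+-- journal entry that 'compactTable' later deletes.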
+collectTableRows :: TxId -> TableName -> CompactM () +collectTableRows txId tbl = do + tableRowCount tbl "collectTableRows" + let vt = tableNameToSType tbl + let txid = txIdToSType txId + + doGrandHash <- isFlagNotSet NoGrandHash + + let collectInsert = Text.concat + [ "INSERT INTO CompactActiveRow " + , "SELECT ?1,rowkey,rowid," <> if doGrandHash + then "sha3_256('T',?1,'K',rowkey,'I',txid,'D',rowdata) " + else "NULL " + , "FROM $VTABLE$ t1 " + , "WHERE txid=(SELECT MAX(txid) FROM $VTABLE$ t2 " + , "WHERE t2.rowkey=t1.rowkey AND t2.txid>= \case + [[SBlob h]] -> pure h + _ -> throwM $ CompactExceptionInternal "computeGlobalHash: bad result" + +-- | Delete non-active rows from given table. +compactTable :: TableName -> CompactM () +compactTable tbl = do + logg Info $ "compactTable: " <> fromUtf8 (getTableName tbl) + + execM' + "compactTable.0" + tbl + " DELETE FROM $VTABLE$ WHERE rowid NOT IN \ + \ (SELECT t.rowid FROM $VTABLE$ t \ + \ LEFT JOIN CompactActiveRow v \ + \ WHERE t.rowid = v.vrowid AND v.tablename=?1); " + [tableNameToSType tbl] + +-- | For given table, re-compute table grand hash and compare +-- with stored grand hash in CompactGrandHash. +verifyTable :: TableName -> CompactM ByteString +verifyTable tbl = do + logg Info "verifyTable" + curr <- computeTableHash tbl + rs <- qryNoTemplateM "verifyTable.0" + "SELECT hash FROM CompactGrandHash WHERE tablename=?1" + [tableNameToSType tbl] + [RBlob] + case rs of + [[SBlob prev]] + | prev == curr -> do + tableRowCount tbl "verifyTable" + pure curr + | otherwise -> + throwM (CompactExceptionTableVerificationFailure tbl) + _ -> throwM $ CompactExceptionInternal "verifyTable: bad result" + +-- | For given table, compute table grand hash for max txid. +computeTableHash :: TableName -> CompactM ByteString +computeTableHash tbl = do + rs <- qryM "computeTableHash.0" tbl + " SELECT sha3a_256(hash) FROM \ + \ (SELECT sha3_256('T',?1,'K',rowkey,'I',txid,'D',rowdata) as hash \ + \ FROM $VTABLE$ t1 \ + \ WHERE txid=(select max(txid) FROM $VTABLE$ t2 \ + \ WHERE t2.rowkey=t1.rowkey) GROUP BY rowkey); " + [tableNameToSType tbl] + [RBlob] + case rs of + [[SBlob curr]] -> pure curr + _ -> throwM $ CompactExceptionInternal "checksumTable: bad result" + +-- | Drop any versioned tables created after target blockheight. +dropNewTables :: BlockHeight -> CompactM () +dropNewTables bh = do + logg Info "dropNewTables" + nts <- fmap (V.fromList . sortedTableNames) $ qryNoTemplateM "dropNewTables.0" + " SELECT tablename FROM VersionedTableCreation \ + \ WHERE createBlockheight > ?1 ORDER BY createBlockheight; " + [bhToSType bh] + [RText] + + withTables nts $ \tbl -> do + execM_ "dropNewTables.1" tbl "DROP TABLE IF EXISTS $VTABLE$" + +-- | Delete all rows from Checkpointer system tables that are not for the target blockheight. 
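+--
+-- For example (the height is illustrative), compacting to blockheight 100
+-- keeps only the BlockHistory, VersionedTableMutation and TransactionIndex
+-- rows whose blockheight is exactly 100, and only the VersionedTableCreation
+-- rows whose createBlockheight is exactly 100.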
+compactSystemTables :: BlockHeight -> CompactM () +compactSystemTables bh = do + let systemTables = ["BlockHistory", "VersionedTableMutation", "TransactionIndex", "VersionedTableCreation"] + forM_ systemTables $ \tbl -> do + let tblText = fromUtf8 (getTableName tbl) + logg Info $ "Compacting system table " <> tblText + let column = + if tbl == "VersionedTableCreation" + then "createBlockheight" + else "blockheight" + execM' + ("compactSystemTables: " <> tblText) + tbl + ("DELETE FROM $VTABLE$ WHERE " <> column <> " != ?1;") + [bhToSType bh] + +dropCompactTables :: CompactM () +dropCompactTables = do + execNoTemplateM_ "dropCompactTables.0" + " DROP TABLE CompactGrandHash; \ + \ DROP TABLE CompactActiveRow; " + +compact :: () + => TargetBlockHeight + -> Logger SomeLogMessage + -> Database + -> [CompactFlag] + -> IO (Maybe ByteString) +compact tbh logger db flags = runCompactM (CompactEnv logger db flags) $ do + logg Info "Beginning compaction" + + doGrandHash <- isFlagNotSet NoGrandHash + + withTx $ do + createCompactGrandHash + createCompactActiveRow + + blockHeight <- locateTarget tbh + txId <- getEndingTxId blockHeight + + logg Info $ "Target blockheight: " <> sshow blockHeight + logg Info $ "Ending TxId: " <> sshow txId + + versionedTables <- getVersionedTables blockHeight + + gh <- withTx $ do + withTables versionedTables $ \tbl -> collectTableRows txId tbl + if doGrandHash + then Just <$> computeGlobalHash + else pure Nothing + + withTx $ do + withTables versionedTables $ \tbl -> do + compactTable tbl + unlessFlagSet NoGrandHash $ void $ verifyTable tbl + unlessFlagSet NoDropNewTables $ do + logg Info "Dropping new tables" + dropNewTables blockHeight + compactSystemTables blockHeight + + unlessFlagSet KeepCompactTables $ do + logg Info "Dropping compact-specific tables" + withTx dropCompactTables + + unlessFlagSet NoVacuum $ do + logg Info "Vacuum" + execNoTemplateM_ "VACUUM" "VACUUM;" + + case gh of + Just h -> do + logg Info $ "Compaction complete, hash=" <> encodeB64Text h + Nothing -> do + logg Info "Compaction complete" + + pure gh + +data TargetBlockHeight + = Target !BlockHeight + -- ^ compact to this blockheight across all chains + | Latest + -- ^ for each chain, compact to its latest blockheight + deriving stock (Eq, Show) + +data CompactConfig = CompactConfig + { ccBlockHeight :: TargetBlockHeight + , ccDbDir :: FilePath + , ccVersion :: ChainwebVersion + , ccFlags :: [CompactFlag] + , ccChains :: Maybe (Set ChainId) + , logDir :: FilePath + , ccThreads :: Int + } + deriving stock (Eq, Show) + +compactAll :: CompactConfig -> IO () +compactAll CompactConfig{..} = do + latestBlockHeightChain0 <- do + let cid = unsafeChainId 0 + withDefaultLogger Debug $ \logger -> do + let resetDb = False + withSqliteDb cid logger ccDbDir resetDb $ \(SQLiteEnv db _) -> do + runCompactM (CompactEnv logger db []) getLatestBlockHeight + + let allCids = Set.fromList $ F.toList $ chainIdsAt ccVersion latestBlockHeightChain0 + let targetCids = Set.toList $ maybe allCids (Set.intersection allCids) ccChains + + flip (pooledMapConcurrentlyN_ ccThreads) targetCids $ \cid -> do + withPerChainFileLogger logDir cid Debug $ \logger -> do + let resetDb = False + withSqliteDb cid logger ccDbDir resetDb $ \(SQLiteEnv db _) -> do + void $ compact ccBlockHeight logger db ccFlags + +main :: IO () +main = do + config <- execParser opts + compactAll config + where + opts :: ParserInfo CompactConfig + opts = info (parser <**> helper) + (fullDesc <> progDesc "Pact DB Compaction tool") + + collapseSum :: [Parser [a]] 
-> Parser [a] + collapseSum = foldr (\x y -> (++) <$> x <*> y) (pure []) + + maybeList :: [a] -> Maybe [a] + maybeList = \case + [] -> Nothing + xs -> Just xs + + parser :: Parser CompactConfig + parser = CompactConfig + <$> (fmap Target (fromIntegral @Int <$> option auto + (short 'b' + <> long "target-blockheight" + <> metavar "BLOCKHEIGHT" + <> help "Target blockheight")) <|> pure Latest) + <*> strOption + (short 'd' + <> long "pact-database-dir" + <> metavar "DBDIR" + <> help "Pact database directory") + <*> ((lookupVersionByName . fromTextSilly @ChainwebVersionName) <$> strOption + (short 'v' + <> long "graph-version" + <> metavar "VERSION" + <> help "Chainweb version for graph. Only needed for non-standard graphs." + <> value (toText (_versionName mainnet)) + <> showDefault)) + <*> collapseSum + [ flag [] [KeepCompactTables] + (long "keep-compact-tables" + <> help "Keep compaction tables post-compaction, for inspection.") + , flag [] [NoVacuum] + (long "no-vacuum" + <> help "Don't VACUUM database.") + , flag [] [NoDropNewTables] + (long "no-drop-new-tables" + <> help "Don't drop new tables.") + , flag [] [NoGrandHash] + (long "no-grand-hash" + <> help "Don't compute the compact grand hash.") + ] + <*> fmap (fmap Set.fromList . maybeList) (many (unsafeChainId <$> option auto + (short 'c' + <> long "chain" + <> metavar "CHAINID" + <> help "Add this chain to the target set of ones to compact."))) + <*> strOption + (long "log-dir" + <> metavar "DIRECTORY" + <> help "Directory where logs will be placed" + <> value ".") + <*> option auto + (short 't' + <> long "threads" + <> metavar "THREADS" + <> value 4 + <> help "Number of threads for compaction processing") + +fromTextSilly :: HasTextRepresentation a => Text -> a +fromTextSilly t = case fromText t of + Just a -> a + Nothing -> error "fromText failed" + +bhToSType :: BlockHeight -> SType +bhToSType bh = SInt (int bh) + +txIdToSType :: TxId -> SType +txIdToSType (TxId txid) = SInt (fromIntegral txid) + +tableNameToSType :: TableName -> SType +tableNameToSType (TableName tbl) = SText tbl diff --git a/src/Chainweb/Pact/Backend/PactState.hs b/src/Chainweb/Pact/Backend/PactState.hs new file mode 100644 index 0000000000..2f2ad4ba81 --- /dev/null +++ b/src/Chainweb/Pact/Backend/PactState.hs @@ -0,0 +1,428 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE DuplicateRecordFields #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE MultiWayIf #-} +{-# LANGUAGE OverloadedRecordDot #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StrictData #-} +{-# LANGUAGE TypeApplications #-} + +-- | +-- Module: Chainweb.Pact.Backend.PactState +-- Copyright: Copyright © 2023 Kadena LLC. +-- License: see LICENSE.md +-- +-- Diff Pact state between two databases. +-- +-- There are other utilities provided by this module whose purpose is either +-- to get the pact state. +-- +-- The code in this module operates primarily on 'Stream's, because the amount +-- of user data can grow quite large. by comparing one table at a time, we can +-- keep maximum memory utilisation in check. +-- + +module Chainweb.Pact.Backend.PactState + ( getPactTableNames + , getPactTables + , getLatestPactState + , getLatestBlockHeight + + , PactRow(..) + , Table(..) + , TableDiffable(..) 
+ + , pactDiffMain + ) + where + +import Data.IORef (newIORef, readIORef, atomicModifyIORef') +import Control.Exception (bracket) +import Control.Monad (forM, forM_, when, void) +import Control.Monad.IO.Class (MonadIO(liftIO)) +import Control.Monad.Trans.Class (lift) +import Control.Monad.Except (ExceptT(..), runExceptT, throwError) +import Data.Aeson (ToJSON(..), (.=)) +import Data.Aeson qualified as Aeson +import Data.Vector (Vector) +import Data.Vector qualified as Vector +import Data.ByteString (ByteString) +import Data.Foldable qualified as F +import Data.Int (Int64) +import Data.List qualified as List +import Data.Map (Map) +import Data.Map.Merge.Strict qualified as Merge +import Data.Map.Strict qualified as M +import Data.Text (Text) +import Data.Text qualified as Text +import Data.Text.IO qualified as Text +import Data.Text.Encoding qualified as Text +import Database.SQLite3.Direct (Utf8(..), Database) +import Database.SQLite3.Direct qualified as SQL +import Options.Applicative + +import Chainweb.BlockHeight (BlockHeight(..)) +import Chainweb.Logger (logFunctionText, logFunctionJson) +import Chainweb.Utils (HasTextRepresentation, fromText, toText, int) +import Chainweb.Version (ChainwebVersion(..), ChainwebVersionName, ChainId, chainIdToText) +import Chainweb.Version.Mainnet (mainnet) +import Chainweb.Version.Registry (lookupVersionByName) +import Chainweb.Version.Utils (chainIdsAt) +import Chainweb.Pact.Backend.Types (SQLiteEnv(..)) +import Chainweb.Pact.Backend.Utils (fromUtf8, withSqliteDb) +import Chainweb.Pact.Backend.Compaction qualified as C + +import System.Directory (doesFileExist) +import System.FilePath ((</>)) +import System.Exit (exitFailure) +import System.Logger (LogLevel(..)) +import System.LogLevel qualified as LL + +import Pact.Types.SQLite (SType(..), RType(..)) +import Pact.Types.SQLite qualified as Pact +import Streaming.Prelude (Stream, Of) +import Streaming.Prelude qualified as S + +excludedTables :: [Utf8] +excludedTables = checkpointerTables ++ compactionTables + where + checkpointerTables = ["BlockHistory", "VersionedTableCreation", "VersionedTableMutation", "TransactionIndex"] + compactionTables = ["CompactGrandHash", "CompactActiveRow"] + +getLatestBlockHeight :: Database -> IO BlockHeight +getLatestBlockHeight db = do + let qryText = "SELECT MAX(blockheight) FROM BlockHistory" + Pact.qry db qryText [] [RInt] >>= \case + [[SInt bh]] -> pure (BlockHeight (int bh)) + _ -> error "getLatestBlockHeight: expected int" + +getPactTableNames :: Database -> IO (Vector Utf8) +getPactTableNames db = do + let sortedTableNames :: [[SType]] -> [Utf8] + sortedTableNames rows = M.elems $ M.fromListWith const $ flip List.map rows $ \case + [SText u] -> (Text.toLower (fromUtf8 u), u) + _ -> error "getPactTableNames.sortedTableNames: expected text" + + tables <- fmap sortedTableNames $ do + let qryText = + "SELECT name FROM sqlite_schema \ + \WHERE \ + \ type = 'table' \ + \AND \ + \ name NOT LIKE 'sqlite_%'" + Pact.qry db qryText [] [RText] + + pure (Vector.fromList tables) + +-- | Get all of the rows for each table. The tables will be sorted +-- lexicographically by name. 
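+--
+-- A minimal consumption sketch (the helper below is illustrative, not part of
+-- this module):
+--
+-- > printTableNames :: Database -> IO ()
+-- > printTableNames db = S.mapM_ (\tbl -> Text.putStrLn tbl.name) (getPactTables db)
+--
+-- Consuming the stream one 'Table' at a time keeps memory usage bounded even
+-- when user tables are large.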
+getPactTables :: Database -> Stream (Of Table) IO () +getPactTables db = do + let fmtTable x = "\"" <> x <> "\"" + + tables <- liftIO $ getPactTableNames db + + forM_ tables $ \tbl -> do + if tbl `notElem` excludedTables + then do + let qryText = "SELECT rowkey, rowdata, txid FROM " + <> fmtTable tbl + userRows <- liftIO $ Pact.qry db qryText [] [RText, RBlob, RInt] + shapedRows <- forM userRows $ \case + [SText (Utf8 rowKey), SBlob rowData, SInt txId] -> do + pure $ PactRow {..} + _ -> error "getPactTableNames: unexpected shape of user table row" + S.yield $ Table (fromUtf8 tbl) shapedRows + else do + pure () + +stepStatement :: SQL.Statement -> [RType] -> Stream (Of [SType]) IO (Either SQL.Error ()) +stepStatement stmt rts = runExceptT $ do + -- todo: rename from acc + let acc :: SQL.StepResult -> ExceptT SQL.Error (Stream (Of [SType]) IO) () + acc = \case + SQL.Done -> do + pure () + SQL.Row -> do + as <- forM (List.zip [0..] rts) $ \(colIx, expectedColType) -> do + liftIO $ case expectedColType of + RInt -> SInt <$> SQL.columnInt64 stmt colIx + RDouble -> SDouble <$> SQL.columnDouble stmt colIx + RText -> SText <$> SQL.columnText stmt colIx + RBlob -> SBlob <$> SQL.columnBlob stmt colIx + lift $ S.yield as + liftIO (SQL.step stmt) >>= \case + Left err -> do + throwError err + Right sr -> do + acc sr + + -- maybe use stepNoCB + ExceptT (liftIO (SQL.step stmt)) >>= acc + +-- | Prepare/execute query with params +qry :: () + => Database + -> Utf8 + -> [SType] + -> [RType] + -> (Stream (Of [SType]) IO (Either SQL.Error ()) -> IO x) + -> IO x +qry db qryText args returnTypes k = do + bracket (Pact.prepStmt db qryText) SQL.finalize $ \stmt -> do + Pact.bindParams stmt args + k (stepStatement stmt returnTypes) + +getLatestPactState :: Database -> Stream (Of TableDiffable) IO () +getLatestPactState db = do + let fmtTable x = "\"" <> x <> "\"" + + tables <- liftIO $ getPactTableNames db + + forM_ tables $ \tbl -> do + when (tbl `notElem` excludedTables) $ do + let qryText = "SELECT rowkey, rowdata, txid FROM " + <> fmtTable tbl + latestState <- fmap (M.map (\prc -> prc.rowData)) $ liftIO $ qry db qryText [] [RText, RBlob, RInt] $ \rows -> do + let go :: Map ByteString PactRowContents -> [SType] -> Map ByteString PactRowContents + go m = \case + [SText (Utf8 rowKey), SBlob rowData, SInt txId] -> + M.insertWith (\prc1 prc2 -> if prc1.txId > prc2.txId then prc1 else prc2) rowKey (PactRowContents rowData txId) m + _ -> error "getLatestPactState: unexpected shape of user table row" + S.fold_ go M.empty id rows + S.yield (TableDiffable (fromUtf8 tbl) latestState) + +-- This assumes the same tables (essentially zipWith). +-- Note that this assumes we got the state from `getLatestPactState`, +-- because `getPactTableNames` sorts the table names, and `getLatestPactState` +-- sorts the [PactRow] by rowKey. +-- +-- If we ever step across two tables that do not have the same name, we throw an error. +-- +-- This diminishes the utility of comparing two pact states that are known to be +-- at different heights, but that hurts our ability to perform the diff in +-- constant memory. 
+-- +-- TODO: maybe inner stream should be a ByteStream +diffLatestPactState :: () + => Stream (Of TableDiffable) IO () + -> Stream (Of TableDiffable) IO () + -> Stream (Of (Text, Stream (Of RowKeyDiffExists) IO ())) IO () +diffLatestPactState = go + where + go :: Stream (Of TableDiffable) IO () -> Stream (Of TableDiffable) IO () -> Stream (Of (Text, Stream (Of RowKeyDiffExists) IO ())) IO () + go s1 s2 = do + e1 <- liftIO $ S.next s1 + e2 <- liftIO $ S.next s2 + + case (e1, e2) of + (Left (), Left ()) -> do + pure () + (Right _, Left ()) -> do + error "left stream longer than right" + (Left (), Right _) -> do + error "right stream longer than left" + (Right (t1, next1), Right (t2, next2)) -> do + when (t1.name /= t2.name) $ do + error "diffLatestPactState: mismatched table names" + S.yield (t1.name, diffTables t1 t2) + go next1 next2 + +-- | We don't include the entire rowdata in the diff, only the rowkey. +-- This is just a space-saving measure. +data RowKeyDiffExists + = Old ByteString + -- ^ The rowkey exists in the same table of the first db, but not the second. + | New ByteString + -- ^ The rowkey exists in the same table of the second db, but not the first. + | Delta ByteString + -- ^ The rowkey exists in the same table of both dbs, but the rowdata + -- differs. + +diffTables :: TableDiffable -> TableDiffable -> Stream (Of RowKeyDiffExists) IO () +diffTables t1 t2 = do + void $ Merge.mergeA + (Merge.traverseMaybeMissing $ \rk _rd -> do + S.yield (Old rk) + pure Nothing + ) + (Merge.traverseMaybeMissing $ \rk _rd -> do + S.yield (New rk) + pure Nothing + ) + (Merge.zipWithMaybeAMatched $ \rk rd1 rd2 -> do + when (rd1 /= rd2) $ do + S.yield (Delta rk) + pure Nothing + ) + t1.rows + t2.rows + +rowKeyDiffExistsToObject :: RowKeyDiffExists -> Aeson.Value +rowKeyDiffExistsToObject = \case + Old rk -> Aeson.object + [ "old" .= Text.decodeUtf8 rk + ] + New rk -> Aeson.object + [ "new" .= Text.decodeUtf8 rk + ] + Delta rk -> Aeson.object + [ "delta" .= Text.decodeUtf8 rk + ] + +-- | A pact table - just its name and its rows. +data Table = Table + { name :: Text + , rows :: [PactRow] + } + deriving stock (Eq, Show) + +-- | A diffable pact table - its name and the _active_ pact state +-- as a Map from RowKey to RowData. 
+data TableDiffable = TableDiffable + { name :: Text + , rows :: Map ByteString ByteString -- Map RowKey RowData + } + deriving stock (Eq, Ord, Show) + +data PactRow = PactRow + { rowKey :: ByteString + , rowData :: ByteString + , txId :: Int64 + } + deriving stock (Eq, Show) + +instance Ord PactRow where + compare pr1 pr2 = + compare pr1.txId pr2.txId + <> compare pr1.rowKey pr2.rowKey + <> compare pr1.rowData pr2.rowData + +instance ToJSON PactRow where + toJSON pr = Aeson.object + [ "row_key" .= Text.decodeUtf8 pr.rowKey + , "row_data" .= Text.decodeUtf8 pr.rowData + , "tx_id" .= pr.txId + ] + +data PactRowContents = PactRowContents + { rowData :: ByteString + , txId :: Int64 + } + deriving stock (Eq, Show) + +data PactDiffConfig = PactDiffConfig + { firstDbDir :: FilePath + , secondDbDir :: FilePath + , chainwebVersion :: ChainwebVersion + , logDir :: FilePath + } + +data Diffy = Difference | NoDifference + deriving stock (Eq) + +instance Semigroup Diffy where + Difference <> _ = Difference + _ <> Difference = Difference + _ <> _ = NoDifference + +instance Monoid Diffy where + mempty = NoDifference + +pactDiffMain :: IO () +pactDiffMain = do + cfg <- execParser opts + + when (cfg.firstDbDir == cfg.secondDbDir) $ do + Text.putStrLn "Source and target Pact database directories cannot be the same." + exitFailure + + let cids = List.sort $ F.toList $ chainIdsAt cfg.chainwebVersion (BlockHeight maxBound) + + diffyRef <- newIORef @(Map ChainId Diffy) M.empty + + forM_ cids $ \cid -> do + C.withPerChainFileLogger cfg.logDir cid Info $ \logger -> do + let logText = logFunctionText logger + + sqliteFileExists1 <- doesPactDbExist cid cfg.firstDbDir + sqliteFileExists2 <- doesPactDbExist cid cfg.secondDbDir + + if | not sqliteFileExists1 -> do + logText LL.Warn $ "[SQLite for chain in " <> Text.pack cfg.firstDbDir <> " doesn't exist. Skipping]" + | not sqliteFileExists2 -> do + logText LL.Warn $ "[SQLite for chain in " <> Text.pack cfg.secondDbDir <> " doesn't exist. Skipping]" + | otherwise -> do + let resetDb = False + withSqliteDb cid logger cfg.firstDbDir resetDb $ \(SQLiteEnv db1 _) -> do + withSqliteDb cid logger cfg.secondDbDir resetDb $ \(SQLiteEnv db2 _) -> do + logText LL.Info "[Starting diff]" + let diff = diffLatestPactState (getLatestPactState db1) (getLatestPactState db2) + diffy <- S.foldMap_ id $ flip S.mapM diff $ \(tblName, tblDiff) -> do + logText LL.Info $ "[Starting table " <> tblName <> "]" + d <- S.foldMap_ id $ flip S.mapM tblDiff $ \d -> do + logFunctionJson logger LL.Warn $ rowKeyDiffExistsToObject d + pure Difference + logText LL.Info $ "[Finished table " <> tblName <> "]" + pure d + + logText LL.Info $ case diffy of + Difference -> "[Non-empty diff]" + NoDifference -> "[Empty diff]" + logText LL.Info $ "[Finished chain " <> chainIdToText cid <> "]" + + atomicModifyIORef' diffyRef $ \m -> (M.insert cid diffy m, ()) + + diffy <- readIORef diffyRef + case M.foldMapWithKey (\_ d -> d) diffy of + Difference -> do + Text.putStrLn "Diff complete. Differences found." + exitFailure + NoDifference -> do + Text.putStrLn "Diff complete. No differences found." 
+ where + opts :: ParserInfo PactDiffConfig + opts = info (parser <**> helper) + (fullDesc <> progDesc "Compare two Pact databases") + + parser :: Parser PactDiffConfig + parser = PactDiffConfig + <$> strOption + (long "first-database-dir" + <> metavar "PACT_DB_DIRECTORY" + <> help "First Pact database directory") + <*> strOption + (long "second-database-dir" + <> metavar "PACT_DB_DIRECTORY" + <> help "Second Pact database directory") + <*> (fmap (lookupVersionByName . fromTextSilly @ChainwebVersionName) $ strOption + (long "graph-version" + <> metavar "CHAINWEB_VERSION" + <> help "Chainweb version for graph. Only needed for non-standard graphs." + <> value (toText (_versionName mainnet)) + <> showDefault)) + <*> strOption + (long "log-dir" + <> metavar "LOG_DIRECTORY" + <> help "Directory where logs will be placed" + <> value ".") + +fromTextSilly :: HasTextRepresentation a => Text -> a +fromTextSilly t = case fromText t of + Just a -> a + Nothing -> error "fromText failed" + +doesPactDbExist :: ChainId -> FilePath -> IO Bool +doesPactDbExist cid dbDir = do + let chainDbFileName = mconcat + [ "pact-v1-chain-" + , Text.unpack (chainIdToText cid) + , ".sqlite" + ] + let file = dbDir </> chainDbFileName + doesFileExist file diff --git a/src/Chainweb/Pact/Backend/RelationalCheckpointer.hs b/src/Chainweb/Pact/Backend/RelationalCheckpointer.hs index 8022013eda..ec8d7a17b4 100644 --- a/src/Chainweb/Pact/Backend/RelationalCheckpointer.hs +++ b/src/Chainweb/Pact/Backend/RelationalCheckpointer.hs @@ -233,13 +233,13 @@ doDiscard dbenv = runBlockEnv dbenv $ do -- commitSavepoint Block -doGetEarliest :: HasCallStack => Db logger -> IO (BlockHeight, BlockHash) +doGetEarliest :: HasCallStack => Db logger -> IO (Maybe (BlockHeight, BlockHash)) doGetEarliest dbenv = - runBlockEnv dbenv $ callDb "getLatestBlock" $ \db -> do + runBlockEnv dbenv $ callDb "getEarliestBlock" $ \db -> do r <- qry_ db qtext [RInt, RBlob] >>= mapM go case r of - [] -> fail "Chainweb.Pact.Backend.RelationalCheckpointer.doGetEarliest: no earliest block. This is a bug in chainweb-node." - (!o:_) -> return o + [] -> return Nothing + (!o:_) -> return (Just o) where qtext = "SELECT blockheight, hash FROM BlockHistory \ \ ORDER BY blockheight ASC LIMIT 1" diff --git a/src/Chainweb/Pact/Backend/Types.hs b/src/Chainweb/Pact/Backend/Types.hs index 5ecdc59ce9..b69b277222 100644 --- a/src/Chainweb/Pact/Backend/Types.hs +++ b/src/Chainweb/Pact/Backend/Types.hs @@ -297,9 +297,9 @@ data Checkpointer logger = Checkpointer -- ^ commits pending modifications to block, with the given blockhash , _cpDiscard :: !(IO ()) -- ^ discard pending block changes - , _cpGetEarliestBlock :: !(IO (BlockHeight, BlockHash)) + , _cpGetEarliestBlock :: !(IO (Maybe (BlockHeight, BlockHash))) -- ^ get the checkpointer's idea of the earliest block. The block height - -- is the height of the block of the block hash. + -- is the height of the block of the block hash. , _cpGetLatestBlock :: !(IO (Maybe (BlockHeight, BlockHash))) -- ^ get the checkpointer's idea of the latest block. The block height is -- is the height of the block of the block hash. 
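A minimal sketch of driving the new compaction module added above, modelled on its uses in ForkingBench and MultiNode later in this patch; the module name CompactExample, the chain choice, and the target height are illustrative assumptions, not part of the patch:

module CompactExample where

import Control.Monad (void)
import qualified Chainweb.Pact.Backend.Compaction as C
import Chainweb.BlockHeight (BlockHeight)
import Chainweb.Pact.Backend.Types (_sConn)
import Chainweb.Pact.Backend.Utils (withSqliteDb)
import Chainweb.Version (unsafeChainId)
import System.Logger.Types (LogLevel(..))

-- Compact chain 0 of a local Pact database to the given blockheight,
-- skipping the grand-hash computation (as the benchmarks do).
compactChain0To :: BlockHeight -> FilePath -> IO ()
compactChain0To target dbDir =
  C.withDefaultLogger Error $ \logger -> do
    let cid = unsafeChainId 0
    withSqliteDb cid logger dbDir False $ \sqlEnv -> do
      let db = _sConn sqlEnv
      void $ C.compact (C.Target target) logger db [C.NoGrandHash]

The same pattern appears with per-chain file loggers and different flag sets in the test and benchmark hunks below.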
diff --git a/src/Chainweb/Pact/PactService.hs b/src/Chainweb/Pact/PactService.hs index ee058adc08..15d9c45578 100644 --- a/src/Chainweb/Pact/PactService.hs +++ b/src/Chainweb/Pact/PactService.hs @@ -72,6 +72,7 @@ import Prelude hiding (lookup) import qualified Pact.Gas as P import Pact.Gas.Table +import qualified Pact.JSON.Encode as J import qualified Pact.Interpreter as P import qualified Pact.Types.ChainMeta as P import qualified Pact.Types.Command as P @@ -127,8 +128,7 @@ runPactService ver cid chainwebLogger reqQ mempoolAccess bhDb pdb sqlenv config serviceRequests mempoolAccess reqQ withPactService - :: Logger logger - => CanReadablePayloadCas tbl + :: (Logger logger, CanReadablePayloadCas tbl) => ChainwebVersion -> ChainId -> logger @@ -141,8 +141,8 @@ withPactService withPactService ver cid chainwebLogger bhDb pdb sqlenv config act = withProdRelationalCheckpointer checkpointerLogger initialBlockState sqlenv ver cid $ \checkpointer -> do let !rs = readRewards - !initialParentHeader = ParentHeader $ genesisBlockHeader ver cid - !pse = PactServiceEnv + let !initialParentHeader = ParentHeader $ genesisBlockHeader ver cid + let !pse = PactServiceEnv { _psMempoolAccess = Nothing , _psCheckpointer = checkpointer , _psPdb = pdb @@ -162,9 +162,31 @@ withPactService ver cid chainwebLogger bhDb pdb sqlenv config act = , _psBlockGasLimit = _pactBlockGasLimit config , _psChainId = cid } - !pst = PactServiceState Nothing mempty initialParentHeader P.noSPVSupport - runPactServiceM pst pse $ do + let !pst = PactServiceState Nothing mempty initialParentHeader P.noSPVSupport + + when (_pactFullHistoryRequired config) $ do + mEarliestBlock <- _cpGetEarliestBlock checkpointer + case mEarliestBlock of + Nothing -> do + pure () + Just (earliestBlockHeight, _) -> do + let gHeight = genesisHeight ver cid + when (gHeight /= earliestBlockHeight) $ do + let e = FullHistoryRequired + { _earliestBlockHeight = earliestBlockHeight + , _genesisHeight = gHeight + } + let msg = J.object + [ "details" J..= e + , "message" J..= J.text "Your node has been configured\ + \ to require the full Pact history; however, the full\ + \ history is not available. Perhaps you have compacted\ + \ your Pact state?" + ] + logError_ chainwebLogger (J.encodeText msg) + throwM e + runPactServiceM pst pse $ do -- If the latest header that is stored in the checkpointer was on an -- orphaned fork, there is no way to recover it in the call of -- 'initalPayloadState.readContracts'. 
We therefore rewind to the latest @@ -906,7 +928,6 @@ chainweb213GasModel = modifiedGasModel _ -> P.milliGasToGas $ fullRunFunction name ga modifiedGasModel = defGasModel { P.runGasModel = \t g -> P.gasToMilliGas (modifiedRunFunction t g) } - getGasModel :: TxContext -> P.GasModel getGasModel ctx | chainweb213Pact (ctxVersion ctx) (ctxChainId ctx) (ctxCurrentBlockHeight ctx) = chainweb213GasModel @@ -914,3 +935,5 @@ getGasModel ctx pactLabel :: (Logger logger) => Text -> PactServiceM logger tbl x -> PactServiceM logger tbl x pactLabel lbl x = localLabel ("pact-request", lbl) x + + diff --git a/src/Chainweb/Pact/PactService/Checkpointer.hs b/src/Chainweb/Pact/PactService/Checkpointer.hs index 5ce2773802..02a3d9e8d0 100644 --- a/src/Chainweb/Pact/PactService/Checkpointer.hs +++ b/src/Chainweb/Pact/PactService/Checkpointer.hs @@ -41,10 +41,10 @@ module Chainweb.Pact.PactService.Checkpointer -- There are two function for restoring the checkpointer for evaluation of back -- code: -- - -- * 'withCheckPointerRewind' and + -- * 'withCheckpointerRewind' and -- * 'withCurrentCheckpointer'. -- - -- 'withCheckPointerRewind' rewinds the checkpointer to the provided parent + -- 'withCheckpointerRewind' rewinds the checkpointer to the provided parent -- header. 'withCurrentCheckpointer' evaluates the pact transaction within the -- context of the current checkpointer state. Both functions update the value of -- '_psParentHeader' at the beginning and the end of each call. @@ -171,7 +171,7 @@ data WithCheckpointerResult a -- -- This function assumes that '_psParentHeader' has been updated to match the -- latest block in the checkpointers. This is guaranteed to be the case after --- calling any of 'rewindTo', 'syncParentHeader', 'withCheckPointerRewind', +-- calling any of 'rewindTo', 'syncParentHeader', 'withCheckpointerRewind', -- 'withCheckPointerWithoutRewind', or 'withCurrentCheckpointer'. -- -- /NOTE:/ @@ -273,7 +273,7 @@ withCheckpointerRewind rewindLimit p caller act = do -- | Run a batch of checkpointer operations, possibly involving the evaluation -- transactions accross several blocks using more than a single call of --- 'withCheckPointerRewind' or 'withCurrentCheckpointer', and persist the final +-- 'withCheckpointerRewind' or 'withCurrentCheckpointer', and persist the final -- state. In case of an failure, the checkpointer is reverted to the initial -- state. -- @@ -308,7 +308,7 @@ withBatchIO runPact act = mask $ \umask -> do -- | Run a batch of checkpointer operations, possibly involving the evaluation -- transactions accross several blocks using more than a single call of --- 'withCheckPointerRewind' or 'withCurrentCheckpointer', and discard the final +-- 'withCheckpointerRewind' or 'withCurrentCheckpointer', and discard the final -- state at the end. -- withDiscardedBatch :: PactServiceM logger tbl a -> PactServiceM logger tbl a diff --git a/src/Chainweb/Pact/PactService/ExecBlock.hs b/src/Chainweb/Pact/PactService/ExecBlock.hs index cd9a6b6d13..08fafb1091 100644 --- a/src/Chainweb/Pact/PactService/ExecBlock.hs +++ b/src/Chainweb/Pact/PactService/ExecBlock.hs @@ -102,8 +102,8 @@ setParentHeader msg ph@(ParentHeader bh) = do -- /NOTE:/ -- -- Any call of this function must occur within a dedicated call to --- 'withChwithCheckpointerRewind', 'withCurrentCheckpointer' or --- 'withCheckPointerWithoutRewind'. +-- 'withCheckpointerRewind', 'withCurrentCheckpointer' or +-- 'withCheckpointerWithoutRewind'. 
-- execBlock :: (CanReadablePayloadCas tbl, Logger logger) diff --git a/src/Chainweb/Pact/Service/Types.hs b/src/Chainweb/Pact/Service/Types.hs index 9763143fa2..55df2d595d 100644 --- a/src/Chainweb/Pact/Service/Types.hs +++ b/src/Chainweb/Pact/Service/Types.hs @@ -103,6 +103,9 @@ data PactServiceConfig = PactServiceConfig -- ^ whether to write transaction gas logs at INFO , _pactModuleCacheLimit :: !DbCacheLimitBytes -- ^ limit of the database module cache in bytes of corresponding row data + , _pactFullHistoryRequired :: !Bool + -- ^ Whether or not the node requires that the full Pact history be + -- available. Compaction can remove history. } deriving (Eq,Show) data GasPurchaseFailure = GasPurchaseFailure TransactionHash PactError @@ -210,6 +213,10 @@ data PactException { _localRewindExceededLimit :: !RewindLimit , _localRewindRequestedDepth :: !RewindDepth } | LocalRewindGenesisExceeded + | FullHistoryRequired + { _earliestBlockHeight :: !BlockHeight + , _genesisHeight :: !BlockHeight + } deriving (Eq,Generic) instance Show PactException where @@ -239,6 +246,10 @@ instance J.Encode PactException where , "_localRewindRequestedDepth" J..= J.Aeson @Int (fromIntegral $ _rewindDepth $ _localRewindRequestedDepth o) ] build LocalRewindGenesisExceeded = tagged "LocalRewindGenesisExceeded" J.null + build o@(FullHistoryRequired{}) = tagged "FullHistoryRequired" $ J.object + [ "_fullHistoryRequiredEarliestBlockHeight" J..= J.Aeson @Int (fromIntegral $ _earliestBlockHeight o) + , "_fullHistoryRequiredGenesisHeight" J..= J.Aeson @Int (fromIntegral $ _genesisHeight o) + ] tagged :: J.Encode v => Text -> v -> J.Builder tagged t v = J.object diff --git a/src/Chainweb/Pact/Types.hs b/src/Chainweb/Pact/Types.hs index 072ea5ba28..44d59b1f0e 100644 --- a/src/Chainweb/Pact/Types.hs +++ b/src/Chainweb/Pact/Types.hs @@ -479,6 +479,7 @@ testPactServiceConfig = PactServiceConfig , _pactBlockGasLimit = testBlockGasLimit , _pactLogGas = False , _pactModuleCacheLimit = defaultModuleCacheLimit + , _pactFullHistoryRequired = False } -- | This default value is only relevant for testing. In a chainweb-node the @GasLimit@ @@ -668,7 +669,6 @@ execPactServiceM execPactServiceM st env act = execStateT (runReaderT (_unPactServiceM act) env) st - getCheckpointer :: PactServiceM logger tbl (Checkpointer logger) getCheckpointer = view psCheckpointer diff --git a/src/Chainweb/Rosetta/Internal.hs b/src/Chainweb/Rosetta/Internal.hs index 54507281c5..e84e12ff3a 100644 --- a/src/Chainweb/Rosetta/Internal.hs +++ b/src/Chainweb/Rosetta/Internal.hs @@ -186,7 +186,6 @@ genesisTransactions genesisTransactions logs cid txs = pure $ V.toList $ V.map (getGenesisLog logs cid) txs - -- | Matches a single genesis transaction to its coin contract logs. genesisTransaction :: Map TxId [AccountLog] @@ -200,7 +199,6 @@ genesisTransaction logs cid rest target = do V.find (\c -> _crReqKey c == target) rest pure $ getGenesisLog logs cid cr - ------------------------ -- Coinbase Helpers -- ------------------------ @@ -510,7 +508,6 @@ getLatestBlockHeader cutDb cid = do c <- liftIO $ _cut cutDb HM.lookup cid (_cutMap c) ?? RosettaInvalidChain - findBlockHeaderInCurrFork :: CutDb tbl -> ChainId @@ -545,7 +542,6 @@ findBlockHeaderInCurrFork cutDb cid someHeight someHash = do somebh <- liftIO $ seekAncestor db latest (int hi) somebh ?? RosettaInvalidBlockHeight - getBlockOutputs :: forall tbl . 
CanReadablePayloadCas tbl diff --git a/src/Chainweb/Utils.hs b/src/Chainweb/Utils.hs index b7d7834e3e..a20988ed2e 100644 --- a/src/Chainweb/Utils.hs +++ b/src/Chainweb/Utils.hs @@ -522,6 +522,18 @@ instance HasTextRepresentation Integer where fromText = treadM {-# INLINE fromText #-} +instance HasTextRepresentation Word where + toText = sshow + {-# INLINE toText #-} + fromText = treadM + {-# INLINE fromText #-} + +instance HasTextRepresentation Word64 where + toText = sshow + {-# INLINE toText #-} + fromText = treadM + {-# INLINE fromText #-} + instance HasTextRepresentation UTCTime where toText = T.pack . formatTime defaultTimeLocale iso8601DateTimeFormat {-# INLINE toText #-} diff --git a/src/Chainweb/WebPactExecutionService.hs b/src/Chainweb/WebPactExecutionService.hs index 348db4b5b8..5d0604de2a 100644 --- a/src/Chainweb/WebPactExecutionService.hs +++ b/src/Chainweb/WebPactExecutionService.hs @@ -145,7 +145,6 @@ mkWebPactExecutionService hm = WebPactExecutionService $ PactExecutionService $ "PactExecutionService: Invalid chain ID: " ++ show cid - mkPactExecutionService :: PactQueue -> PactExecutionService diff --git a/test/Chainweb/Test/MultiNode.hs b/test/Chainweb/Test/MultiNode.hs index ae526ebafd..ebb1da26cf 100644 --- a/test/Chainweb/Test/MultiNode.hs +++ b/test/Chainweb/Test/MultiNode.hs @@ -4,6 +4,7 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE MultiWayIf #-} @@ -32,13 +33,13 @@ -- The configuration defines a scaled down, accelerated chain that tries to -- similulate a full-scale chain in a miniaturized settings. -- -module Chainweb.Test.MultiNode ( test, replayTest ) where +module Chainweb.Test.MultiNode ( test, replayTest, compactAndResumeTest ) where import Control.Concurrent import Control.Concurrent.Async import Control.DeepSeq import Control.Exception -import Control.Lens (set, view) +import Control.Lens (set, view, over) import Control.Monad import Data.Aeson @@ -58,6 +59,7 @@ import qualified Streaming.Prelude as S import System.FilePath import System.IO.Temp +import System.Logger.Types qualified as YAL import System.LogLevel import System.Timeout @@ -73,6 +75,9 @@ import Chainweb.Chainweb import Chainweb.Chainweb.Configuration import Chainweb.Chainweb.CutResources import Chainweb.Chainweb.PeerResources +import Chainweb.Pact.Backend.Compaction qualified as C +import Chainweb.Pact.Backend.Types (_sConn) +import Chainweb.Pact.Backend.Utils (withSqliteDb) import Chainweb.Cut import Chainweb.CutDB import Chainweb.Graph @@ -283,6 +288,62 @@ runNodesForSeconds loglevel write baseConf n (Seconds seconds) rdb pactDbDir inn void $ timeout (int seconds * 1_000_000) $ runNodes loglevel write baseConf n rdb pactDbDir inner +-- | Run nodes +-- Each node creates blocks +-- We wait until they've made a sufficient amount of blocks +-- We stop the nodes +-- We open sqlite connections to some of the database dirs and compact them +-- We restart all nodes with the same database dirs +-- We observe that they can make progress +compactAndResumeTest :: () + => LogLevel + -> ChainwebVersion + -> Natural + -> TestTree +compactAndResumeTest logLevel v n = + let + name = "compact-resume" + in + after AllFinish "ConsensusNetwork" $ testCaseSteps name $ \step -> + withTempRocksDb "compact-resume-test-rocks" $ \rdb -> + withSystemTempDirectory "compact-resume-test-pact" $ \pactDbDir -> do + let logFun = step . 
T.unpack + let logger = genericLogger logLevel logFun + + logFun "phase 1... creating blocks" + -- N.B: This consensus state stuff counts the number of blocks + -- in RocksDB, rather than the number of blocks in all chains + -- on the current cut. This is fine because we ultimately just want + -- to make sure that we are making progress (i.e, new blocks). + stateVar <- newMVar (emptyConsensusState v) + let ct :: Int -> StartedChainweb logger -> IO () + ct = harvestConsensusState logger stateVar + runNodesForSeconds logLevel logFun (multiConfig v n) n 60 rdb pactDbDir ct + Just stats1 <- consensusStateSummary <$> swapMVar stateVar (emptyConsensusState v) + assertGe "average block count before compaction" (Actual $ _statBlockCount stats1) (Expected 50) + logFun $ sshow stats1 + + logFun "phase 2... compacting" + let cid = unsafeChainId 0 + -- compact only half of them + let nids = filter even [0 .. int @_ @Int n - 1] + forM_ nids $ \nid -> do + let dir = pactDbDir show nid + withSqliteDb cid logger dir False $ \sqlEnv -> do + C.withDefaultLogger YAL.Warn $ \cLogger -> do + let cLogger' = over YAL.setLoggerScope (\scope -> ("nodeId",sshow nid) : ("chainId",sshow cid) : scope) cLogger + let flags = [C.NoVacuum, C.NoGrandHash] + let db = _sConn sqlEnv + let bh = BlockHeight 5 + void $ C.compact (C.Target bh) cLogger' db flags + + logFun "phase 3... restarting nodes and ensuring progress" + runNodesForSeconds logLevel logFun (multiConfig v n) n 60 rdb pactDbDir ct + Just stats2 <- consensusStateSummary <$> swapMVar stateVar (emptyConsensusState v) + -- We ensure that we've gotten to at least 1.5x the previous block count + assertGe "average block count post-compaction" (Actual $ _statBlockCount stats2) (Expected (3 * _statBlockCount stats1 `div` 2)) + logFun $ sshow stats2 + replayTest :: LogLevel -> ChainwebVersion diff --git a/test/Chainweb/Test/Pact/PactMultiChainTest.hs b/test/Chainweb/Test/Pact/PactMultiChainTest.hs index 649df5ad04..df18fff525 100644 --- a/test/Chainweb/Test/Pact/PactMultiChainTest.hs +++ b/test/Chainweb/Test/Pact/PactMultiChainTest.hs @@ -360,9 +360,14 @@ runLocal cid' cmd = runLocalWithDepth Nothing cid' cmd runLocalWithDepth :: Maybe RewindDepth -> ChainId -> CmdBuilder -> PactTestM (Either PactException LocalResult) runLocalWithDepth depth cid' cmd = do + pact <- getPactService cid' + cwCmd <- buildCwCmd cmd + liftIO $ _pactLocal pact Nothing Nothing depth cwCmd + +getPactService :: ChainId -> PactTestM PactExecutionService +getPactService cid' = do HM.lookup cid' <$> view menvPacts >>= \case - Just pact -> buildCwCmd cmd >>= - liftIO . _pactLocal pact Nothing Nothing depth + Just pact -> return pact Nothing -> liftIO $ assertFailure $ "No pact service found at chain id " ++ show cid' assertLocalFailure @@ -484,8 +489,6 @@ pact43UpgradeTest = do ]) $ mkKeySetData "k" [sender00] - - chainweb215Test :: PactTestM () chainweb215Test = do @@ -578,8 +581,6 @@ chainweb215Test = do , mkXResumeEvent "sender00" "sender00" 0.0123 sender00Ks "pact" h' (toText cid) "0" ] - - pact431UpgradeTest :: PactTestM () pact431UpgradeTest = do @@ -1360,7 +1361,6 @@ buildXSend caps = MempoolCmdBuilder $ \(MempoolInput _ bh) -> "(coin.transfer-crosschain 'sender00 'sender00 (read-keyset 'k) \"0\" 0.0123)" $ mkKeySetData "k" [sender00] - chain0 :: ChainId chain0 = unsafeChainId 0 @@ -1398,7 +1398,6 @@ setFromHeader bh = set cbChainId (_blockChainId bh) . 
set cbCreationTime (toTxCreationTime $ _bct $ _blockCreationTime bh) - buildBasic :: PactRPC T.Text -> MempoolCmdBuilder @@ -1417,9 +1416,6 @@ buildBasic' f r = MempoolCmdBuilder $ \(MempoolInput _ bh) -> $ setFromHeader bh $ mkCmd (sshow bh) r - - - -- | Get output on latest cut for chain getPWO :: ChainId -> PactTestM (PayloadWithOutputs,BlockHeader) getPWO chid = do diff --git a/test/Chainweb/Test/Pact/PactReplay.hs b/test/Chainweb/Test/Pact/PactReplay.hs index a393c72523..c62c97d606 100644 --- a/test/Chainweb/Test/Pact/PactReplay.hs +++ b/test/Chainweb/Test/Pact/PactReplay.hs @@ -86,12 +86,12 @@ tests rdb = onRestart :: IO (IORef MemPoolAccess) - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> (String -> IO ()) -> Assertion onRestart mpio iop step = do setOneShotMempool mpio testMemPoolAccess - bdb <- snd <$> iop + (_, _, bdb) <- iop bhdb' <- getBlockHeaderDb cid bdb block <- maxEntry bhdb' step $ "max block has height " <> sshow (_blockHeight block) @@ -160,7 +160,7 @@ dupegenMemPoolAccess = do serviceInitializationAfterFork :: IO (IORef MemPoolAccess) -> BlockHeader - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> Assertion serviceInitializationAfterFork mpio genesisBlock iop = do setOneShotMempool mpio testMemPoolAccess @@ -188,11 +188,11 @@ serviceInitializationAfterFork mpio genesisBlock iop = do restartPact :: IO () restartPact = do - q <- fst <$> iop + (_, q, _) <- iop addRequest q CloseMsg pruneDbs = forM_ cids $ \c -> do - dbs <- snd <$> iop + (_, _, dbs) <- iop db <- getBlockHeaderDb c dbs h <- maxEntry db tableDelete (_chainDbCas db) (casKey $ RankedBlockHeader h) @@ -202,7 +202,7 @@ serviceInitializationAfterFork mpio genesisBlock iop = do firstPlayThrough :: IO (IORef MemPoolAccess) -> BlockHeader - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> Assertion firstPlayThrough mpio genesisBlock iop = do setOneShotMempool mpio testMemPoolAccess @@ -228,7 +228,7 @@ firstPlayThrough mpio genesisBlock iop = do testDupes :: IO (IORef MemPoolAccess) -> BlockHeader - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> Assertion testDupes mpio genesisBlock iop = do setMempool mpio =<< dupegenMemPoolAccess @@ -259,12 +259,12 @@ testDupes mpio genesisBlock iop = do testDeepForkLimit :: IO (IORef MemPoolAccess) -> RewindLimit - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue,TestBlockDb) -> (String -> IO ()) -> Assertion testDeepForkLimit mpio (RewindLimit deepForkLimit) iop step = do setOneShotMempool mpio testMemPoolAccess - bdb <- snd <$> iop + (_, _, bdb) <- iop bhdb <- getBlockHeaderDb cid bdb step "query max db entry" maxblock <- maxEntry bhdb @@ -302,7 +302,7 @@ testDeepForkLimit mpio (RewindLimit deepForkLimit) iop step = do mineBlock :: ParentHeader -> Nonce - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> IO (T3 ParentHeader BlockHeader PayloadWithOutputs) mineBlock ph nonce iop = timeout 5000000 go >>= \case Nothing -> error "PactReplay.mineBlock: Test timeout. 
Most likely a test case caused a pact service failure that wasn't caught, and the test was blocked while waiting for the result" @@ -311,7 +311,7 @@ mineBlock ph nonce iop = timeout 5000000 go >>= \case go = do -- assemble block without nonce and timestamp - let r = fst <$> iop + let r = (\(_, q, _) -> q) <$> iop mv <- r >>= newBlock noMiner ph payload <- assertNotLeft =<< takeMVar mv @@ -325,7 +325,7 @@ mineBlock ph nonce iop = timeout 5000000 go >>= \case mv' <- r >>= validateBlock bh (payloadWithOutputsToPayloadData payload) void $ assertNotLeft =<< takeMVar mv' - bdb <- snd <$> iop + (_, _, bdb) <- iop let pdb = _bdbPayloadDb bdb addNewPayload pdb payload diff --git a/test/Chainweb/Test/Pact/PactSingleChainTest.hs b/test/Chainweb/Test/Pact/PactSingleChainTest.hs index d0494bedfc..f8942d0edb 100644 --- a/test/Chainweb/Test/Pact/PactSingleChainTest.hs +++ b/test/Chainweb/Test/Pact/PactSingleChainTest.hs @@ -4,6 +4,7 @@ {-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedRecordDot #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE RecordWildCards #-} @@ -17,18 +18,24 @@ module Chainweb.Test.Pact.PactSingleChainTest ) where import Control.Arrow ((&&&)) +import Control.Concurrent (forkIO) import Control.Concurrent.MVar import Control.DeepSeq -import Control.Lens hiding ((.=)) +import Control.Lens hiding ((.=), matching) import Control.Monad import Control.Monad.Catch +import Patience qualified as PatienceL +import Patience.Map qualified as PatienceM +import Patience.Map (Delta(..)) import Data.Aeson (object, (.=), Value(..), decodeStrict, eitherDecode) import qualified Data.ByteString.Lazy as BL import Data.Either (isRight, fromRight) import Data.IORef import qualified Data.Map.Strict as M +import Data.Maybe (isJust, isNothing) import qualified Data.Text as T +import Data.Text (Text) import qualified Data.Text.IO as T import qualified Data.Text.Encoding as T import qualified Data.Vector as V @@ -54,29 +61,39 @@ import Pact.JSON.Yaml import Chainweb.BlockCreationTime import Chainweb.BlockHeader +import Chainweb.BlockHeight (BlockHeight(..)) import Chainweb.Graph +import Chainweb.Logger (genericLogger) import Chainweb.Mempool.Mempool import Chainweb.MerkleLogHash (unsafeMerkleLogHash) import Chainweb.Miner.Pact +import Chainweb.Pact.Backend.Compaction qualified as C import Chainweb.Pact.Backend.Types import Chainweb.Pact.Service.BlockValidation hiding (local) -import Chainweb.Pact.Service.PactQueue (PactQueue) +import Chainweb.Pact.Service.PactQueue (PactQueue, newPactQueue) import Chainweb.Pact.Service.Types +import Chainweb.Pact.PactService (runPactService) import Chainweb.Pact.PactService.ExecBlock import Chainweb.Pact.Types import Chainweb.Pact.Utils (emptyPayload) import Chainweb.Payload import Chainweb.Test.Cut.TestBlockDb -import Chainweb.Test.Pact.Utils +import Chainweb.Test.Pact.Utils hiding (compact) +import Chainweb.Test.Pact.Utils qualified as Utils import Chainweb.Test.Utils import Chainweb.Test.TestVersions import Chainweb.Time +import Chainweb.Transaction (ChainwebTransaction) import Chainweb.Utils import Chainweb.Version import Chainweb.Version.Utils +import Chainweb.WebBlockHeaderDB (getWebBlockHeaderDb) import Chainweb.Storage.Table.RocksDB +import System.Logger.Types qualified as LL +import System.LogLevel (LogLevel(..)) + testVersion :: ChainwebVersion testVersion = slowForkingCpmTestVersion petersonChainGraph @@ -103,16 +120,16 @@ tests rdb = testGroup testName , 
test mempoolRefillTest , test blockGasLimitTest , testTimeout preInsertCheckTimeoutTest + , rosettaFailsWithoutFullHistory rdb + , rewindPastMinBlockHeightFails rdb + , pactStateSamePreAndPostCompaction rdb + , compactionIsIdempotent rdb + , compactionUserTablesDropped rdb ] where testName = "Chainweb.Test.Pact.PactSingleChainTest" - testWithConf f conf = - withDelegateMempool $ \dm -> - withPactTestBlockDb testVersion cid rdb (snd <$> dm) conf $ - f (fst <$> dm) - - test f = testWithConf f testPactServiceConfig - testTimeout f = testWithConf f (testPactServiceConfig { _pactPreInsertCheckTimeout = 1 }) + test = test' rdb + testTimeout = testTimeout' rdb testHistLookup1 = getHistoricalLookupNoTxs "sender00" (assertSender00Bal 100_000_000 "check latest entry for sender00 after a no txs block") @@ -121,8 +138,29 @@ tests rdb = testGroup testName testHistLookup3 = getHistoricalLookupWithTxs "sender00" (assertSender00Bal 9.999998051e7 "check latest entry for sender00 after block with txs") - -forSuccess :: NFData a => String -> IO (MVar (Either PactException a)) -> IO a +testWithConf' :: () + => RocksDb + -> (IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree) + -> PactServiceConfig + -> TestTree +testWithConf' rdb f conf = + withDelegateMempool $ \dm -> + withPactTestBlockDb testVersion cid rdb (snd <$> dm) conf $ + f (fst <$> dm) + +test' :: () + => RocksDb + -> (IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree) + -> TestTree +test' rdb f = testWithConf' rdb f testPactServiceConfig + +testTimeout' :: () + => RocksDb + -> (IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree) + -> TestTree +testTimeout' rdb f = testWithConf' rdb f (testPactServiceConfig { _pactPreInsertCheckTimeout = 5 }) + +forSuccess :: (NFData a, HasCallStack) => String -> IO (MVar (Either PactException a)) -> IO a forSuccess msg mvio = (`catchAllSynchronous` handler) $ do mv <- mvio takeMVar mv >>= \case @@ -131,12 +169,11 @@ forSuccess msg mvio = (`catchAllSynchronous` handler) $ do where handler e = assertFailure $ msg ++ ": exception thrown: " ++ show e - -runBlock :: PactQueue -> TestBlockDb -> TimeSpan Micros -> String -> IO PayloadWithOutputs -runBlock q bdb timeOffset msg = do +runBlock :: (HasCallStack) => PactQueue -> TestBlockDb -> TimeSpan Micros -> IO PayloadWithOutputs +runBlock q bdb timeOffset = do ph <- getParentTestBlockDb bdb cid let blockTime = add timeOffset $ _bct $ _blockCreationTime ph - nb <- forSuccess (msg <> ": newblock") $ + nb <- forSuccess "newBlock" $ newBlock noMiner (ParentHeader ph) q forM_ (chainIds testVersion) $ \c -> do let o | c == cid = nb @@ -146,15 +183,15 @@ runBlock q bdb timeOffset msg = do forSuccess "newBlockAndValidate: validate" $ validateBlock nextH (payloadWithOutputsToPayloadData nb) q -newBlockAndValidate :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +newBlockAndValidate :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree newBlockAndValidate refIO reqIO = testCase "newBlockAndValidate" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO setOneShotMempool refIO goldenMemPool - void $ runBlock q bdb second "newBlockAndValidate" + void $ runBlock q bdb second -newBlockAndValidationFailure :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +newBlockAndValidationFailure :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree newBlockAndValidationFailure refIO reqIO = testCase 
"newBlockAndValidationFailure" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO setOneShotMempool refIO goldenMemPool ph <- getParentTestBlockDb bdb cid @@ -190,11 +227,327 @@ toRowData v = case eitherDecode encV of where encV = J.encode v -getHistory :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +-- Test that PactService fails if Rosetta is enabled and we don't have all of +-- the history. +-- +-- We do this in two stages: +-- +-- 1: +-- - Start PactService with Rosetta disabled +-- - Run some blocks +-- - Compact to some arbitrary greater-than-genesis height +-- 2: +-- - Start PactService with Rosetta enabled +-- - Catch the exception that should arise at the start of PactService, +-- when performing the history check +rosettaFailsWithoutFullHistory :: () + => RocksDb + -> TestTree +rosettaFailsWithoutFullHistory rdb = + withTemporaryDir $ \iodir -> + withSqliteDb cid iodir $ \sqlEnvIO -> + withDelegateMempool $ \dm -> + sequentialTestGroup "rosettaFailsWithoutFullHistory" AllSucceed + [ + -- Run some blocks and then compact + withPactTestBlockDb' testVersion cid rdb sqlEnvIO mempty testPactServiceConfig $ \reqIO -> + testCase "runBlocksAndCompact" $ do + (sqlEnv, q, bdb) <- reqIO + + mempoolRef <- fmap (pure . fst) dm + + setOneShotMempool mempoolRef goldenMemPool + replicateM_ 10 $ void $ runBlock q bdb second + + Utils.compact LL.Error [C.NoVacuum, C.NoGrandHash] sqlEnv (C.Target (BlockHeight 5)) + + -- This needs to run after the previous test + -- Annoyingly, we must inline the PactService util starts here. + -- ResourceT will help clean all this up + , testCase "PactService Should fail" $ do + pactQueue <- newPactQueue 2000 + blockDb <- mkTestBlockDb testVersion rdb + bhDb <- getWebBlockHeaderDb (_bdbWebBlockHeaderDb blockDb) cid + sqlEnv <- sqlEnvIO + mempool <- fmap snd dm + let payloadDb = _bdbPayloadDb blockDb + let cfg = testPactServiceConfig { _pactFullHistoryRequired = True } + let logger = genericLogger System.LogLevel.Error (\_ -> return ()) + e <- try $ runPactService testVersion cid logger pactQueue mempool bhDb payloadDb sqlEnv cfg + case e of + Left (FullHistoryRequired {}) -> do + pure () + Left err -> do + assertFailure $ "Expected FullHistoryRequired exception, instead got: " ++ show err + Right _ -> do + assertFailure "Expected FullHistoryRequired exception, instead there was no exception at all." + ] + +rewindPastMinBlockHeightFails :: () + => RocksDb + -> TestTree +rewindPastMinBlockHeightFails rdb = + compactionSetup "rewindPastMinBlockHeightFails" rdb testPactServiceConfig $ \cr -> do + setOneShotMempool cr.mempoolRef goldenMemPool + replicateM_ 10 $ runBlock cr.pactQueue cr.blockDb second + + Utils.compact LL.Error [C.NoVacuum, C.NoGrandHash] cr.sqlEnv (C.Target (BlockHeight 5)) + + -- Genesis block header; compacted away by now + let bh = genesisBlockHeader testVersion cid + + syncResult <- readMVar =<< pactSyncToBlock bh cr.pactQueue + case syncResult of + Left (PactInternalError {}) -> do + return () + Left err -> do + assertFailure $ "Expected a PactInternalError, but got: " ++ show err + Right _ -> do + assertFailure "Expected an exception, but didn't encounter one." 
+ +pactStateSamePreAndPostCompaction :: () + => RocksDb + -> TestTree +pactStateSamePreAndPostCompaction rdb = + compactionSetup "pactStateSamePreAndPostCompaction" rdb testPactServiceConfig $ \cr -> do + let numBlocks :: Num a => a + numBlocks = 100 + + setOneShotMempool cr.mempoolRef goldenMemPool + + let makeTx :: Int -> BlockHeader -> IO ChainwebTransaction + makeTx nth bh = buildCwCmd + $ set cbSigners [mkSigner' sender00 [mkGasCap, mkTransferCap "sender00" "sender01" 1.0]] + $ setFromHeader bh + $ mkCmd (sshow (nth, bh)) + $ mkExec' "(coin.transfer \"sender00\" \"sender01\" 1.0)" + + supply <- newIORef @Int 0 + madeTx <- newIORef @Bool False + replicateM_ numBlocks $ do + setMempool cr.mempoolRef $ mempty { + mpaGetBlock = \_ _ _ _ bh -> do + madeTxYet <- readIORef madeTx + if madeTxYet + then do + pure mempty + else do + n <- atomicModifyIORef' supply $ \a -> (a + 1, a) + tx <- makeTx n bh + writeIORef madeTx True + pure $ V.fromList [tx] + } + void $ runBlock cr.pactQueue cr.blockDb second + writeIORef madeTx False + + let db = _sConn cr.sqlEnv + + statePreCompaction <- getLatestPactState db + + Utils.compact LL.Error [C.NoVacuum, C.NoGrandHash] cr.sqlEnv (C.Target (BlockHeight numBlocks)) + + statePostCompaction <- getLatestPactState db + + let stateDiff = M.filter (not . PatienceM.isSame) (PatienceM.diff statePreCompaction statePostCompaction) + when (not (null stateDiff)) $ do + T.putStrLn "" + forM_ (M.toList stateDiff) $ \(tbl, delta) -> do + T.putStrLn "" + T.putStrLn tbl + case delta of + Same _ -> do + pure () + Old x -> do + putStrLn $ "a pre-only value appeared in the pre- and post-compaction diff: " ++ show x + New x -> do + putStrLn $ "a post-only value appeared in the pre- and post-compaction diff: " ++ show x + Delta x1 x2 -> do + let daDiff = M.filter (not . PatienceM.isSame) (PatienceM.diff x1 x2) + forM_ daDiff $ \item -> do + case item of + Old x -> do + putStrLn $ "old: " ++ show x + New x -> do + putStrLn $ "new: " ++ show x + Same _ -> do + pure () + Delta x y -> do + putStrLn $ "old: " ++ show x + putStrLn $ "new: " ++ show y + putStrLn "" + assertFailure "pact state check failed" + +compactionIsIdempotent :: () + => RocksDb + -> TestTree +compactionIsIdempotent rdb = + compactionSetup "compactionIdempotent" rdb testPactServiceConfig $ \cr -> do + let numBlocks :: Num a => a + numBlocks = 100 + + setOneShotMempool cr.mempoolRef goldenMemPool + + let makeTx :: Int -> BlockHeader -> IO ChainwebTransaction + makeTx nth bh = buildCwCmd + $ set cbSigners [mkSigner' sender00 [mkGasCap, mkTransferCap "sender00" "sender01" 1.0]] + $ setFromHeader bh + $ mkCmd (sshow (nth, bh)) + $ mkExec' "(coin.transfer \"sender00\" \"sender01\" 1.0)" + + supply <- newIORef @Int 0 + madeTx <- newIORef @Bool False + replicateM_ numBlocks $ do + setMempool cr.mempoolRef $ mempty { + mpaGetBlock = \_ _ _ _ bh -> do + madeTxYet <- readIORef madeTx + if madeTxYet + then do + pure mempty + else do + n <- atomicModifyIORef' supply $ \a -> (a + 1, a) + tx <- makeTx n bh + writeIORef madeTx True + pure $ V.fromList [tx] + } + void $ runBlock cr.pactQueue cr.blockDb second + writeIORef madeTx False + + let db = _sConn cr.sqlEnv + + let compact h = + Utils.compact LL.Error [C.NoVacuum, C.NoGrandHash] cr.sqlEnv h + + let compactionHeight = C.Target (BlockHeight numBlocks) + compact compactionHeight + statePostCompaction1 <- getPactUserTables db + compact compactionHeight + statePostCompaction2 <- getPactUserTables db + + let stateDiff = M.filter (not . 
PatienceM.isSame) (PatienceM.diff statePostCompaction1 statePostCompaction2) + when (not (null stateDiff)) $ do + T.putStrLn "" + forM_ (M.toList stateDiff) $ \(tbl, delta) -> do + T.putStrLn "" + T.putStrLn tbl + case delta of + Same _ -> do + pure () + Old x -> do + putStrLn $ "a pre-only value appeared in the compaction idempotency diff: " ++ show x + New x -> do + putStrLn $ "a post-only value appeared in the compaction idempotency diff: " ++ show x + Delta x1 x2 -> do + let daDiff = PatienceL.pairItems (\a b -> rowKey a == rowKey b) (PatienceL.diff x1 x2) + forM_ daDiff $ \item -> do + case item of + Old x -> do + putStrLn $ "old: " ++ show x + New x -> do + putStrLn $ "new: " ++ show x + Same _ -> do + pure () + Delta x y -> do + putStrLn $ "old: " ++ show x + putStrLn $ "new: " ++ show y + putStrLn "" + assertFailure "pact state check failed" + +-- | Test that user tables created before the compaction height are kept, +-- while those created after the compaction height are dropped. +compactionUserTablesDropped :: () + => RocksDb + -> TestTree +compactionUserTablesDropped rdb = + let + -- creating a module uses about 60k gas. this is + -- that plus some change. + gasLimit :: GasLimit + gasLimit = 70_000 + + pactCfg = testPactServiceConfig { + _pactBlockGasLimit = gasLimit + } + in + compactionSetup "compactionUserTablesDropped" rdb pactCfg $ \cr -> do + let numBlocks :: Num a => a + numBlocks = 100 + let halfwayPoint :: Integral a => a + halfwayPoint = numBlocks `div` 2 + + setOneShotMempool cr.mempoolRef goldenMemPool + + let createTable :: Int -> Text -> IO ChainwebTransaction + createTable n tblName = do + let tx = T.unlines + [ "(namespace 'free)" + , "(module m" <> sshow n <> " G" + , " (defcap G () true)" + , " (defschema empty-schema)" + , " (deftable " <> tblName <> ":{empty-schema})" + , ")" + , "(create-table " <> tblName <> ")" + ] + buildCwCmd + $ signSender00 + $ set cbGasLimit gasLimit + $ mkCmd ("createTable-" <> tblName <> "-" <> sshow n) + $ mkExec tx + $ mkKeySetData "sender00" [sender00] + + let beforeTable = "test_before" + let afterTable = "test_after" + + supply <- newIORef @Int 0 + madeBeforeTable <- newIORef @Bool False + madeAfterTable <- newIORef @Bool False + replicateM_ numBlocks $ do + setMempool cr.mempoolRef $ mempty { + mpaGetBlock = \_ _ mBlockHeight _ _ -> do + let mkTable madeRef tbl = do + madeYet <- readIORef madeRef + if madeYet + then do + pure mempty + else do + n <- atomicModifyIORef' supply $ \a -> (a + 1, a) + tx <- createTable n tbl + writeIORef madeRef True + pure (V.fromList [tx]) + + if mBlockHeight <= halfwayPoint + then do + mkTable madeBeforeTable beforeTable + else do + mkTable madeAfterTable afterTable + } + void $ runBlock cr.pactQueue cr.blockDb second + + let freeBeforeTbl = "free.m0_" <> beforeTable + let freeAfterTbl = "free.m1_" <> afterTable + + let db = _sConn cr.sqlEnv + + statePre <- getPactUserTables db + let assertExists tbl = do + let msg = "Table " ++ T.unpack tbl ++ " should exist pre-compaction, but it doesn't." + assertBool msg (isJust (M.lookup tbl statePre)) + assertExists freeBeforeTbl + assertExists freeAfterTbl + + Utils.compact LL.Error [C.NoVacuum, C.NoGrandHash] cr.sqlEnv (C.Target (BlockHeight halfwayPoint)) + + statePost <- getPactUserTables db + flip assertBool (isJust (M.lookup freeBeforeTbl statePost)) $ + T.unpack beforeTable ++ " was dropped; it wasn't supposed to be." + + flip assertBool (isNothing (M.lookup freeAfterTbl statePost)) $ + T.unpack afterTable ++ " wasn't dropped; it was supposed to be." 
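Both pact-state tests above reduce to the same comparison: take two snapshots of the state, diff them with Patience.Map, and fail if anything other than Same survives. A condensed sketch of that comparison follows. The (Ord k, Eq v) constraints on Patience.Map.diff are inferred from how the tests use it here, so treat the signature as an assumption rather than a quoted one.

{-# LANGUAGE ImportQualifiedPost #-}
import Data.Map.Strict (Map)
import Data.Map.Strict qualified as M
import Patience.Map (Delta)
import Patience.Map qualified as PatienceM

-- Keep only the entries that actually differ between two snapshots.
changedEntries :: (Ord k, Eq v) => Map k v -> Map k v -> Map k (Delta v)
changedEntries before after =
  M.filter (not . PatienceM.isSame) (PatienceM.diff before after)

-- The assertions in the tests then read as:
--   null (changedEntries statePreCompaction statePostCompaction)     -- state preserved
--   null (changedEntries statePostCompaction1 statePostCompaction2)  -- compaction idempotent
main :: IO ()
main = print (M.keys (changedEntries (M.fromList [(1 :: Int, "a")]) (M.fromList [(1, "b"), (2, "c")])))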
+ +getHistory :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree getHistory refIO reqIO = testCase "getHistory" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO setOneShotMempool refIO goldenMemPool - void $ runBlock q bdb second "getHistory" + void $ runBlock q bdb second h <- getParentTestBlockDb bdb cid mv <- pactBlockTxHistory h (UserTables "coin_coin-table") q @@ -208,7 +561,7 @@ getHistory refIO reqIO = testCase "getHistory" $ do , "keys" .= ["368820f80c324bbc7c2b0610688a7da43e39f91d118732671cd9c7500ff43cca" :: T.Text] ] - , "balance" .= (Number 99999900.0) + , "balance" .= Number 99_999_900.0 ])]) (M.lookup 10 hist) -- and transaction txids @@ -236,30 +589,29 @@ getHistoricalLookupNoTxs :: T.Text -> (Maybe (TxLog RowData) -> IO ()) -> IO (IORef MemPoolAccess) - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree -getHistoricalLookupNoTxs key assertF refIO reqIO = testCase msg $ do - (q,bdb) <- reqIO - setOneShotMempool refIO mempty - void $ runBlock q bdb second msg - h <- getParentTestBlockDb bdb cid - histLookup q h key >>= assertF - where msg = T.unpack $ "getHistoricalLookupNoTxs: " <> key +getHistoricalLookupNoTxs key assertF refIO reqIO = + testCase (T.unpack ("getHistoricalLookupNoTxs: " <> key)) $ do + (_, q, bdb) <- reqIO + setOneShotMempool refIO mempty + void $ runBlock q bdb second + h <- getParentTestBlockDb bdb cid + histLookup q h key >>= assertF getHistoricalLookupWithTxs :: T.Text -> (Maybe (TxLog RowData) -> IO ()) -> IO (IORef MemPoolAccess) - -> IO (PactQueue,TestBlockDb) + -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree -getHistoricalLookupWithTxs key assertF refIO reqIO = testCase msg $ do - (q,bdb) <- reqIO - setOneShotMempool refIO goldenMemPool - void $ runBlock q bdb second msg - h <- getParentTestBlockDb bdb cid - histLookup q h key >>= assertF - where msg = T.unpack $ "getHistoricalLookupWithTxs: " <> key - +getHistoricalLookupWithTxs key assertF refIO reqIO = + testCase (T.unpack ("getHistoricalLookupWithTxs: " <> key)) $ do + (_, q, bdb) <- reqIO + setOneShotMempool refIO goldenMemPool + void $ runBlock q bdb second + h <- getParentTestBlockDb bdb cid + histLookup q h key >>= assertF histLookup :: PactQueue -> BlockHeader -> T.Text -> IO (Maybe (TxLog RowData)) histLookup q bh k = do @@ -280,23 +632,23 @@ assertSender00Bal bal msg hist = ]))) hist -newBlockRewindValidate :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +newBlockRewindValidate :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree newBlockRewindValidate mpRefIO reqIO = testCase "newBlockRewindValidate" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO setOneShotMempool mpRefIO chainDataMemPool cut0 <- readMVar $ _bdbCut bdb -- genesis cut -- cut 1a - void $ runBlock q bdb second "newBlockRewindValidate-1a" + void $ runBlock q bdb second cut1a <- readMVar $ _bdbCut bdb -- rewind, cut 1b void $ swapMVar (_bdbCut bdb) cut0 - void $ runBlock q bdb second "newBlockRewindValidate-1b" + void $ runBlock q bdb second -- rewind to cut 1a to trigger replay with chain data bug void $ swapMVar (_bdbCut bdb) cut1a - void $ runBlock q bdb (secondsToTimeSpan 2) "newBlockRewindValidate-2" + void $ runBlock q bdb (secondsToTimeSpan 2) where @@ -309,7 +661,6 @@ newBlockRewindValidate mpRefIO reqIO = testCase "newBlockRewindValidate" $ do $ mkExec' "(chain-data)" } - signSender00 :: CmdBuilder -> CmdBuilder signSender00 = set cbSigners [mkSigner' sender00 []] @@ -324,9 +675,9 @@ pattern 
BlockGasLimitError <- Left (PactInternalError (decodeStrict . T.encodeUtf8 -> Just (PactExceptionTag "BlockGasLimitExceeded"))) -- this test relies on block gas errors being thrown before other Pact errors. -blockGasLimitTest :: HasCallStack => IO (IORef MemPoolAccess) -> IO (PactQueue, TestBlockDb) -> TestTree +blockGasLimitTest :: HasCallStack => IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree blockGasLimitTest _ reqIO = testCase "blockGasLimitTest" $ do - (q,_) <- reqIO + (_, q, _) <- reqIO let useGas g = do @@ -379,26 +730,26 @@ blockGasLimitTest _ reqIO = testCase "blockGasLimitTest" $ do _ -> return () -mempoolRefillTest :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +mempoolRefillTest :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree mempoolRefillTest mpRefIO reqIO = testCase "mempoolRefillTest" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO supply <- newMVar (0 :: Int) mp supply [ ( 0, [goodTx, goodTx] ), ( 1, [badTx] ) ] - runBlock q bdb second "mempoolRefillTest-1" >>= checkCount 2 + runBlock q bdb second >>= checkCount 2 mp supply [ ( 0, [goodTx, goodTx] ), ( 1, [goodTx, badTx] ) ] - runBlock q bdb second "mempoolRefillTest-2" >>= checkCount 3 + runBlock q bdb second >>= checkCount 3 mp supply [ ( 0, [badTx, goodTx] ), ( 1, [goodTx, badTx] ) ] - runBlock q bdb second "mempoolRefillTest-3" >>= checkCount 2 + runBlock q bdb second >>= checkCount 2 mp supply [ ( 0, [badTx] ), ( 1, [goodTx, goodTx] ) ] - runBlock q bdb second "mempoolRefillTest-4" >>= checkCount 2 + runBlock q bdb second >>= checkCount 2 mp supply [ ( 0, [goodTx, goodTx] ), ( 1, [badTx, badTx] ) ] - runBlock q bdb second "mempoolRefillTest-5" >>= checkCount 2 + runBlock q bdb second >>= checkCount 2 where @@ -432,26 +783,24 @@ mempoolRefillTest mpRefIO reqIO = testCase "mempoolRefillTest" $ do setFromHeader bh . mkCmd nonce - - -moduleNameFork :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +moduleNameFork :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree moduleNameFork mpRefIO reqIO = testCase "moduleNameFork" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO -- install in free in block 1 setOneShotMempool mpRefIO (moduleNameMempool "free" "test") - void $ runBlock q bdb second "moduleNameFork-1" + void $ runBlock q bdb second -- install in user in block 2 setOneShotMempool mpRefIO (moduleNameMempool "user" "test") - void $ runBlock q bdb second "moduleNameFork-1" + void $ runBlock q bdb second -- do something else post-fork setOneShotMempool mpRefIO (moduleNameMempool "free" "test2") - void $ runBlock q bdb second "moduleNameFork-1" + void $ runBlock q bdb second setOneShotMempool mpRefIO (moduleNameMempool "user" "test2") - void $ runBlock q bdb second "moduleNameFork-1" + void $ runBlock q bdb second -- TODO this test doesn't actually validate, I turn on Debug and make sure it -- goes well. 
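The runBlock and forSuccess changes above replace the per-call message strings ("mempoolRefillTest-3", "moduleNameFork-1", and so on) with a HasCallStack constraint, which already points a failure at its call site. A small illustration of that pattern is below; requireRight is a hypothetical helper written for this sketch, not the chainweb test utility itself, and it uses tasty-hunit's assertFailure, which the test suite already depends on.

{-# LANGUAGE LambdaCase #-}
import GHC.Stack (HasCallStack)
import Test.Tasty.HUnit (assertFailure)

-- With HasCallStack on the helper, an assertion failure already reports the
-- caller's source location, so a unique per-call label adds nothing.
requireRight :: HasCallStack => String -> IO (Either String a) -> IO a
requireRight label act = act >>= \case
  Left err -> assertFailure (label ++ ": " ++ err)
  Right a -> pure a

main :: IO ()
main = do
  n <- requireRight "newBlock" (pure (Right (42 :: Int)))
  print n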
@@ -474,16 +823,16 @@ moduleNameMempool ns mn = mempty mkExec' code -mempoolCreationTimeTest :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +mempoolCreationTimeTest :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree mempoolCreationTimeTest mpRefIO reqIO = testCase "mempoolCreationTimeTest" $ do - (q,bdb) <- reqIO + (_, q, bdb) <- reqIO let start@(Time startSpan) :: Time Micros = Time (TimeSpan (Micros 100_000_000)) s30 = scaleTimeSpan (30 :: Int) second s15 = scaleTimeSpan (15 :: Int) second -- b1 block time is start - void $ runBlock q bdb startSpan "mempoolCreationTimeTest-1" + void $ runBlock q bdb startSpan -- do pre-insert check with transaction at start + 15s @@ -493,7 +842,7 @@ mempoolCreationTimeTest mpRefIO reqIO = testCase "mempoolCreationTimeTest" $ do setOneShotMempool mpRefIO $ mp tx -- b2 will be made at start + 30s - void $ runBlock q bdb s30 "mempoolCreationTimeTest-2" + void $ runBlock q bdb s30 where @@ -514,9 +863,9 @@ mempoolCreationTimeTest mpRefIO reqIO = testCase "mempoolCreationTimeTest" $ do unless (V.and oks) $ throwM $ userError "Insert failed" return txs -preInsertCheckTimeoutTest :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +preInsertCheckTimeoutTest :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree preInsertCheckTimeoutTest _ reqIO = testCase "preInsertCheckTimeoutTest" $ do - (q,_) <- reqIO + (_, q, _) <- reqIO coinV3 <- T.readFile "pact/coin-contract/v3/coin-v3.pact" coinV4 <- T.readFile "pact/coin-contract/v4/coin-v4.pact" @@ -543,9 +892,9 @@ preInsertCheckTimeoutTest _ reqIO = testCase "preInsertCheckTimeoutTest" $ do rs <- forSuccess "preInsertCheckTimeoutTest" $ pactPreInsertCheck (V.fromList [txCoinV3, txCoinV4, txCoinV5]) q assertBool ("should be InsertErrorTimedOut but got " ++ show rs) $ V.and $ V.map (== Left InsertErrorTimedOut) rs -badlistNewBlockTest :: IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +badlistNewBlockTest :: IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree badlistNewBlockTest mpRefIO reqIO = testCase "badlistNewBlockTest" $ do - (reqQ,_) <- reqIO + (_, reqQ, _) <- reqIO let hashToTxHashList = V.singleton . requestKeyToTransactionHash . RequestKey . 
toUntypedHash @'Blake2b_256 badHashRef <- newIORef $ hashToTxHashList initialHash badTx <- buildCwCmd @@ -567,9 +916,9 @@ badlistNewBlockTest mpRefIO reqIO = testCase "badlistNewBlockTest" $ do } -goldenNewBlock :: String -> MemPoolAccess -> IO (IORef MemPoolAccess) -> IO (PactQueue,TestBlockDb) -> TestTree +goldenNewBlock :: String -> MemPoolAccess -> IO (IORef MemPoolAccess) -> IO (SQLiteEnv, PactQueue, TestBlockDb) -> TestTree goldenNewBlock name mp mpRefIO reqIO = golden name $ do - (reqQ,_) <- reqIO + (_, reqQ, _) <- reqIO setOneShotMempool mpRefIO mp resp <- forSuccess ("goldenNewBlock:" ++ name) $ newBlock noMiner (ParentHeader genesisHeader) reqQ @@ -624,3 +973,44 @@ goldenMemPool = mempty mkCmd ("1" <> sshow n) $ mkExec code $ mkKeySetData "test-admin-keyset" [sender00] + +data CompactionResources = CompactionResources + { mempoolRef :: IO (IORef MemPoolAccess) + , mempool :: MemPoolAccess + , sqlEnv :: SQLiteEnv + , pactQueue :: PactQueue + , blockDb :: TestBlockDb + } + +compactionSetup :: () + => String + -- ^ test pattern + -> RocksDb + -> PactServiceConfig + -> (CompactionResources -> IO ()) + -> TestTree +compactionSetup pat rdb pactCfg f = + withTemporaryDir $ \iodir -> + withSqliteDb cid iodir $ \sqlEnvIO -> + withDelegateMempool $ \dm -> + testCase pat $ do + blockDb <- mkTestBlockDb testVersion rdb + bhDb <- getWebBlockHeaderDb (_bdbWebBlockHeaderDb blockDb) cid + let payloadDb = _bdbPayloadDb blockDb + sqlEnv <- sqlEnvIO + (mempoolRef, mempool) <- do + (ref, nonRef) <- dm + pure (pure ref, nonRef) + pactQueue <- newPactQueue 2000 + + let logger = genericLogger System.LogLevel.Error (\_ -> return ()) + + void $ forkIO $ runPactService testVersion cid logger pactQueue mempool bhDb payloadDb sqlEnv pactCfg + + f $ CompactionResources + { mempoolRef = mempoolRef + , mempool = mempool + , sqlEnv = sqlEnv + , pactQueue = pactQueue + , blockDb = blockDb + } diff --git a/test/Chainweb/Test/Pact/RemotePactTest.hs b/test/Chainweb/Test/Pact/RemotePactTest.hs index 192daf5539..f6fa543e8c 100644 --- a/test/Chainweb/Test/Pact/RemotePactTest.hs +++ b/test/Chainweb/Test/Pact/RemotePactTest.hs @@ -3,6 +3,7 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE OverloadedStrings #-} @@ -37,21 +38,25 @@ import Control.Monad.IO.Class import qualified Data.Aeson as A import Data.Aeson.Lens hiding (values) +import Data.Bifunctor (first) +import Control.Monad.Trans.Except (runExceptT, except) +import Control.Monad.Except (throwError) import qualified Data.ByteString as BS import qualified Data.ByteString.Lazy as LBS import qualified Data.ByteString.Short as SB +import Data.IORef (modifyIORef', newIORef, readIORef) import Data.Word (Word64) import Data.Default (def) import Data.Foldable (toList) -import qualified Data.HashMap.Strict as HashMap +import qualified Data.HashMap.Strict as HM import qualified Data.List as L import qualified Data.List.NonEmpty as NEL import qualified Data.Map.Strict as M import Data.Maybe import Data.Text (Text) import qualified Data.Text as T - -import Numeric.Natural +import qualified Data.Text.Encoding as T +import System.Logger.Types (LogLevel(..)) import Servant.Client @@ -72,6 +77,8 @@ import Pact.Types.Hash (Hash(..)) import qualified Pact.Types.PactError as Pact import Pact.Types.PactValue import Pact.Types.Pretty +import Pact.Types.Persistence (RowKey(..), TxLog(..)) +import Pact.Types.RowData (RowData(..)) import 
Pact.Types.Term -- internal modules @@ -79,11 +86,15 @@ import Pact.Types.Term import Chainweb.ChainId import Chainweb.Graph import Chainweb.Mempool.Mempool +import Chainweb.Pact.Backend.Compaction qualified as C +import Chainweb.Pact.Backend.Utils qualified as Backend +import Chainweb.Pact.Backend.Types (SQLiteEnv(..)) import Chainweb.Pact.RestAPI.Client import Chainweb.Pact.RestAPI.EthSpv import Chainweb.Pact.Service.Types import Chainweb.Pact.Validations (defaultMaxTTL) import Chainweb.Test.Pact.Utils +import Chainweb.Test.Pact.Utils qualified as Utils import Chainweb.Test.RestAPI.Utils import Chainweb.Test.Utils import Chainweb.Test.TestVersions @@ -93,11 +104,10 @@ import Chainweb.Version import Chainweb.Version.Mainnet import Chainweb.Storage.Table.RocksDB - -- -------------------------------------------------------------------------- -- -- Global Settings -nNodes :: Natural +nNodes :: Word nNodes = 1 v :: ChainwebVersion @@ -135,6 +145,16 @@ tests rdb = testGroup "Chainweb.Test.Pact.RemotePactTest" withResource' getCurrentTimeIntegral $ \(iotm :: IO (Time Micros)) -> let cenv = _getServiceClientEnv <$> net iot = toTxCreationTime <$> iotm + pactDir = do + m <- _getNodeDbDirs <$> net + -- This looks up the pactDbDir for node 0. This is + -- kind of a hack, because there is only one node in + -- this test. However, it doesn't matter much, because + -- we are dealing with both submitting /local txs + -- and compaction, so picking an arbitrary node + -- to run these two operations on is fine. + pure (fst (head m)) + in testGroup "remote pact tests" [ withResourceT (liftIO $ join $ withRequestKeys <$> iot <*> cenv) $ \reqkeys -> golden "remote-golden" $ join $ responseGolden <$> cenv <*> reqkeys @@ -153,6 +173,9 @@ tests rdb = testGroup "Chainweb.Test.Pact.RemotePactTest" , after AllSucceed "remote spv" $ testCase "trivialLocalCheck" $ join $ localTest <$> iot <*> cenv + , after AllSucceed "remote spv" $ + testCase "txlogsCompactionTest" $ + join $ txlogsCompactionTest <$> iot <*> cenv <*> pactDir , after AllSucceed "remote spv" $ testCase "localChainData" $ join $ localChainDataTest <$> iot <*> cenv @@ -186,10 +209,190 @@ tests rdb = testGroup "Chainweb.Test.Pact.RemotePactTest" responseGolden :: ClientEnv -> RequestKeys -> IO LBS.ByteString responseGolden cenv rks = do PollResponses theMap <- polling cid cenv rks ExpectPactResult - let values = mapMaybe (\rk -> _crResult <$> HashMap.lookup rk theMap) + let values = mapMaybe (\rk -> _crResult <$> HM.lookup rk theMap) (NEL.toList $ _rkRequestKeys rks) return $ foldMap J.encode values +-- | Check that txlogs don't problematically access history +-- post-compaction. +-- +-- At a high level, the test does this: +-- - Submits a tx that creates a module with a table named `persons`. +-- +-- This module exposes a few functions for reading, inserting, +-- and overwriting rows to the `persons` table. +-- +-- The tx also inserts some people into `persons` for +-- some initial state. +-- +-- This module also exposes a way to access the `txlogs` +-- of the `persons` table (what this test is concerned with). +-- +-- - Submits a tx that overwrites a row in the +-- `persons` table. +-- +-- - Compacts to the latest blockheight on each chain. This should +-- get rid of any out-of-date rows. +-- +-- - Submits a /local tx that reads the `txlogs` on the `persons` +-- table. Call this `txLogs`. +-- +-- If this read fails, Pact is doing something problematic! 
+-- +-- If this read doesn't fail, we need to check that `txLogs` +-- matches the latest pact state post-compaction. Because +-- compaction sweeps away the out-of-date rows, they shouldn't +-- appear in the `txLogs` anymore, and the two should be equivalent. +txlogsCompactionTest :: Pact.TxCreationTime -> ClientEnv -> FilePath -> IO () +txlogsCompactionTest t cenv pactDbDir = do + let cmd :: Text -> Text -> CmdBuilder + cmd nonce tx = do + set cbSigners [mkSigner' sender00 []] + $ set cbTTL defaultMaxTTL + $ set cbCreationTime t + $ set cbChainId cid + $ set cbNetworkId (Just v) + $ mkCmd nonce + $ mkExec tx + $ mkKeySetData "sender00" [sender00] + + createTableTx <- buildTextCmd + $ set cbGasLimit 300_000 + $ cmd "create-table-persons" + $ T.unlines + [ "(namespace 'free)" + , "(module m0 G" + , " (defcap G () true)" + , " (defschema person" + , " name:string" + , " age:integer" + , " )" + , " (deftable persons:{person})" + , " (defun read-persons (k) (read persons k))" + , " (defun insert-persons (id name age) (insert persons id { 'name:name, 'age:age }))" + , " (defun write-persons (id name age) (write persons id { 'name:name, 'age:age }))" + , " (defun persons-txlogs (i) (map (txlog persons) (txids persons i)))" + , ")" + , "(create-table persons)" + , "(insert-persons \"A\" \"Lindsey Lohan\" 42)" + , "(insert-persons \"B\" \"Nico Robin\" 30)" + , "(insert-persons \"C\" \"chessai\" 420)" + ] + + nonceSupply <- newIORef @Word 1 -- starts at 1 since 0 is always the create-table tx + let nextNonce = do + cur <- readIORef nonceSupply + modifyIORef' nonceSupply (+ 1) + pure cur + + let submitAndCheckTx tx = do + submitResult <- flip runClientM cenv $ + pactSendApiClient v cid $ SubmitBatch $ NEL.fromList [tx] + case submitResult of + Left err -> do + assertFailure $ "Error when sending tx: " ++ show err + Right rks -> do + PollResponses m <- polling cid cenv rks ExpectPactResult + case HM.lookup (NEL.head (_rkRequestKeys rks)) m of + Just cr -> do + case _crResult cr of + PactResult (Left err) -> do + assertFailure $ "validation failure on tx: " ++ show err + PactResult _ -> do + pure () + Nothing -> do + assertFailure "impossible" + + submitAndCheckTx createTableTx + + let getLatestState :: IO (M.Map RowKey RowData) + getLatestState = C.withDefaultLogger Error $ \logger -> do + Backend.withSqliteDb cid logger pactDbDir False $ \(SQLiteEnv db _) -> do + st <- Utils.getLatestPactState db + case M.lookup "free.m0_persons" st of + Just ps -> fmap M.fromList $ forM (M.toList ps) $ \(rkBytes, rdBytes) -> do + let rk = RowKey (T.decodeUtf8 rkBytes) + case A.eitherDecodeStrict' @RowData rdBytes of + Left err -> do + assertFailure $ "Failed decoding rowdata: " ++ err + Right rd -> do + pure (rk, rd) + Nothing -> error "getting state of free.m0_persons failed" + + let createTxLogsTx :: Word -> IO (Command Text) + createTxLogsTx n = do + -- cost is about 360k. + -- cost = flatCost(module) + flatCost(map) + flatCost(txIds) + numTxIds * (costOf(txlog)) + C + -- = 60_000 + 4 + 100_000 + 2 * 100_000 + C + -- = 360_004 + C + -- Note there are two transactions that write to `persons`, which is + -- why `numTxIds` = 2 (and not the number of rows). 
+ let gasLimit = 400_000 + buildTextCmd + $ set cbGasLimit gasLimit + $ cmd ("test-txlogs-" <> sshow n) + $ T.unlines + [ "(namespace 'free)" + , "(module m" <> sshow n <> " G" + , " (defcap G () true)" + , " (defun persons-txlogs (i) (m0.persons-txlogs i))" + , ")" + , "(persons-txlogs 0)" + ] + + let createWriteTx :: Word -> IO (Command Text) + createWriteTx n = do + -- module = 60k, write = 100 + let gasLimit = 70_000 + buildTextCmd + $ set cbGasLimit gasLimit + $ cmd ("test-write-" <> sshow n) + $ T.unlines + [ "(namespace 'free)" + , "(module m" <> sshow n <> " G" + , " (defcap G () true)" + , " (defun test-write (id name age) (m0.write-persons id name age))" + , ")" + , "(test-write \"C\" \"chessai\" 69)" + ] + + let -- This can't be a Map because the RowKeys aren't + -- necessarily unique, unlike in `getLatestPactState`. + crGetTxLogs :: CommandResult Hash -> IO [(RowKey, A.Value)] + crGetTxLogs cr = do + e <- runExceptT $ do + pv0 <- except (first show (_pactResult (_crResult cr))) + case pv0 of + PList arr -> do + fmap concat $ forM arr $ \pv -> do + txLogs <- except (A.eitherDecode @[TxLog A.Value] (J.encode pv)) + pure $ flip map txLogs $ \txLog -> + (RowKey (_txKey txLog), _txValue txLog) + _ -> do + throwError "expected outermost PList when decoding TxLogs" + case e of + Left err -> do + assertFailure $ "crGetTxLogs failed: " ++ err + Right txlogs -> do + pure txlogs + + submitAndCheckTx =<< createWriteTx =<< nextNonce + + C.withDefaultLogger Error $ \logger -> do + let flags = [C.NoVacuum, C.NoGrandHash] + let resetDb = False + + Backend.withSqliteDb cid logger pactDbDir resetDb $ \(SQLiteEnv db _) -> do + void $ C.compact C.Latest logger db flags + + txLogs <- crGetTxLogs =<< local cid cenv =<< createTxLogsTx =<< nextNonce + + latestState <- getLatestState + assertEqual + "txlogs match latest state" + txLogs + (map (\(rk, rd) -> (rk, J.toJsonViaEncode (_rdData rd))) (M.toList latestState)) + localTest :: Pact.TxCreationTime -> ClientEnv -> IO () localTest t cenv = do mv <- newMVar 0 @@ -211,7 +414,7 @@ localContTest t cenv step = do PollResponses m <- polling cid' cenv rks ExpectPactResult pid <- case _rkRequestKeys rks of rk NEL.:| [] -> maybe (assertFailure "impossible") (return . 
_pePactId) - $ HashMap.lookup rk m >>= _crContinuation + $ HM.lookup rk m >>= _crContinuation _ -> assertFailure "continuation did not succeed" step "execute /local continuation dry run" @@ -258,7 +461,7 @@ pollingConfirmDepth t cenv step = do PollResponses m <- pollingWithDepth cid' cenv rks (Just $ ConfirmationDepth 10) ExpectPactResult afterPolling <- getCurrentBlockHeight v cenv cid' - assertBool "there are two command results" $ length (HashMap.keys m) == 2 + assertBool "there are two command results" $ length (HM.keys m) == 2 -- we are checking that we have waited at least 10 blocks using /poll for the transaction assertBool "the difference between heights should be no less than the confirmation depth" $ (afterPolling - beforePolling) >= 10 @@ -416,7 +619,7 @@ localPreflightSimTest t cenv step = do Right MetadataValidationFailure{} -> assertFailure "Preflight produced an impossible result" Right (LocalResultWithWarns cr' ws) -> do - let crbh :: Integer = fromIntegral $ fromMaybe 0 $ getBlockHeight cr' + let crbh :: Integer = fromIntegral $ fromMaybe 0 $ crGetBlockHeight cr' expectedbh = 1 + fromIntegral currentBlockHeight assertBool "Preflight's metadata should have increment block height" -- we don't control the node in remote tests and the data can get oudated, @@ -437,7 +640,7 @@ localPreflightSimTest t cenv step = do Right MetadataValidationFailure{} -> assertFailure "Preflight produced an impossible result" Right (LocalResultWithWarns cr' ws) -> do - let crbh :: Integer = fromIntegral $ fromMaybe 0 $ getBlockHeight cr' + let crbh :: Integer = fromIntegral $ fromMaybe 0 $ crGetBlockHeight cr' expectedbh = toInteger $ 1 + (fromIntegral currentBlockHeight') - rewindDepth assertBool "Preflight's metadata block height should reflect the rewind depth" -- we don't control the node in remote tests and the data can get oudated, @@ -672,7 +875,7 @@ txTooBigGasTest t cenv step = do void $ step "pollApiClient: polling for request key" PollResponses resp <- polling sid cenv rks expectation - return (HashMap.lookup (NEL.head $ _rkRequestKeys rks) resp) + return (HM.lookup (NEL.head $ _rkRequestKeys rks) resp) runLocal (SubmitBatch cmds) = do void $ step "localApiClient: submit transaction" @@ -741,7 +944,7 @@ caplistTest t cenv step = do testCaseStep "poll for transfer results" PollResponses rs <- liftIO $ polling sid cenv rks ExpectPactResult - return (HashMap.lookup (NEL.head $ _rkRequestKeys rks) rs) + return (HM.lookup (NEL.head $ _rkRequestKeys rks) rs) case r of Left e -> assertFailure $ "test failure for TRANSFER + FUND_TX: " <> show e @@ -859,10 +1062,10 @@ allocationTest t cenv step = do sid = unsafeChainId 0 localAfterPollResponse (PollResponses prs) cr = - getBlockHeight cr > getBlockHeight (snd $ head $ HashMap.toList prs) + crGetBlockHeight cr > crGetBlockHeight (snd $ head $ HM.toList prs) localAfterBlockHeight bh cr = - getBlockHeight cr > Just bh + crGetBlockHeight cr > Just bh accountInfo = Right $ PObject @@ -948,5 +1151,5 @@ pactDeadBeef = let (TransactionHash b) = deadbeef in RequestKey $ Hash b -- avoiding `scientific` dep here -getBlockHeight :: CommandResult a -> Maybe Word64 -getBlockHeight = preview (crMetaData . _Just . key "blockHeight" . _Number . to ((fromIntegral :: Integer -> Word64 ) . round . toRational)) +crGetBlockHeight :: CommandResult a -> Maybe Word64 +crGetBlockHeight = preview (crMetaData . _Just . key "blockHeight" . _Number . to ((fromIntegral :: Integer -> Word64 ) . round . 
toRational)) diff --git a/test/Chainweb/Test/Pact/TTL.hs b/test/Chainweb/Test/Pact/TTL.hs index 18ee6305c2..7a3cd167a7 100644 --- a/test/Chainweb/Test/Pact/TTL.hs +++ b/test/Chainweb/Test/Pact/TTL.hs @@ -283,7 +283,7 @@ withTestPact rdb test = withResource' newEmptyMVar $ \mempoolVarIO -> withPactTestBlockDb testVer cid rdb (mempool mempoolVarIO) testPactServiceConfig $ \ios -> test $ do - (pq,bdb) <- ios + (_, pq, bdb) <- ios mp <- mempoolVarIO bhdb <- getBlockHeaderDb cid bdb return $ Ctx mp pq (_bdbPayloadDb bdb) bhdb diff --git a/test/Chainweb/Test/Pact/Utils.hs b/test/Chainweb/Test/Pact/Utils.hs index abe29dd80f..0681b89717 100644 --- a/test/Chainweb/Test/Pact/Utils.hs +++ b/test/Chainweb/Test/Pact/Utils.hs @@ -1,10 +1,13 @@ {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedRecordDot #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -80,6 +83,7 @@ module Chainweb.Test.Pact.Utils , csPrivKey -- * Pact Service creation , withPactTestBlockDb +, withPactTestBlockDb' , withWebPactExecutionService , withPactCtxSQLite , WithPactCtxSQLite @@ -90,6 +94,7 @@ module Chainweb.Test.Pact.Utils , testPactServiceConfig , withBlockHeaderDb , withTemporaryDir +, withSqliteDb -- * Mempool utils , delegateMemPoolAccess , withDelegateMempool @@ -99,6 +104,11 @@ module Chainweb.Test.Pact.Utils , runCut , Noncer , zeroNoncer +-- * Pact State +, compact +, PactRow(..) +, getLatestPactState +, getPactUserTables -- * miscellaneous , toTxCreationTime , dummyLogger @@ -113,7 +123,7 @@ module Chainweb.Test.Pact.Utils import Control.Arrow ((&&&)) import Control.Concurrent.Async import Control.Concurrent.MVar -import Control.Lens (view, _3, makeLenses) +import Control.Lens (view, _2, makeLenses) import Control.Monad import Control.Monad.Catch import Control.Monad.IO.Class @@ -126,6 +136,7 @@ import Data.Default (def) import Data.Foldable import qualified Data.HashMap.Strict as HM import Data.IORef +import Data.Map (Map) import qualified Data.Map.Strict as M import Data.Maybe import Data.Text (Text) @@ -134,11 +145,15 @@ import qualified Data.Text.Encoding as T import Data.String import qualified Data.Vector as V +import Database.SQLite3.Direct (Database) + import GHC.Generics +import Streaming.Prelude qualified as S import System.Directory import System.IO.Temp (createTempDirectory) import System.LogLevel +import System.Logger.Types qualified as LL import Test.Tasty @@ -174,11 +189,14 @@ import Chainweb.ChainId import Chainweb.Graph import Chainweb.Logger import Chainweb.Miner.Pact +import Chainweb.Pact.Backend.Compaction qualified as C +import Chainweb.Pact.Backend.PactState qualified as PactState +import Chainweb.Pact.Backend.PactState (TableDiffable(..), Table(..), PactRow(..)) import Chainweb.Pact.Backend.RelationalCheckpointer (initRelationalCheckpointer') import Chainweb.Pact.Backend.SQLite.DirectV2 import Chainweb.Pact.Backend.Types -import Chainweb.Pact.Backend.Utils +import Chainweb.Pact.Backend.Utils hiding (withSqliteDb) import Chainweb.Pact.PactService import Chainweb.Pact.Service.PactQueue import Chainweb.Pact.Service.Types @@ -805,6 +823,67 @@ withTemporaryDir = withResource (getTemporaryDirectory >>= \d -> createTempDirectory d "test-pact") removeDirectoryRecursive +-- | Single-chain 
Pact via service queue. +-- +-- The difference between this and 'withPactTestBlockDb' is that, +-- this function takes a `SQLiteEnv` resource which it then exposes +-- to the test function. +-- +-- TODO: Consolidate these two functions. +withPactTestBlockDb' + :: ChainwebVersion + -> ChainId + -> RocksDb + -> IO SQLiteEnv + -> IO MemPoolAccess + -> PactServiceConfig + -> (IO (SQLiteEnv,PactQueue,TestBlockDb) -> TestTree) + -> TestTree +withPactTestBlockDb' version cid rdb sqlEnvIO mempoolIO pactConfig f = + withResource' (mkTestBlockDb version rdb) $ \bdbio -> + withResource (startPact bdbio) stopPact $ f . fmap (view _2) + where + startPact bdbio = do + reqQ <- newPactQueue 2000 + bdb <- bdbio + sqlEnv <- sqlEnvIO + mempool <- mempoolIO + bhdb <- getWebBlockHeaderDb (_bdbWebBlockHeaderDb bdb) cid + let pdb = _bdbPayloadDb bdb + a <- async $ runForever (\_ _ -> return ()) "Chainweb.Test.Pact.Utils.withPactTestBlockDb" $ + runPactService version cid logger reqQ mempool bhdb pdb sqlEnv pactConfig + return (a, (sqlEnv,reqQ,bdb)) + + stopPact (a, _) = cancel a + + -- Ideally, we should throw 'error' when the logger is invoked, because + -- error logs should not happen in production and should always be resolved. + -- Unfortunately, that's not yet always the case. So we just drop the + -- message. + -- + logger = genericLogger Error (\_ -> return ()) + +withSqliteDb :: () + => ChainId + -> IO FilePath + -> (IO SQLiteEnv -> TestTree) + -> TestTree +withSqliteDb cid iodir s = withResource start stop s + where + start = do + dir <- iodir + startSqliteDb cid logger dir False + + stop env = do + stopSqliteDb env + + -- Ideally, we should throw 'error' when the logger is invoked, because + -- error logs should not happen in production and should always be resolved. + -- Unfortunately, that's not yet always the case. So we just drop the + -- message. + -- + logger = genericLogger Error (\_ -> return ()) + -- | Single-chain Pact via service queue. withPactTestBlockDb :: ChainwebVersion @@ -812,12 +891,12 @@ withPactTestBlockDb -> RocksDb -> IO MemPoolAccess -> PactServiceConfig - -> (IO (PactQueue,TestBlockDb) -> TestTree) + -> (IO (SQLiteEnv,PactQueue,TestBlockDb) -> TestTree) -> TestTree withPactTestBlockDb version cid rdb mempoolIO pactConfig f = withTemporaryDir $ \iodir -> withResource' (mkTestBlockDb version rdb) $ \bdbio -> - withResource (startPact bdbio iodir) stopPact $ f . fmap (view _3) + withResource (startPact bdbio iodir) stopPact $ f . fmap (view _2) where startPact bdbio iodir = do reqQ <- newPactQueue 2000 @@ -829,9 +908,9 @@ withPactTestBlockDb version cid rdb mempoolIO pactConfig f = sqlEnv <- startSqliteDb cid logger dir False a <- async $ runForever (\_ _ -> return ()) "Chainweb.Test.Pact.Utils.withPactTestBlockDb" $ runPactService version cid logger reqQ mempool bhdb pdb sqlEnv pactConfig - return (a, sqlEnv, (reqQ,bdb)) + return (a, (sqlEnv,reqQ,bdb)) - stopPact (a, sqlEnv, _) = cancel a >> stopSqliteDb sqlEnv + stopPact (a, (sqlEnv, _, _)) = cancel a >> stopSqliteDb sqlEnv -- Ideally, we should throw 'error' when the logger is invoked, because -- error logs should not happen in production and should always be resolved. @@ -866,3 +945,43 @@ someBlockHeader v h = (!! (int h - 1)) makeLenses ''CmdBuilder makeLenses ''CmdSigner + +-- | Get all pact user tables. +-- +-- Note: This consumes a stream. If you are writing a test +-- with very large pact states (think: Gigabytes), use +-- the streaming version of this function from +-- 'Chainweb.Pact.Backend.PactState'. 
+getPactUserTables :: Database -> IO (Map Text [PactRow]) +getPactUserTables db = do + S.foldM_ + (\m tbl -> pure (M.insert tbl.name tbl.rows m)) + (pure M.empty) + pure + (PactState.getPactTables db) + +-- | Get active/latest pact state. +-- +-- Note: This consumes a stream. If you are writing a test +-- with very large pact states (think: Gigabytes), use +-- the streaming version of this function from +-- 'Chainweb.Pact.Backend.PactState'. +getLatestPactState :: Database -> IO (Map Text (Map ByteString ByteString)) +getLatestPactState db = do + S.foldM_ + (\m td -> pure (M.insert td.name td.rows m)) + (pure M.empty) + pure + (PactState.getLatestPactState db) + +-- | Compaction utility for testing. +-- Most of the time the flags will be ['C.NoVacuum', 'C.NoGrandHash'] +compact :: () + => LL.LogLevel + -> [C.CompactFlag] + -> SQLiteEnv + -> C.TargetBlockHeight + -> IO () +compact logLevel cFlags (SQLiteEnv db _) bh = do + C.withDefaultLogger logLevel $ \logger -> do + void $ C.compact bh logger db cFlags diff --git a/test/Chainweb/Test/Rosetta/RestAPI.hs b/test/Chainweb/Test/Rosetta/RestAPI.hs index 6bb470b4a3..f5adddb63c 100644 --- a/test/Chainweb/Test/Rosetta/RestAPI.hs +++ b/test/Chainweb/Test/Rosetta/RestAPI.hs @@ -76,7 +76,7 @@ import System.IO.Unsafe (unsafePerformIO) v :: ChainwebVersion v = fastForkingCpmTestVersion petersonChainGraph -nodes :: Natural +nodes :: Word nodes = 1 cid :: ChainId diff --git a/test/Chainweb/Test/Utils.hs b/test/Chainweb/Test/Utils.hs index 986bb02c10..36e6c21c39 100644 --- a/test/Chainweb/Test/Utils.hs +++ b/test/Chainweb/Test/Utils.hs @@ -1,5 +1,6 @@ {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE ImportQualifiedPost #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE NumericUnderscores #-} @@ -10,7 +11,6 @@ {-# LANGUAGE TupleSections #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE ViewPatterns #-} -- | -- Module: Chainweb.Test.Utils @@ -117,13 +117,14 @@ module Chainweb.Test.Utils , host , interface , testRetryPolicy +, withDbDirs ) where import Control.Concurrent import Control.Concurrent.Async import Control.Lens import Control.Monad -import Control.Monad.Catch (finally, bracket) +import Control.Monad.Catch (MonadCatch, catch, finally, bracket) import Control.Monad.IO.Class import Control.Monad.Trans.Resource import Control.Retry @@ -157,6 +158,7 @@ import Numeric.Natural import Servant.Client (BaseUrl(..), ClientEnv, Scheme(..), mkClientEnv, runClientM) +import System.Directory (removeDirectoryRecursive) import System.Environment (withArgs) import System.IO import System.IO.Temp @@ -897,6 +899,7 @@ matchTest pat = withArgs ["-p",pat] data ChainwebNetwork = ChainwebNetwork { _getClientEnv :: !ClientEnv , _getServiceClientEnv :: !ClientEnv + , _getNodeDbDirs :: ![(FilePath, FilePath)] } withNodes_ @@ -905,16 +908,17 @@ withNodes_ -> ChainwebVersion -> B.ByteString -> RocksDb - -> Natural + -> Word -> ResourceT IO ChainwebNetwork -withNodes_ logger v testLabel rdb n = - (uncurry ChainwebNetwork . snd) . snd <$> - allocate start (cancel . fst) +withNodes_ logger v testLabel rdb n = do + nodeDbDirs <- withDbDirs n + (_rkey, (_async, (p2p, service))) <- allocate (start nodeDbDirs) (cancel . 
fst) + pure (ChainwebNetwork p2p service nodeDbDirs) where - start :: IO (Async (), (ClientEnv, ClientEnv)) - start = do + start :: [(FilePath, FilePath)] -> IO (Async (), (ClientEnv, ClientEnv)) + start dbDirs = do peerInfoVar <- newEmptyMVar - a <- async $ runTestNodes testLabel rdb logger v n peerInfoVar + a <- async $ runTestNodes testLabel rdb logger v peerInfoVar dbDirs (i, servicePort) <- readMVar peerInfoVar cwEnv <- getClientEnv $ getCwBaseUrl Https $ _hostAddressPort $ _peerAddr i cwServiceEnv <- getClientEnv $ getCwBaseUrl Http servicePort @@ -932,7 +936,7 @@ withNodes :: ChainwebVersion -> B.ByteString -> RocksDb - -> Natural + -> Word -> ResourceT IO ChainwebNetwork withNodes = withNodes_ (genericLogger Error (error . T.unpack)) -- Test resources are part of test infrastructure and should never print @@ -943,7 +947,7 @@ withNodesAtLatestBehavior :: ChainwebVersion -> B.ByteString -> RocksDb - -> Natural + -> Word -> ResourceT IO ChainwebNetwork withNodesAtLatestBehavior v testLabel rdb n = do net <- withNodes v testLabel rdb n @@ -992,19 +996,19 @@ runTestNodes -> RocksDb -> logger -> ChainwebVersion - -> Natural -> MVar (PeerInfo, Port) + -> [(FilePath, FilePath)] + -- ^ A Map from Node Id to (Pact DB Dir, RocksDB Dir). + -- The index is just the position in the list. -> IO () -runTestNodes testLabel rdb logger ver n portMVar = - forConcurrently_ [0 .. int n - 1] $ \i -> do - threadDelay (1000 * int i) - let baseConf = config ver n - conf <- if - | i == 0 -> - return $ bootstrapConfig baseConf - | otherwise -> - setBootstrapPeerInfo <$> (fst <$> readMVar portMVar) <*> pure baseConf - node testLabel rdb logger portMVar conf i +runTestNodes testLabel rdb logger ver portMVar dbDirs = do + forConcurrently_ (zip [0 ..] dbDirs) $ \(nid, (pactDbDir, rocksDbDir)) -> do + threadDelay (1000 * int nid) + let baseConf = config ver (int (length dbDirs)) + conf <- if nid == 0 + then return $ bootstrapConfig baseConf + else setBootstrapPeerInfo <$> (fst <$> readMVar portMVar) <*> pure baseConf + node testLabel rdb logger portMVar conf pactDbDir rocksDbDir nid node :: Logger logger @@ -1013,28 +1017,29 @@ node -> logger -> MVar (PeerInfo, Port) -> ChainwebConfiguration - -> Int + -> FilePath + -- ^ pact db dir + -> FilePath + -- ^ rocksdb dir + -> Word -- ^ Unique Node Id. The node id 0 is used for the bootstrap node -> IO () -node testLabel rdb rawLogger peerInfoVar conf nid = do +node testLabel rdb rawLogger peerInfoVar conf pactDbDir rocksDbDir nid = do rocksDb <- testRocksDb (testLabel <> T.encodeUtf8 (toText nid)) rdb - withSystemTempDirectory "test-backupdir" $ \backupDir -> - withSystemTempDirectory "test-rocksdb" $ \dir -> - withChainweb conf logger rocksDb backupDir dir False $ \case - StartedChainweb cw -> do - - -- If this is the bootstrap node we extract the port number and publish via an MVar. - when (nid == 0) $ do - let bootStrapInfo = view (chainwebPeer . peerResPeer . peerInfo) cw - bootStrapPort = view (chainwebServiceSocket . _1) cw - putMVar peerInfoVar (bootStrapInfo, bootStrapPort) - - poisonDeadBeef cw - runChainweb cw `finally` do - logFunctionText logger Info "write sample data" - logFunctionText logger Info "shutdown node" - return () - Replayed _ _ -> error "node: should not be a replay" + withChainweb conf logger rocksDb pactDbDir rocksDbDir False $ \case + StartedChainweb cw -> do + -- If this is the bootstrap node we extract the port number and publish via an MVar. + when (nid == 0) $ do + let bootStrapInfo = view (chainwebPeer . peerResPeer . 
peerInfo) cw + bootStrapPort = view (chainwebServiceSocket . _1) cw + putMVar peerInfoVar (bootStrapInfo, bootStrapPort) + + poisonDeadBeef cw + runChainweb cw `finally` do + logFunctionText logger Info "write sample data" + logFunctionText logger Info "shutdown node" + return () + Replayed _ _ -> error "node: should not be a replay" where logger = addLabel ("node", sshow nid) rawLogger @@ -1043,6 +1048,31 @@ node testLabel rdb rawLogger peerInfoVar conf nid = do crs = map snd $ HashMap.toList $ view chainwebChains cw poison cr = mempoolAddToBadList (view chainResMempool cr) (V.singleton deadbeef) +withDbDirs :: Word -> ResourceT IO [(FilePath, FilePath)] +withDbDirs n = do + let create :: IO [(FilePath, FilePath)] + create = do + forM [0 .. n - 1] $ \nid -> do + targetDir1 <- getCanonicalTemporaryDirectory + targetDir2 <- getCanonicalTemporaryDirectory + + dir1 <- createTempDirectory targetDir1 ("pactdb-dir-" ++ show nid) + dir2 <- createTempDirectory targetDir2 ("rocksdb-dir-" ++ show nid) + + pure (dir1, dir2) + + let destroy :: [(FilePath, FilePath)] -> IO () + destroy m = flip foldMap m $ \(d1, d2) -> do + ignoringIOErrors $ do + removeDirectoryRecursive d1 + removeDirectoryRecursive d2 + + (_, m) <- allocate create destroy + pure m + where + ignoringIOErrors :: (MonadCatch m) => m () -> m () + ignoringIOErrors ioe = ioe `catch` (\(_ :: IOError) -> pure ()) + deadbeef :: TransactionHash deadbeef = TransactionHash "deadbeefdeadbeefdeadbeefdeadbeef" diff --git a/test/SlowTests.hs b/test/SlowTests.hs index 85bd784023..b92a229384 100644 --- a/test/SlowTests.hs +++ b/test/SlowTests.hs @@ -34,6 +34,7 @@ suite :: TestTree suite = testGroup "ChainwebSlowTests" [ Chainweb.Test.MultiNode.test loglevel (timedConsensusVersion petersonChainGraph twentyChainGraph) 10 30 , Chainweb.Test.MultiNode.replayTest loglevel (fastForkingCpmTestVersion pairChainGraph) 6 + , Chainweb.Test.MultiNode.compactAndResumeTest loglevel (fastForkingCpmTestVersion pairChainGraph) 6 , testGroup "Network.X05.SelfSigned.Test" [ Network.X509.SelfSigned.Test.tests ] diff --git a/tools/cwtool/CwTool.hs b/tools/cwtool/CwTool.hs index 2ef8c6a13b..c1d552e68a 100644 --- a/tools/cwtool/CwTool.hs +++ b/tools/cwtool/CwTool.hs @@ -9,6 +9,9 @@ import System.Environment import System.Exit import Text.Printf +import Chainweb.Pact.Backend.Compaction (main) +import Chainweb.Pact.Backend.PactState (pactDiffMain) + import qualified CheckpointerDBChecksum import qualified Ea import qualified EncodeDecodeB64Util @@ -99,6 +102,14 @@ topLevelCommands = "tx-sim" "Simulate tx execution against real pact dbs" TxSimulator.simulateMain + , CommandSpec + "compact" + "Compact pact database" + Chainweb.Pact.Backend.Compaction.main + , CommandSpec + "pact-diff" + "Diff the latest state of two pact databases" + Chainweb.Pact.Backend.PactState.pactDiffMain , CommandSpec "calculate-release" "Calculate next service date and block heights for upgrades"
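The new test utilities above are meant to compose: withSqliteDb provides the SQLiteEnv, withPactTestBlockDb' threads it into a running pact service, and compact / getLatestPactState let a test inspect the pact database around compaction. Below is a minimal sketch of that wiring, not part of the patch itself: it assumes Test.Tasty.HUnit for testCase/assertBool, the qualified imports already used in Chainweb.Test.Pact.Utils (C for Compaction, LL for System.Logger.Types, M for Data.Map.Strict), BlockHeight in scope, and placeholder arguments (testVer, cid, rdb, mempoolIO); the target height and the assertion are illustrative only.

compactionSmokeTest :: ChainwebVersion -> ChainId -> RocksDb -> IO MemPoolAccess -> TestTree
compactionSmokeTest testVer cid rdb mempoolIO =
  withTemporaryDir $ \iodir ->
  withSqliteDb cid iodir $ \sqlEnvIO ->
  withPactTestBlockDb' testVer cid rdb sqlEnvIO mempoolIO testPactServiceConfig $ \ios ->
    testCase "user tables survive compaction" $ do
      (sqlEnv, _pactQueue, _blockDb) <- ios
      -- (mine or replay some blocks here before compacting)
      -- compact to an arbitrary (illustrative) target height, using the
      -- common flag set mentioned in the compact helper's docs
      compact LL.Error [C.NoVacuum, C.NoGrandHash] sqlEnv (C.Target (BlockHeight 5))
      -- inspect the remaining latest state; the exact assertion is illustrative
      latest <- getLatestPactState (_sConn sqlEnv)
      assertBool "some latest pact state remains after compaction" (not (M.null latest))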