From 91e1db2159d773ecdfda691a58580815cba0e2dc Mon Sep 17 00:00:00 2001 From: Lars Kuhtz Date: Mon, 23 Dec 2024 19:22:07 -0800 Subject: [PATCH] Add chainweb-beacon package --- cabal.project | 1 + chainweb-beacon/LICENSE | 29 + chainweb-beacon/README.md | 4 + chainweb-beacon/Setup.hs | 445 +++++++++++ chainweb-beacon/c/rlim_utils.c | 30 + chainweb-beacon/chainweb-beacon.cabal | 139 ++++ .../src/Chainweb/Beacon/Configuration.hs | 464 +++++++++++ chainweb-beacon/src/ChainwebBeaconNode.hs | 730 ++++++++++++++++++ chainweb-beacon/src/Utils/CheckRLimits.hs | 52 ++ .../src/Utils/InstallSignalHandlers.hs | 122 +++ 10 files changed, 2016 insertions(+) create mode 100644 chainweb-beacon/LICENSE create mode 100644 chainweb-beacon/README.md create mode 100644 chainweb-beacon/Setup.hs create mode 100644 chainweb-beacon/c/rlim_utils.c create mode 100644 chainweb-beacon/chainweb-beacon.cabal create mode 100644 chainweb-beacon/src/Chainweb/Beacon/Configuration.hs create mode 100644 chainweb-beacon/src/ChainwebBeaconNode.hs create mode 100644 chainweb-beacon/src/Utils/CheckRLimits.hs create mode 100644 chainweb-beacon/src/Utils/InstallSignalHandlers.hs diff --git a/cabal.project b/cabal.project index 82cde178f0..b8370a9380 100644 --- a/cabal.project +++ b/cabal.project @@ -2,6 +2,7 @@ packages: chainweb.cabal node/chainweb-node.cabal cwtools/cwtools.cabal + chainweb-beacon/chainweb-beacon.cabal debug-info: True diff --git a/chainweb-beacon/LICENSE b/chainweb-beacon/LICENSE new file mode 100644 index 0000000000..047dc735fb --- /dev/null +++ b/chainweb-beacon/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2018 - 2024 Kadena LLC +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/chainweb-beacon/README.md b/chainweb-beacon/README.md new file mode 100644 index 0000000000..9381a64a48 --- /dev/null +++ b/chainweb-beacon/README.md @@ -0,0 +1,4 @@ +# Chainweb Node Application + +For details please see the documentation of the +[chainweb project](https://github.com/kadena-io/chainweb-node/README.md). 
diff --git a/chainweb-beacon/Setup.hs b/chainweb-beacon/Setup.hs new file mode 100644 index 0000000000..74302cb25e --- /dev/null +++ b/chainweb-beacon/Setup.hs @@ -0,0 +1,445 @@ +-- ------------------------------------------------------ -- +-- Copyright © 2019-2023 Kadena LLC +-- Copyright © 2019 Colin Woodbury +-- Copyright © 2015-2018 Lars Kuhtz +-- Copyright © 2014 AlephCloud Systems, Inc. +-- ------------------------------------------------------ -- + +{-# LANGUAGE CPP #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiWayIf #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} + +{-# OPTIONS_HADDOCK show-extensions #-} + +-- | This module contains a @Setup.hs@ script that hooks into the cabal build +-- process at the end of the configuration phase and generates a module with +-- package information for each component of the cabal package. +-- +-- The modules are created in the /autogen/ build directories where also the +-- @Path_@ modules are created by cabal's simple build setup. +-- +-- = Usage as Setup Script +-- +-- There are three ways how this module can be used: +-- +-- 1. Copy the code of this module into a file called @Setup.hs@ in the root +-- directory of your package. +-- +-- 2. If the /configuration-tools/ package is already installed in the system +-- where the build is done, following code can be used as @Setup.hs@ script: +-- +-- > module Main (main) where +-- > +-- > import Configuration.Utils.Setup +-- +-- 3. For usage within a more complex @Setup.hs@ script you shall import this +-- module qualified and use the 'mkPkgInfoModules' function. For example: +-- +-- > module Main (main) where +-- > +-- > import qualified Configuration.Utils.Setup as ConfTools +-- > +-- > main :: IO () +-- > main = defaultMainWithHooks (ConfTools.mkPkgInfoModules simpleUserHooks) +-- > +-- +-- With all methods the field @Build-Type@ in the package description (cabal) file +-- must be set to @Custom@: +-- +-- > Build-Type: Custom +-- +-- +-- = Integration With "Configuration.Utils" +-- +-- You can integrate the information provided by the @PkgInfo@ modules with the +-- command line interface of an application by importing the respective module +-- for the component and using the +-- 'Configuration.Utils.runWithPkgInfoConfiguration' function from the module +-- "Configuration.Utils" as show in the following example: +-- +-- > {-# LANGUAGE OverloadedStrings #-} +-- > {-# LANGUAGE FlexibleInstances #-} +-- > +-- > module Main +-- > ( main +-- > ) where +-- > +-- > import Configuration.Utils +-- > import PkgInfo +-- > +-- > instance FromJSON (() -> ()) where parseJSON _ = pure id +-- > +-- > mainInfo :: ProgramInfo () +-- > mainInfo = programInfo "Hello World" (pure id) () +-- > +-- > main :: IO () +-- > main = runWithPkgInfoConfiguration mainInfo pkgInfo . const $ putStrLn "hello world" +-- +-- With that the resulting application supports the following additional command +-- line options: +-- +-- [@--version@, @-v@] +-- prints the version of the application and exits. +-- +-- [@--info@, @-i@] +-- prints a short info message for the application and exits. +-- +-- [@--long-info@] +-- print a detailed info message for the application and exits. 
+-- Besides the component name, package name, version, revision, and copyright,
+-- the message also contains information about the compiler that
+-- was used for the build, the build architecture, build flags,
+-- the author, the license type, and a list of all direct and
+-- indirect dependencies along with their licenses and copyrights.
+--
+-- [@--license@]
+-- prints the text of the license of the application and exits.
+--
+module Main
+( main
+) where
+
+import qualified Distribution.Compat.Graph as Graph
+import qualified Distribution.InstalledPackageInfo as I
+import Distribution.PackageDescription
+import Distribution.Pretty
+import Distribution.Simple
+import Distribution.Simple.BuildPaths
+import Distribution.Simple.LocalBuildInfo
+import Distribution.Simple.PackageIndex
+import Distribution.Simple.Setup
+import Distribution.Text
+import Distribution.Types.LocalBuildInfo
+import Distribution.Types.UnqualComponentName
+import Distribution.Utils.Path
+import Distribution.Utils.ShortText
+
+import System.Process
+
+import Control.Applicative
+import Control.Monad
+
+import qualified Data.ByteString as B
+import Data.ByteString.Char8 (pack)
+import Data.Char (isSpace)
+import Data.List (intercalate)
+import Data.Maybe (fromMaybe)
+import Data.Monoid
+
+import System.Directory
+ (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist,
+ doesFileExist, getCurrentDirectory)
+import System.Environment (lookupEnv)
+import System.Exit (ExitCode(ExitSuccess))
+import System.FilePath (isDrive, takeDirectory, (</>))
+
+-- | Include this function when your setup doesn't contain any
+-- extra functionality.
+--
+main :: IO ()
+main = defaultMainWithHooks (mkPkgInfoModules simpleUserHooks)
+
+-- | Modifies the given record of hooks by adding functionality that
+-- creates a package info module for each component of the cabal package.
+--
+-- This function is intended for usage in more complex @Setup.hs@ scripts.
+-- If your setup doesn't contain any other function you can just import
+-- the 'main' function from this module.
+--
+-- The modules are created in the /autogen/ build directories where also the
+-- @Path_@ modules are created by cabal's simple build setup.
+--
+mkPkgInfoModules
+ :: UserHooks
+ -> UserHooks
+mkPkgInfoModules hooks = hooks
+ { postConf = mkPkgInfoModulesPostConf (postConf hooks)
+ }
+
+-- -------------------------------------------------------------------------- --
+-- Compat Implementations
+
+prettyLicense :: I.InstalledPackageInfo -> String
+prettyLicense = either prettyShow prettyShow . I.license
+
+ft :: ShortText -> String
+ft = fromShortText
+
+-- -------------------------------------------------------------------------- --
+-- Cabal 2.0
+
+mkPkgInfoModulesPostConf
+ :: (Args -> ConfigFlags -> PackageDescription -> LocalBuildInfo -> IO ())
+ -> Args
+ -> ConfigFlags
+ -> PackageDescription
+ -> LocalBuildInfo
+ -> IO ()
+mkPkgInfoModulesPostConf hook args flags pkgDesc bInfo = do
+ mapM_ (updatePkgInfoModule pkgDesc bInfo) $ Graph.toList $ componentGraph bInfo
+ hook args flags pkgDesc bInfo
+
+updatePkgInfoModule :: PackageDescription -> LocalBuildInfo -> ComponentLocalBuildInfo -> IO ()
+updatePkgInfoModule pkgDesc bInfo clbInfo = do
+ createDirectoryIfMissing True dirName
+ moduleBytes <- pkgInfoModule moduleName cName pkgDesc bInfo
+ updateFile fileName moduleBytes
+
+ -- legacy module
+ legacyModuleBytes <- pkgInfoModule legacyModuleName cName pkgDesc bInfo
+ updateFile legacyFileName legacyModuleBytes
+
+ where
+ dirName = autogenComponentModulesDir bInfo clbInfo
+ cName = unUnqualComponentName <$> componentNameString (componentLocalName clbInfo)
+
+ moduleName = pkgInfoModuleName
+ fileName = dirName ++ "/" ++ moduleName ++ ".hs"
+
+ legacyModuleName = legacyPkgInfoModuleName cName
+ legacyFileName = dirName ++ "/" ++ legacyModuleName ++ ".hs"
+
+-- -------------------------------------------------------------------------- --
+-- Generate PkgInfo Module
+
+pkgInfoModuleName :: String
+pkgInfoModuleName = "PkgInfo"
+
+updateFile :: FilePath -> B.ByteString -> IO ()
+updateFile fileName content = do
+ x <- doesFileExist fileName
+ if | not x -> update
+ | otherwise -> do
+ oldRevisionFile <- B.readFile fileName
+ when (oldRevisionFile /= content) update
+ where
+ update = B.writeFile fileName content
+
+legacyPkgInfoModuleName :: Maybe String -> String
+legacyPkgInfoModuleName Nothing = "PkgInfo"
+legacyPkgInfoModuleName (Just cn) = "PkgInfo_" ++ map tr cn
+ where
+ tr '-' = '_'
+ tr c = c
+
+trim :: String -> String
+trim = f . f
+ where f = reverse . dropWhile isSpace
+
+-- -------------------------------------------------------------------------- --
+-- VCS
+
+getVCS :: IO (Maybe KnownRepoType)
+getVCS = getCurrentDirectory >>= getVcsOfDir
+ where
+ getVcsOfDir d = do
+ canonicDir <- canonicalizePath d
+ doesDirectoryExist (canonicDir </> ".hg") >>= \x0 -> if x0
+ then return (Just Mercurial)
+ else doesDirectoryExist (canonicDir </> ".git") >>= \x1 -> if x1
+ then return $ Just Git
+ else if isDrive canonicDir
+ then return Nothing
+ else getVcsOfDir (takeDirectory canonicDir)
+
+-- | Returns tag, revision, and branch name.
+--
+hgInfo :: IO (String, String, String)
+hgInfo = do
+ tag <- trim <$> readProcess "hg" ["id", "-r", "max(ancestors(\".\") and tag())", "-t"] ""
+ rev <- trim <$> readProcess "hg" ["id", "-i"] ""
+ branch <- trim <$> readProcess "hg" ["id", "-b"] ""
+ return (tag, rev, branch)
+
+-- | Returns tag, revision, and branch name.
+--
+gitInfo :: IO (String, String, String)
+gitInfo = do
+ tag <- do
+ (exitCode, out, _err) <- readProcessWithExitCode "git" ["describe", "--exact-match", "--tags", "--abbrev=0"] ""
+ case exitCode of
+ ExitSuccess -> return $ trim out
+ _ -> return ""
+ rev <- trim <$> readProcess "git" ["rev-parse", "--short", "HEAD"] ""
+ branch <- trim <$> readProcess "git" ["rev-parse", "--abbrev-ref", "HEAD"] ""
+ return (tag, rev, branch)
+
+-- | Returns tag, revision, and branch name.
+-- +gitlabCiVcsInfo :: IO (Maybe (String, String, String)) +gitlabCiVcsInfo = do + lookupEnv "CI_COMMIT_SHORT_SHA" >>= \case + Nothing -> return Nothing + Just _ -> do + rev <- fromMaybe "" <$> lookupEnv "CI_COMMIT_SHORT_SHA" + branch <- fromMaybe "" <$> lookupEnv "CI_COMMIT_REF_NAME" + tag <- fromMaybe "" <$> lookupEnv "CI_COMMIT_TAG" + return $ Just (tag, rev, branch) + +-- | The file format is revision, branch, and tag separate by newline +-- characters. +-- +fileVcsInfo :: IO (Maybe (String, String, String)) +fileVcsInfo = do + doesFileExist ".vcs-info" >>= \x -> if x + then do + (rev : branch : tag : _) <- (<> ["", "", ""]) . lines <$> readFile ".vcs-info" + return $ Just (tag, rev, branch) + else return Nothing + +-- | Returns tag, revision, and branch name. +-- +noVcsInfo :: IO (String, String, String) +noVcsInfo = return ("", "", "") + +-- | Returns tag, revision, and branch name. +-- +getVcsInfo :: IO (String, String, String) +getVcsInfo = getVCS >>= \case + Just Mercurial -> hgInfo + Just Git -> gitInfo + _ -> gitlabCiVcsInfo >>= \case + Just a -> return a + Nothing -> fileVcsInfo >>= \case + Just a -> return a + Nothing -> noVcsInfo + +-- -------------------------------------------------------------------------- -- +-- Generate Module + +pkgInfoModule :: String -> Maybe String -> PackageDescription -> LocalBuildInfo -> IO B.ByteString +pkgInfoModule moduleName cName pkgDesc bInfo = do + (tag, revision, branch) <- getVcsInfo + + let vcsBranch = if branch == "default" || branch == "master" then "" else branch + vcsVersion = intercalate "-" . filter (/= "") $ [tag, revision, vcsBranch] + flags = map (unFlagName . fst) . filter snd . unFlagAssignment . configConfigurationsFlags . configFlags $ bInfo + + licenseString <- licenseFilesText pkgDesc + + return $ B.intercalate "\n" + [ "{-# LANGUAGE OverloadedStrings #-}" + , "{-# LANGUAGE RankNTypes #-}" + , "" + , "module " <> pack moduleName <> " " <> deprecatedMsg <> " where" + , "" + , " import Data.String (IsString)" + , " import Data.Monoid" + , " import Prelude hiding ((<>))" + , "" + , " name :: IsString a => Maybe a" + , " name = " <> maybe "Nothing" (\x -> "Just \"" <> pack x <> "\"") cName + , "" + , " tag :: IsString a => a" + , " tag = \"" <> pack tag <> "\"" + , "" + , " revision :: IsString a => a" + , " revision = \"" <> pack revision <> "\"" + , "" + , " branch :: IsString a => a" + , " branch = \"" <> pack branch <> "\"" + , "" + , " branch' :: IsString a => a" + , " branch' = \"" <> pack vcsBranch <> "\"" + , "" + , " vcsVersion :: IsString a => a" + , " vcsVersion = \"" <> pack vcsVersion <> "\"" + , "" + , " compiler :: IsString a => a" + , " compiler = \"" <> (pack . display . compilerId . compiler) bInfo <> "\"" + , "" + , " flags :: IsString a => [a]" + , " flags = " <> (pack . show) flags + , "" + , " optimisation :: IsString a => a" + , " optimisation = \"" <> (displayOptimisationLevel . withOptimization) bInfo <> "\"" + , "" + , " arch :: IsString a => a" + , " arch = \"" <> (pack . display . hostPlatform) bInfo <> "\"" + , "" + , " license :: IsString a => a" + , " license = \"" <> (pack . prettyShow . license) pkgDesc <> "\"" + , "" + , " licenseText :: IsString a => a" + , " licenseText = " <> (pack . show) licenseString + , "" + , " copyright :: IsString a => a" + , " copyright = " <> (pack . show . copyright) pkgDesc + , "" + , " author :: IsString a => a" + , " author = \"" <> (pack . ft . author) pkgDesc <> "\"" + , "" + , " homepage :: IsString a => a" + , " homepage = \"" <> (pack . ft . 
homepage) pkgDesc <> "\"" + , "" + , " package :: IsString a => a" + , " package = \"" <> (pack . display . package) pkgDesc <> "\"" + , "" + , " packageName :: IsString a => a" + , " packageName = \"" <> (pack . display . packageName) pkgDesc <> "\"" + , "" + , " packageVersion :: IsString a => a" + , " packageVersion = \"" <> (pack . display . packageVersion) pkgDesc <> "\"" + , "" + , " dependencies :: IsString a => [a]" + , " dependencies = " <> (pack . show . map (display . packageId) . allPackages . installedPkgs) bInfo + , "" + , " dependenciesWithLicenses :: IsString a => [a]" + , " dependenciesWithLicenses = " <> (pack . show . map pkgIdWithLicense . allPackages . installedPkgs) bInfo + , "" + , " versionString :: (Monoid a, IsString a) => a" + , " versionString = case name of" + , " Nothing -> package <> \" (revision \" <> vcsVersion <> \")\"" + , " Just n -> n <> \"-\" <> packageVersion <> \" (package \" <> package <> \" revision \" <> vcsVersion <> \")\"" + , "" + , " info :: (Monoid a, IsString a) => a" + , " info = versionString <> \"\\n\" <> copyright" + , "" + , " longInfo :: (Monoid a, IsString a) => a" + , " longInfo = info <> \"\\n\\n\"" + , " <> \"Author: \" <> author <> \"\\n\"" + , " <> \"License: \" <> license <> \"\\n\"" + , " <> \"Homepage: \" <> homepage <> \"\\n\"" + , " <> \"Build with: \" <> compiler <> \" (\" <> arch <> \")\" <> \"\\n\"" + , " <> \"Build flags: \" <> mconcat (map (\\x -> \" \" <> x) flags) <> \"\\n\"" + , " <> \"Optimisation: \" <> optimisation <> \"\\n\\n\"" + , " <> \"Dependencies:\\n\" <> mconcat (map (\\x -> \" \" <> x <> \"\\n\") dependenciesWithLicenses)" + , "" + , " pkgInfo :: (Monoid a, IsString a) => (a, a, a, a)" + , " pkgInfo =" + , " ( info" + , " , longInfo" + , " , versionString" + , " , licenseText" + , " )" + , "" + ] + where + displayOptimisationLevel NoOptimisation = "none" + displayOptimisationLevel NormalOptimisation = "normal" + displayOptimisationLevel MaximumOptimisation = "maximum" + + deprecatedMsg = if moduleName /= pkgInfoModuleName + then "{-# DEPRECATED \"Update to Cabal 2.0 or later and use just PkgInfo as module name.\" #-}" + else "" + +licenseFilesText :: PackageDescription -> IO B.ByteString +licenseFilesText pkgDesc = + B.intercalate "\n------------------------------------------------------------\n" <$> mapM fileTextStr + (licenseFiles pkgDesc) + where + fileTextStr = fileText . getSymbolicPath + fileText file = doesFileExist file >>= \x -> if x + then B.readFile file + else return "" + +pkgIdWithLicense :: I.InstalledPackageInfo -> String +pkgIdWithLicense a = (display . packageId) a + ++ " [" + ++ prettyLicense a + ++ (if cr /= "" then ", " ++ cr else "") + ++ "]" + where + cr = (unwords . words . ft . 
I.copyright) a
diff --git a/chainweb-beacon/c/rlim_utils.c b/chainweb-beacon/c/rlim_utils.c
new file mode 100644
index 0000000000..43f0751892
--- /dev/null
+++ b/chainweb-beacon/c/rlim_utils.c
@@ -0,0 +1,30 @@
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <stdint.h>
+#include <errno.h>
+
+struct uint64_t_pair {
+ uint64_t fst;
+ uint64_t snd;
+};
+
+int get_open_file_limits(struct uint64_t_pair* out) {
+ struct rlimit lim;
+ int ret;
+ if ((ret = getrlimit(RLIMIT_NOFILE, &lim)) == 0) {
+ out->fst = lim.rlim_cur;
+ out->snd = lim.rlim_max;
+ } else {
+ out->fst = -1;
+ out->snd = -1;
+ }
+ return ret;
+}
+
+int set_open_file_limits(struct uint64_t_pair *limits) {
+ struct rlimit lim;
+ lim.rlim_cur = limits->fst;
+ lim.rlim_max = limits->snd;
+ if (setrlimit(RLIMIT_NOFILE, &lim) == 0) return 0;
+ else return errno;
+}
diff --git a/chainweb-beacon/chainweb-beacon.cabal b/chainweb-beacon/chainweb-beacon.cabal
new file mode 100644
index 0000000000..0dd53897a7
--- /dev/null
+++ b/chainweb-beacon/chainweb-beacon.cabal
@@ -0,0 +1,139 @@
+cabal-version: 3.8
+
+name: chainweb-beacon
+version: 2.26
+synopsis: Beacon-Node for Chainweb
+description: Beacon-Node for Chainweb, a Proof-of-Work Parallel-Chain Architecture for Massive Throughput.
+homepage: https://github.com/kadena-io/chainweb
+bug-reports: https://github.com/kadena-io/chainweb/issues
+license: BSD-3-Clause
+license-file: LICENSE
+author: Chainweb Dev Team
+maintainer: chainweb-dev@kadena.io
+copyright: Copyright (C) 2018 - 2024 Kadena LLC
+category: Blockchain, Currency, Bitcoin, Kadena
+build-type: Custom
+
+tested-with:
+ GHC == 9.10
+ GHC == 9.8
+ GHC == 9.6
+
+extra-source-files:
+ CHANGELOG.md
+ README.md
+ LICENSE
+
+source-repository head
+ type: git
+ location: https://github.com/kadena-io/chainweb-node.git
+ sub-directory: chainweb-beacon
+
+flag ed25519
+ description:
+ Use ED25519 certificates; depends on the master branch of the tls
+ package.
+ default: False
+ manual: True
+
+flag debug
+ description:
+ Enable various debugging features
+ default: False
+ manual: True
+
+flag ghc-flags
+ description: Enable ghc dumps of .ghc.flags and .ghc.version for tooling
+ default: False
+ manual: True
+
+common debugging-flags
+ if flag(debug)
+ ghc-options:
+ -g
+ cpp-options:
+ -DDEBUG_MULTINODE_TEST=1
+
+common warning-flags
+ ghc-options:
+ -Wall
+ -Werror
+ -Wcompat
+ -Wpartial-fields
+ -Wincomplete-record-updates
+ -Wincomplete-uni-patterns
+ -Widentities
+ -funclutter-valid-hole-fits
+ -fmax-relevant-binds=0
+
+ -- This is needed because -Werror and missing-home-modules together cause
+ -- problems with ghci.
+ -Wno-missing-home-modules + +custom-setup + setup-depends: + , Cabal >= 3.8 + , base >= 4.12 && < 5 + , bytestring >= 0.10.12 + , directory >= 1.3 + , filepath >= 1.4 + , process >= 1.5 + +-- -------------------------------------------------------------------------- -- +-- Chainweb Node Application +-- -------------------------------------------------------------------------- -- + +-- The application that runs an chainweb node +-- +executable chainweb-beacon + import: warning-flags, debugging-flags + + default-language: Haskell2010 + ghc-options: + -threaded + "-with-rtsopts=-N -H1G -A64M --disable-delayed-os-memory-return" + -rtsopts + hs-source-dirs: src + main-is: ChainwebBeaconNode.hs + c-sources: c/rlim_utils.c + other-modules: + Paths_chainweb_beacon + PkgInfo + Utils.CheckRLimits + Utils.InstallSignalHandlers + Chainweb.Beacon.Configuration + autogen-modules: + Paths_chainweb_beacon + PkgInfo + build-depends: + -- internal + , chainweb + + -- external + , async >= 2.2 + , base >= 4.12 && < 5 + , chainweb-storage >= 0.1 + , configuration-tools >= 0.6 + , deepseq >= 1.4 + , directory >= 1.3 + , filepath >= 1.4 + , http-client >= 0.5 + , http-client-tls >=0.3 + , lens >= 4.17 + , loglevel >= 0.1 + , managed >= 1.0 + , streaming >= 0.2 + , text >= 2.0 + , time >= 1.12.2 + , yet-another-logger >= 0.4.1 + , exceptions >=0.10 + , mtl >=2.3 + , pact-json >=0.1 + , unordered-containers >=0.2 + , warp >=3.4 + + + if !os(windows) + build-depends: + unix >= 2.7 + diff --git a/chainweb-beacon/src/Chainweb/Beacon/Configuration.hs b/chainweb-beacon/src/Chainweb/Beacon/Configuration.hs new file mode 100644 index 0000000000..4740f22e70 --- /dev/null +++ b/chainweb-beacon/src/Chainweb/Beacon/Configuration.hs @@ -0,0 +1,464 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} + +-- | +-- Module: Chainweb.Beacon.Configuration +-- Copyright: Copyright © 2021 Kadena LLC. +-- License: MIT +-- Maintainer: Lars Kuhtz +-- Stability: experimental +-- +module Chainweb.Beacon.Configuration +( +-- * Throttling Configuration + P2pThrottlingConfig(..) +, p2pThrottlingRate +, p2pThrottlingPutPeerRate +, defaultP2pThrottlingConfig +, pP2pThrottlingConfig + +-- * Service API Configuration +, ServiceApiConfig(..) +, serviceApiConfigPort +, serviceApiConfigInterface +, defaultServiceApiConfig +, pServiceApiConfig + +-- * Chainweb Configuration +, BeaconConfiguration(..) 
+, configChainwebVersion
+, configHistoryLimit
+, configMining
+, configP2p
+, configP2pThrottling
+, configServiceApi
+, configPayloadProviders
+, defaultBeaconConfiguration
+, pBeaconConfiguration
+, validateBeaconConfiguration
+
+) where
+
+import Configuration.Utils hiding (Error, Lens', disabled)
+
+import Control.Lens hiding ((.=), (<.>))
+import Control.Monad
+import Control.Monad.Catch (throwM)
+import Control.Monad.Except
+
+import Data.Foldable
+import Data.Maybe
+import qualified Data.HashMap.Strict as HM
+import qualified Data.HashSet as HS
+import qualified Data.Text as T
+
+import GHC.Generics hiding (from)
+
+import Network.Wai.Handler.Warp hiding (Port)
+
+import Numeric.Natural (Natural)
+
+import Prelude hiding (log)
+
+-- internal modules
+
+import Chainweb.Difficulty
+import Chainweb.HostAddress
+import Chainweb.Miner.Config
+import Chainweb.Payload.RestAPI (PayloadBatchLimit(..), defaultServicePayloadBatchLimit)
+import Chainweb.Utils
+import Chainweb.Version
+import Chainweb.Version.Development
+import Chainweb.Version.Mainnet
+import Chainweb.Version.RecapDevelopment
+import Chainweb.Version.Registry
+
+import P2P.Node.Configuration
+
+-- -------------------------------------------------------------------------- --
+-- Outline
+
+-- databaseDirectory: null
+--
+-- chainweb-beacon:
+--
+-- chainwebVersion: mainnet01
+--
+-- # This must not be smaller than the reorg limit
+-- minimumHistoryDepth: 480
+--
+-- mining:
+-- enabled: false
+--
+-- # what does this do?
+-- limit: 1200
+--
+-- # what should we support here? Ideally all kinds of keys including gas
+-- # station accounts and cross-chain support.
+-- miners: []
+--
+-- # Microseconds seem a bit too aggressive, but, well, it's future proof at
+-- # least.
+-- payloadRefreshDelay: 15000000
+--
+-- # Do we need this? We have not supported public mining for a long time now.
+-- # updateStreamLimit: 2000
+--
+-- # Even this may not be needed nowadays any more. If somebody connects a
+-- # client that is not a good citizen it is not our problem.
+-- # updateStreamTimeout: 240
+--
+-- p2p:
+-- bootstrapReachability: 0.5
+-- ignoreBootstrapNodes: false
+-- maxPeerCount: 50
+-- maxSessionCount: 10
+-- peer:
+-- certificateChain: null
+-- certificateChainFile: null
+-- hostaddress:
+-- hostname: 0.0.0.0
+-- port: 1789
+-- interface: '*'
+-- key: null
+-- keyFile: null
+-- peers: []
+-- private: false
+-- sessionTimeout: 240
+--
+-- # TODO this should be more fine-grained with default values based on the
+-- # protocol.
+-- p2pThrottling:
+-- global: 50.0
+-- putPeer: 11
+--
+-- # Should the service API move completely to EL?
+-- # We need at least some basic API for the beacon chain:
+-- #
+-- # - cuts
+-- # - headers
+-- # - what about payloads and outputs? At least for now we need that.
+-- # - what about SPV?
+-- serviceApi:
+-- interface: '*'
+-- payloadBatchLimit: 1000
+-- port: 1848
+--
+-- # How do we actually enforce a particular payload provider?
+-- # How do we call it? a protocol?
+--
+-- # 'enabled: false' is the default for a chain.
+-- payloadProviders:
+-- - chain: 0
+-- enabled: true
+-- hostaddress:
+-- host: localhost
+-- port: 19890
+-- type: evm
+-- x-auth:
+-- jwt-secret: abcdef
+-- - chain: 1
+-- enabled: true
+-- hostaddress:
+-- host: localhost
+-- port: 19891
+-- type: evm
+-- x-auth:
+-- jwt-secret: abcdef
+-- - chain: 2
+-- enabled: true
+-- hostaddress:
+-- host: localhost
+-- port: 19891
+-- type: evm
+-- x-auth:
+-- jwt-secret: abcdef
+-- - chain: 3
+-- enabled: true
+-- hostaddress:
+-- host: localhost
+-- port: 19891
+-- type: evm
+-- x-auth:
+-- jwt-secret: abcdef
+--
+
+-- -------------------------------------------------------------------------- --
+-- P2p Throttling Configuration
+
+data P2pThrottlingConfig = P2pThrottlingConfig
+ { _p2pThrottlingRate :: !Double
+ , _p2pThrottlingPutPeerRate :: !Double
+ -- ^ This should throttle aggressively. This endpoint does an expensive
+ -- check of the client. And we want to keep bad actors out of the
+ -- system. There should be no need for a client to call this endpoint on
+ -- the same node more often than at most a few times per minute.
+ }
+ deriving stock (Eq, Show)
+
+makeLenses ''P2pThrottlingConfig
+
+defaultP2pThrottlingConfig :: P2pThrottlingConfig
+defaultP2pThrottlingConfig = P2pThrottlingConfig
+ { _p2pThrottlingRate = 50 -- per second, with a 100x burst
+ , _p2pThrottlingPutPeerRate = 11 -- per second, 1 for each p2p network
+ }
+
+instance ToJSON P2pThrottlingConfig where
+ toJSON o = object
+ [ "global" .= _p2pThrottlingRate o
+ , "putPeer" .= _p2pThrottlingPutPeerRate o
+ ]
+
+instance FromJSON (P2pThrottlingConfig -> P2pThrottlingConfig) where
+ parseJSON = withObject "P2pThrottlingConfig" $ \o -> id
+ <$< p2pThrottlingRate ..: "global" % o
+ <*< p2pThrottlingPutPeerRate ..: "putPeer" % o
+
+pP2pThrottlingConfig :: MParser P2pThrottlingConfig
+pP2pThrottlingConfig = id
+ <$< p2pThrottlingRate .:: option auto
+ % long "p2p-throttle-global"
+ <> help "Set the global rate limiting on the P2P API in requests per second (with 100x burst)."
+ <> metavar "FLOAT"
+ <*< p2pThrottlingPutPeerRate .:: option auto
+ % long "p2p-throttle-put-peer"
+ <> help "Set the rate limiting on the P2P API in requests per second (with 100x burst)."
+ <> metavar "FLOAT"
+
+-- -------------------------------------------------------------------------- --
+-- Service API Configuration
+
+data ServiceApiConfig = ServiceApiConfig
+ { _serviceApiConfigPort :: !Port
+ -- ^ The public host address for service APIs.
+ -- A port number of 0 means that a free port is assigned by the system.
+ --
+ -- The default is 1848
+ , _serviceApiConfigInterface :: !HostPreference
+ -- ^ The network interface that the service APIs are bound to. Default is to
+ -- bind to all available interfaces ('*').
+ , _serviceApiPayloadBatchLimit :: PayloadBatchLimit
+ -- ^ maximum size for payload batches on the service API. Default is
+ -- 'Chainweb.Payload.RestAPI.defaultServicePayloadBatchLimit'.
+ } + deriving (Show, Eq, Generic) + +makeLenses ''ServiceApiConfig + +defaultServiceApiConfig :: ServiceApiConfig +defaultServiceApiConfig = ServiceApiConfig + { _serviceApiConfigPort = 1848 + , _serviceApiConfigInterface = "*" + , _serviceApiPayloadBatchLimit = defaultServicePayloadBatchLimit + } + +instance ToJSON ServiceApiConfig where + toJSON o = object + [ "port" .= _serviceApiConfigPort o + , "interface" .= hostPreferenceToText (_serviceApiConfigInterface o) + , "payloadBatchLimit" .= _serviceApiPayloadBatchLimit o + ] + +instance FromJSON (ServiceApiConfig -> ServiceApiConfig) where + parseJSON = withObject "ServiceApiConfig" $ \o -> id + <$< serviceApiConfigPort ..: "port" % o + <*< setProperty serviceApiConfigInterface "interface" (parseJsonFromText "interface") o + <*< serviceApiPayloadBatchLimit ..: "payloadBatchLimit" % o + +pServiceApiConfig :: MParser ServiceApiConfig +pServiceApiConfig = id + <$< serviceApiConfigPort .:: pPort service + <*< serviceApiConfigInterface .:: textOption + % prefixLong service "interface" + <> suffixHelp service "interface that the service Rest API binds to (see HostPreference documentation for details)" + <*< serviceApiPayloadBatchLimit .:: fmap PayloadBatchLimit . option auto + % prefixLong service "payload-batch-limit" + <> suffixHelp service "upper limit for the size of payload batches on the service API" + where + service = Just "service" + +-- -------------------------------------------------------------------------- -- +-- Payload Provider Config + +data PayloadProviderType + = Pact + | Evm + deriving (Show, Eq, Ord, Generic) + +instance HasTextRepresentation PayloadProviderType where + toText Pact = "pact" + toText Evm = "evm" + + fromText "pact" = return Pact + fromText "Pact" = return Pact + fromText "evm" = return Evm + fromText "EVM" = return Evm + fromText t = throwM $ TextFormatException $ "failed to parse payload provider type " <> sshow t + +instance FromJSON PayloadProviderType where + parseJSON = parseJsonFromText "PayloadProviderType" + +instance ToJSON PayloadProviderType where + toEncoding = toEncoding . toText + toJSON = toJSON . 
toText + +data PayloadProviderConfig = PayloadProviderConfig + { _payloadProviderConfigChain :: !ChainId + , _payloadProviderConfigHostAddress :: !HostAddress + , _payloadProviderConfigType :: !PayloadProviderType + , _payloadProviderXAuth :: !Value + -- ^ provider specific authentication information in JSON format, that + -- is passed to the provider plugin + } + deriving (Show, Eq, Ord, Generic) + +instance ToJSON PayloadProviderConfig where + toJSON o = object + [ "chain" .= _payloadProviderConfigChain o + , "hostaddress" .= _payloadProviderConfigHostAddress o + , "type" .= _payloadProviderConfigType o + , "x-auth" .= _payloadProviderXAuth o + ] + +instance FromJSON PayloadProviderConfig where + parseJSON = withObject "PayloadProviderConfig" $ \o -> PayloadProviderConfig + <$> o .: "chain" + <*> o .: "hostaddress" + <*> o .: "type" + <*> o .: "x-auth" + +-- -------------------------------------------------------------------------- -- +-- History Limit + +newtype HistoryLimit = HistoryLimit Natural + deriving (Show, Eq, Ord, Generic) + deriving newtype (Num, ToJSON, FromJSON) + +-- -------------------------------------------------------------------------- -- +-- Beacon Configuration + +data BeaconConfiguration = BeaconConfiguration + { _configChainwebVersion :: !ChainwebVersion + , _configHistoryLimit :: !(Maybe HistoryLimit) + , _configMining :: !MiningConfig + , _configP2p :: !P2pConfiguration + , _configP2pThrottling :: !P2pThrottlingConfig + , _configServiceApi :: !ServiceApiConfig + , _configPayloadProviders :: ![PayloadProviderConfig] + } deriving (Show, Eq, Generic) + +makeLenses ''BeaconConfiguration + +instance HasChainwebVersion BeaconConfiguration where + _chainwebVersion = _configChainwebVersion + {-# INLINE _chainwebVersion #-} + +validateBeaconConfiguration :: ConfigValidation BeaconConfiguration [] +validateBeaconConfiguration c = do + validateMinerConfig (_configChainwebVersion c) (_configMining c) + unless (c ^. chainwebVersion . versionDefaults . 
disablePeerValidation) $ + validateP2pConfiguration (_configP2p c) + validateChainwebVersion (_configChainwebVersion c) + validatePayloadProvidersConfig (_configPayloadProviders c) + +validateChainwebVersion :: ConfigValidation ChainwebVersion [] +validateChainwebVersion v = unless (isDevelopment || elem v knownVersions) $ + throwError $ T.unwords + [ "Specifying version properties is only legal with chainweb-version" + , "set to recap-development or development, but version is set to" + , sshow (_versionName v) + ] + where + isDevelopment = _versionCode v `elem` [_versionCode dv | dv <- [recapDevnet, devnet]] + +validatePayloadProvidersConfig :: ConfigValidation [PayloadProviderConfig] [] +validatePayloadProvidersConfig = error "validatePayloadProviders: TODO" + +defaultBeaconConfiguration :: ChainwebVersion -> BeaconConfiguration +defaultBeaconConfiguration v = BeaconConfiguration + { _configChainwebVersion = v + , _configHistoryLimit = Nothing + , _configMining = defaultMining + , _configP2p = defaultP2pConfiguration + , _configP2pThrottling = defaultP2pThrottlingConfig + , _configServiceApi = defaultServiceApiConfig + , _configPayloadProviders = [] + } + +instance ToJSON BeaconConfiguration where + toJSON o = object + [ "chainwebVersion" .= _versionName (_configChainwebVersion o) + , "historyLimit" .= _configHistoryLimit o + , "mining" .= _configMining o + , "p2p" .= _configP2p o + , "p2pThrottling" .= _configP2pThrottling o + , "serviceApi" .= _configServiceApi o + , "payloadProviders" .= _configPayloadProviders o + ] + +instance FromJSON BeaconConfiguration where + parseJSON = fmap ($ defaultBeaconConfiguration Mainnet01) . parseJSON + +instance FromJSON (BeaconConfiguration -> BeaconConfiguration) where + parseJSON = withObject "BeaconConfiguration" $ \o -> id + <$< setProperty configChainwebVersion "chainwebVersion" + (findKnownVersion <=< parseJSON) o + <*< configHistoryLimit ..: "historyLimit" % o + <*< configMining %.: "mining" % o + <*< configP2p %.: "p2p" % o + <*< configP2pThrottling %.: "p2pThrottling" % o + <*< configServiceApi %.: "serviceApi" % o + <*< configPayloadProviders .from leftMonoidalUpdate %.: "payloadProviders" % o + +pBeaconConfiguration :: MParser BeaconConfiguration +pBeaconConfiguration = id + <$< configChainwebVersion %:: parseVersion + <*< configHistoryLimit .:: fmap (Just . HistoryLimit) . 
option auto + % long "history-limit" + <> help "Minimum history in block heights that is kept by this node" + <*< configMining %:: pMiningConfig + <*< configP2p %:: pP2pConfiguration + <*< configP2pThrottling %:: pP2pThrottlingConfig + <*< configServiceApi %:: pServiceApiConfig + +parseVersion :: MParser ChainwebVersion +parseVersion = constructVersion + <$> optional + (option (findKnownVersion =<< textReader) + % long "chainweb-version" + <> short 'v' + <> help "the chainweb version that this node is using" + ) + <*> optional (textOption @Fork (long "fork-upper-bound" <> help "(development mode only) the latest fork the node will enable")) + <*> optional (BlockDelay <$> textOption (long "block-delay" <> help "(development mode only) the block delay in seconds per block")) + <*> switch (long "disable-pow" <> help "(development mode only) disable proof of work check") + where + constructVersion cliVersion fub bd disablePow' oldVersion = winningVersion + & versionBlockDelay .~ fromMaybe (_versionBlockDelay winningVersion) bd + & versionForks %~ HM.filterWithKey (\fork _ -> fork <= fromMaybe maxBound fub) + & versionUpgrades .~ + maybe (_versionUpgrades winningVersion) (\fub' -> + OnChains $ HM.mapWithKey + (\cid _ -> + case winningVersion ^?! versionForks . at fub' . _Just . atChain cid of + ForkNever -> error "Chainweb.Chainweb.Configuration.parseVersion: the fork upper bound never occurs in this version." + ForkAtBlockHeight fubHeight -> HM.filterWithKey (\bh _ -> bh <= fubHeight) (winningVersion ^?! versionUpgrades . atChain cid) + ForkAtGenesis -> winningVersion ^?! versionUpgrades . atChain cid + ) + (HS.toMap (chainIds winningVersion)) + ) fub + & versionCheats . disablePow .~ disablePow' + where + winningVersion = fromMaybe oldVersion cliVersion diff --git a/chainweb-beacon/src/ChainwebBeaconNode.hs b/chainweb-beacon/src/ChainwebBeaconNode.hs new file mode 100644 index 0000000000..a382371021 --- /dev/null +++ b/chainweb-beacon/src/ChainwebBeaconNode.hs @@ -0,0 +1,730 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE TypeApplications #-} + +{-# OPTIONS_GHC -fno-warn-orphans #-} +{-# LANGUAGE ImportQualifiedPost #-} + +-- | +-- Module: BeaconNode +-- Copyright: Copyright © 2024 Kadena LLC. 
+-- License: MIT +-- Maintainer: Lars Kuhtz +-- Stability: experimental +-- +module BeaconNode +( main +) where + +import Configuration.Utils hiding (Error) +import Configuration.Utils.Validation (validateFilePath) + +import Control.Concurrent +import Control.Concurrent.Async +import Control.DeepSeq +import Control.Exception +import Control.Lens hiding ((.=)) +import Control.Monad +import Control.Monad.Managed + +import Data.HashMap.Strict qualified as HM +import Data.Text (Text) +import Data.Text qualified as T +import Data.Time +import Data.Typeable + +import GHC.Generics hiding (from) +import GHC.Stack +import GHC.Stats + +import Network.HTTP.Client qualified as HTTP +import Network.HTTP.Client.TLS qualified as HTTPS + +import Streaming.Prelude qualified as S + +import System.Directory +import System.FilePath +import System.IO +import System.Logger qualified as L +import System.LogLevel +import System.Mem + +-- internal modules + +import Chainweb.BlockHeader +import Chainweb.Chainweb +import Chainweb.Beacon.Configuration +import Chainweb.Chainweb.CutResources +import Chainweb.Counter +import Chainweb.Cut.CutHashes +import Chainweb.CutDB +import Chainweb.Logger +import Chainweb.Logging.Config +import Chainweb.Logging.Miner +import Chainweb.Miner.Coordinator (MiningStats) +import Chainweb.Payload +import Chainweb.Payload.PayloadStore +import Chainweb.Time +import Data.Time.Format.ISO8601 +import Chainweb.Utils +import Chainweb.Utils.RequestLog +import Chainweb.Version +import Chainweb.Version.Mainnet +import Chainweb.Version.Testnet04 (testnet04) +import Chainweb.Version.Registry + +import Chainweb.Storage.Table.RocksDB + +import Data.LogMessage + +import P2P.Node + +import PkgInfo + +import Utils.Logging +import Utils.Logging.Config +import Utils.Logging.Trace + +import Utils.CheckRLimits +import Utils.InstallSignalHandlers +import Chainweb.Chainweb.PeerResources +import Chainweb.HostAddress + +-- -------------------------------------------------------------------------- -- +-- Beacon Chain Configuration + +data BeaconNodeConfiguration = BeaconNodeConfiguration + { _nodeConfigChainweb :: !BeaconConfiguration + , _nodeConfigLog :: !LogConfig + , _nodeConfigDatabaseDirectory :: !(Maybe FilePath) + , _nodeConfigResetChainDbs :: !Bool + } + deriving (Show, Eq, Generic) + +makeLenses ''BeaconNodeConfiguration + +defaultBeaconNodeConfiguration :: BeaconNodeConfiguration +defaultBeaconNodeConfiguration = BeaconNodeConfiguration + { _nodeConfigChainweb = defaultBeaconConfiguration Mainnet01 + , _nodeConfigLog = defaultLogConfig + & logConfigLogger . 
L.loggerConfigThreshold .~ level
+ , _nodeConfigDatabaseDirectory = Nothing
+ , _nodeConfigResetChainDbs = False
+ }
+ where
+ level = L.Info
+
+validateBeaconNodeConfiguration :: ConfigValidation BeaconNodeConfiguration []
+validateBeaconNodeConfiguration o = do
+ validateLogConfig $ _nodeConfigLog o
+ validateBeaconConfiguration $ _nodeConfigChainweb o
+ mapM_ (validateFilePath "databaseDirectory") (_nodeConfigDatabaseDirectory o)
+
+instance ToJSON BeaconNodeConfiguration where
+ toJSON o = object
+ [ "chainweb-beacon" .= _nodeConfigChainweb o
+ , "logging" .= _nodeConfigLog o
+ , "databaseDirectory" .= _nodeConfigDatabaseDirectory o
+ , "resetChainDatabases" .= _nodeConfigResetChainDbs o
+ ]
+
+instance FromJSON (BeaconNodeConfiguration -> BeaconNodeConfiguration) where
+ parseJSON = withObject "ChainwebNodeConfig" $ \o -> id
+ <$< nodeConfigChainweb %.: "chainweb-beacon" % o
+ <*< nodeConfigLog %.: "logging" % o
+ <*< nodeConfigDatabaseDirectory ..: "databaseDirectory" % o
+ <*< nodeConfigResetChainDbs ..: "resetChainDatabases" % o
+
+pBeaconNodeConfiguration :: MParser BeaconNodeConfiguration
+pBeaconNodeConfiguration = id
+ <$< nodeConfigChainweb %:: pBeaconConfiguration
+ <*< nodeConfigLog %:: pLogConfig
+ <*< nodeConfigDatabaseDirectory .:: fmap Just % textOption
+ % long "database-directory"
+ <> help "directory where the databases are persisted"
+ <*< nodeConfigResetChainDbs .:: enableDisableFlag
+ % long "reset-chain-databases"
+ <> help "Reset the chain databases for all chains on startup"
+
+getRocksDbDir :: HasCallStack => BeaconNodeConfiguration -> IO FilePath
+getRocksDbDir conf = (\base -> base </> "0" </> "rocksDb") <$> getDbBaseDir conf
+
+getBackupsDir :: HasCallStack => BeaconNodeConfiguration -> IO FilePath
+getBackupsDir conf = (</> "backups") <$> getDbBaseDir conf
+
+getDbBaseDir :: HasCallStack => BeaconNodeConfiguration -> IO FilePath
+getDbBaseDir conf = case _nodeConfigDatabaseDirectory conf of
+ Nothing -> getXdgDirectory XdgData
+ $ "chainweb-node" </> sshow (_versionName v)
+ Just d -> return d
+ where
+ v = _configChainwebVersion $ _nodeConfigChainweb conf
+
+-- -------------------------------------------------------------------------- --
+-- Monitors
+
+-- | Run a monitor function with a logger forever. If the monitor function exits
+-- or fails, the event is logged and the function is restarted.
+--
+-- In order to prevent the function from spinning in case of a persistent failure
+-- cause, only 10 immediate restarts are allowed. After that, restarts are throttled
+-- to at most one restart every 10 seconds.
+--
+runMonitorLoop :: Logger logger => Text -> logger -> IO () -> IO ()
+runMonitorLoop actionLabel logger = runForeverThrottled
+ (logFunction logger)
+ actionLabel
+ 10 -- 10 bursts in case of failure
+ (10 * mega) -- allow restart every 10 seconds in case of failure
+
+runCutMonitor :: Logger logger => logger -> CutDb tbl -> IO ()
+runCutMonitor logger db = L.withLoggerLabel ("component", "cut-monitor") logger $ \l ->
+ runMonitorLoop "ChainwebNode.runCutMonitor" l $ do
+ logFunctionJson l Info .
cutToCutHashes Nothing + =<< _cut db + threadDelay 15_000_000 + +data BlockUpdate = BlockUpdate + { _blockUpdateBlockHeader :: !(ObjectEncoded BlockHeader) + , _blockUpdateOrphaned :: !Bool + , _blockUpdateTxCount :: !Int + } + deriving (Show, Eq, Ord, Generic, NFData) + +instance ToJSON BlockUpdate where + toEncoding o = pairs + $ "header" .= _blockUpdateBlockHeader o + <> "orphaned" .= _blockUpdateOrphaned o + <> "txCount" .= _blockUpdateTxCount o + toJSON o = object + [ "header" .= _blockUpdateBlockHeader o + , "orphaned" .= _blockUpdateOrphaned o + , "txCount" .= _blockUpdateTxCount o + ] + + {-# INLINE toEncoding #-} + {-# INLINE toJSON #-} + +runBlockUpdateMonitor :: CanReadablePayloadCas tbl => Logger logger => logger -> CutDb tbl -> IO () +runBlockUpdateMonitor logger db = L.withLoggerLabel ("component", "block-update-monitor") logger $ \l -> + runMonitorLoop "ChainwebNode.runBlockUpdateMonitor" l $ do + blockDiffStream db + & S.mapM toUpdate + & S.mapM_ (logFunctionJson l Info) + where + payloadDb = view cutDbPayloadDb db + + txCount :: BlockHeader -> IO Int + txCount bh = do + bp <- lookupPayloadDataWithHeight payloadDb (Just $ view blockHeight bh) (view blockPayloadHash bh) >>= \case + Nothing -> error "block payload not found" + Just x -> return x + return $ length $ view payloadDataTransactions bp + + toUpdate :: Either BlockHeader BlockHeader -> IO BlockUpdate + toUpdate (Right bh) = BlockUpdate + <$> pure (ObjectEncoded bh) -- _blockUpdateBlockHeader + <*> pure False -- _blockUpdateOrphaned + <*> txCount bh -- _blockUpdateTxCount + toUpdate (Left bh) = BlockUpdate + <$> pure (ObjectEncoded bh) -- _blockUpdateBlockHeader + <*> pure True -- _blockUpdateOrphaned + <*> ((0 -) <$> txCount bh) -- _blockUpdateTxCount + +-- type CutLog = HM.HashMap ChainId (ObjectEncoded BlockHeader) + +-- This instances are OK, since this is the "Main" module of an application +-- +deriving instance NFData GCDetails +deriving instance NFData RTSStats + +deriving instance ToJSON GCDetails +deriving instance ToJSON RTSStats + +runRtsMonitor :: Logger logger => logger -> IO () +runRtsMonitor logger = L.withLoggerLabel ("component", "rts-monitor") logger go + where + go l = getRTSStatsEnabled >>= \case + False -> do + logFunctionText l Warn "RTS Stats isn't enabled. Run with '+RTS -T' to enable it." + True -> do + runMonitorLoop "Chainweb.Node.runRtsMonitor" l $ do + logFunctionText l Debug $ "logging RTS stats" + stats <- getRTSStats + logFunctionJson logger Info stats + approximateThreadDelay 60_000_000 {- 1 minute -} + +runQueueMonitor :: Logger logger => logger -> CutDb tbl -> IO () +runQueueMonitor logger cutDb = L.withLoggerLabel ("component", "queue-monitor") logger go + where + go l = do + runMonitorLoop "ChainwebNode.runQueueMonitor" l $ do + logFunctionText l Debug $ "logging cut queue stats" + stats <- getQueueStats cutDb + logFunctionJson logger Info stats + approximateThreadDelay 60_000_000 {- 1 minute -} + +data DbStats = DbStats + { dbStatsName :: !Text + , dbStatsSize :: !Integer + } deriving (Generic, NFData, ToJSON) + +runDatabaseMonitor :: Logger logger => logger -> FilePath -> IO () +runDatabaseMonitor logger rocksDbDir = L.withLoggerLabel ("component", "database-monitor") logger go + where + go l = do + runMonitorLoop "ChainwebNode.runDatabaseMonitor" l $ do + logFunctionText l Debug $ "logging database stats" + logFunctionJson l Info . 
DbStats "rocksDb" =<< sizeOf rocksDbDir + approximateThreadDelay 1_200_000_000 {- 20 minutes -} + sizeOf path = do + dir <- doesDirectoryExist path + file <- doesFileExist path + if dir then + fmap sum . traverse (sizeOf . (path )) =<< listDirectory path + else if file then + getFileSize path + else + pure 0 + +-- -------------------------------------------------------------------------- -- +-- + +-- Intializes all service API chainweb components but doesn't start any networking. +-- +withBeacon + :: forall logger + . Logger logger + => BeaconConfiguration + -> logger + -> RocksDb + -> (StartedChainweb logger -> IO ()) + -> IO () +withBeacon c logger rocksDb inner = + withPeerResources v (view configP2p confWithBootstraps) logger $ \logger' peer -> + withSocket serviceApiPort serviceApiHost $ \serviceSock -> do + let conf' = confWithBootstraps + & set configP2p (_peerResConfig peer) + & set (configServiceApi . serviceApiConfigPort) (fst serviceSock) + withBeaconInternal + conf' + logger' + peer + serviceSock + rocksDb + inner + where + serviceApiPort = _serviceApiConfigPort $ _configServiceApi c + serviceApiHost = _serviceApiConfigInterface $ _configServiceApi c + + v = _chainwebVersion c + + -- Here we inject the hard-coded bootstrap peer infos for the configured + -- chainweb version into the configuration. + confWithBootstraps + | _p2pConfigIgnoreBootstrapNodes (_configP2p c) = c + | otherwise = configP2p . p2pConfigKnownPeers + %~ (\x -> L.nub $ x <> _versionBootstraps v) $ c + +-- Intializes all service chainweb components but doesn't start any networking. +-- +withBeaconInternal + :: forall logger + . Logger logger + => BeaconConfiguration + -> logger + -> PeerResources logger + -> (Port, _) + -> RocksDb + -> (StartedChainweb logger -> IO ()) + -> IO () +withBeaconInternal conf logger peer serviceSock rocksDb inner = do + + initializePayloadDb v payloadDb + + logFunctionJson logger Info InitializingChainResources + + logg Debug "start initializing chain resources" + logFunctionText logger Info $ "opening pact db in directory " <> sshow pactDbDir + + concurrentWith + -- initialize chains concurrently + (\cid x -> withChainResources + v + cid + rocksDb + (chainLogger cid) + mcfg + payloadDb + x + ) + + -- initialize global resources after all chain resources are initialized + (\cs -> do + logg Debug "finished initializing chain resources" + global (HM.fromList $ zip cidsList cs) + ) + cidsList + where + cidsList :: [ChainId] + cidsList = toList cids + + payloadDb :: PayloadDb RocksDbTable + payloadDb = newPayloadDb rocksDb + + chainLogger :: ChainId -> logger + chainLogger cid = addLabel ("chain", toText cid) logger + + initLogger :: logger + initLogger = setComponent "init" logger + + logg :: LogFunctionText + logg = logFunctionText initLogger + + -- Initialize global resources + global + :: HM.HashMap ChainId (ChainResources logger) + -> IO () + global cs = do + let !webchain = mkWebBlockHeaderDb v (HM.map _chainResBlockHeaderDb cs) + + -- TODO + !payloadProviders = mkWebPayloadExecutionService (HM.map _chainResPayloadProvider cs) + + !cutLogger = setComponent "cut" logger + !mgr = _peerResManager peer + + logg Debug "start initializing cut resources" + logFunctionJson logger Info InitializingCutResources + + withCutResources cutConfig peer cutLogger rocksDb webchain payloadDb mgr payloadProviders $ \cuts -> do + logg Debug "finished initializing cut resources" + + let !mLogger = setComponent "miner" logger + !mConf = _configMining conf + !mCutDb = _cutResCutDb cuts + !throt = 
_configP2pThrottling conf + + -- initialize throttler + throttler <- mkGenericThrottler $ _throttlingRate throt + putPeerThrottler <- mkPutPeerThrottler $ _throttlingPeerRate throt + logg Debug "initialized throttlers" + + -- synchronize PayloadProviders with latest cut before we start the server + -- and clients and begin mining. + -- + -- This is a consistency check that validates the blocks in the + -- current cut. If it fails an exception is raised. Also, if it + -- takes long (for example, when doing a reset to a prior block + -- height) we want this to happen before we go online. + -- + initialCut <- _cut mCutDb + logg Info "start synchronizing payload providers to initial cut" + logFunctionJson logger Info InitialSyncInProgress + synchronizePactDb cs initialCut + logg Info "finished synchronizing payload providers to initial cut" + withPayloadData cs cuts $ \payloadData -> do + logg Debug "start initializing miner resources" + logFunctionJson logger Info InitializingMinerResources + + withMiningCoordination mLogger mConf mCutDb $ \mc -> + + -- Miner resources are used by the test-miner when in-node + -- mining is configured or by the mempool noop-miner (which + -- keeps the mempool updated) in production setups. + -- + withMinerResources mLogger (_miningInNode mConf) cs mCutDb mc $ \m -> do + logFunctionJson logger Info ChainwebStarted + logg Debug "finished initializing miner resources" + let !haddr = _peerConfigAddr $ _p2pConfigPeer $ _configP2p conf + inner $ StartedChainweb Chainweb + { _chainwebHostAddress = haddr + , _chainwebChains = cs + , _chainwebCutResources = cuts + , _chainwebMiner = m + , _chainwebCoordinator = mc + , _chainwebLogger = logger + , _chainwebPeer = peer + , _chainwebPayloadDb = view cutDbPayloadDb $ _cutResCutDb cuts + , _chainwebManager = mgr + , _chainwebPayloadData = pactData + , _chainwebGlobalThrottler = throttler + , _chainwebPutPeerThrottler = putPeerThrottler + , _chainwebConfig = conf + , _chainwebServiceSocket = serviceSock + } + + withPayloadData + :: HM.HashMap ChainId (ChainResources logger) + -> CutResources logger tbl + -> ([(ChainId, PactServerData logger tbl)] -> IO b) + -> IO b + withPayloadData cs cuts m = do + let l = sortBy (compare `on` fst) (HM.toList cs) + m $ l <&> fmap (\cr -> PactServerData + { _pactServerDataCutDb = _cutResCutDb cuts + , _pactServerDataLogger = _chainResLogger cr + , _pactServerDataPayloadProvider = _chainResPayloadProvider cr + }) + + v = _configChainwebVersion conf + cids = chainIds v + + -- FIXME: make this configurable + cutConfig :: CutDbParams + cutConfig = (defaultCutDbParams v $ _cutFetchTimeout cutConf) + { _cutDbParamsLogLevel = Info + , _cutDbParamsTelemetryLevel = Info + , _cutDbParamsInitialHeightLimit = _cutInitialBlockHeightLimit cutConf + , _cutDbParamsFastForwardHeightLimit = _cutFastForwardBlockHeightLimit cutConf + , _cutDbParamsReadOnly = _configOnlySyncPact conf || _configReadOnlyReplay conf + } + where + cutConf = _configCuts conf + + synchronizePayloadProvider :: HM.HashMap ChainId (ChainResources logger) -> Cut -> IO () + synchronizePayloadProvider cs targetCut = do + mapConcurrently_ syncOne $ + HM.intersectionWith (,) (_cutMap targetCut) cs + where + syncOne :: (BlockHeader, ChainResources logger) -> IO () + syncOne (bh, cr) = do + let provider = _chainResPayloadProvider cr + let logCr = logFunctionText + $ addLabel ("component", "pact") + $ addLabel ("sub-component", "init") + $ _chainResLogger cr + void $ _syncToBlock provider bh + logCr Debug "payload provider synchronized" + + +-- 
-------------------------------------------------------------------------- -- +-- Run Node + +node :: HasCallStack => Logger logger => BeaconNodeConfiguration -> logger -> IO () +node conf logger = do + dbBaseDir <- getDbBaseDir conf + when (_nodeConfigResetChainDbs conf) $ removeDirectoryRecursive dbBaseDir + rocksDbDir <- getRocksDbDir conf + dbBackupsDir <- getBackupsDir conf + withRocksDb rocksDbDir modernDefaultOptions $ \rocksDb -> do + logFunctionText logger Info $ "opened rocksdb in directory " <> sshow rocksDbDir + withChainweb cwConf logger rocksDb $ \case + Replayed _ _ -> return () + StartedChainweb cw -> do + let telemetryEnabled = + _enableConfigEnabled $ _logConfigTelemetryBackend $ _nodeConfigLog conf + concurrentlies_ + [ runChainweb cw (\_ -> return ()) + -- we should probably push 'onReady' deeper here but this should be ok + , when telemetryEnabled $ + runCutMonitor (_chainwebLogger cw) (_cutResCutDb $ _chainwebCutResources cw) + , when telemetryEnabled $ + runQueueMonitor (_chainwebLogger cw) (_cutResCutDb $ _chainwebCutResources cw) + , when telemetryEnabled $ + runRtsMonitor (_chainwebLogger cw) + , when telemetryEnabled $ + runBlockUpdateMonitor (_chainwebLogger cw) (_cutResCutDb $ _chainwebCutResources cw) + , when telemetryEnabled $ + runDatabaseMonitor (_chainwebLogger cw) rocksDbDir + ] + where + cwConf = _nodeConfigChainweb conf + +withNodeLogger + :: LogConfig + -> BeaconConfiguration + -> ChainwebVersion + -> (L.Logger SomeLogMessage -> IO ()) + -> IO () +withNodeLogger logCfg chainwebCfg v f = runManaged $ do + + -- This manager is used only for logging backends + mgr <- liftIO HTTPS.newTlsManager + + -- Base Backend + baseBackend <- managed + $ withBaseHandleBackend "ChainwebApp" mgr pkgInfoScopes (_logConfigBackend logCfg) + + -- Telemetry Backends + monitorBackend <- managed + $ mkTelemetryLogger @CutHashes mgr teleLogConfig + p2pInfoBackend <- managed + $ mkTelemetryLogger @P2pSessionInfo mgr teleLogConfig + rtsBackend <- managed + $ mkTelemetryLogger @RTSStats mgr teleLogConfig + counterBackend <- managed $ configureHandler + (withJsonHandleBackend @CounterLog "counters" mgr pkgInfoScopes) + teleLogConfig + newBlockBackend <- managed + $ mkTelemetryLogger @NewMinedBlock mgr teleLogConfig + orphanedBlockBackend <- managed + $ mkTelemetryLogger @OrphanedBlock mgr teleLogConfig + miningStatsBackend <- managed + $ mkTelemetryLogger @MiningStats mgr teleLogConfig + requestLogBackend <- managed + $ mkTelemetryLogger @RequestResponseLog mgr teleLogConfig + queueStatsBackend <- managed + $ mkTelemetryLogger @QueueStats mgr teleLogConfig + traceBackend <- managed + $ mkTelemetryLogger @Trace mgr teleLogConfig + blockUpdateBackend <- managed + $ mkTelemetryLogger @BlockUpdate mgr teleLogConfig + dbStatsBackend <- managed + $ mkTelemetryLogger @DbStats mgr teleLogConfig + p2pNodeStatsBackend <- managed + $ mkTelemetryLogger @P2pNodeStats mgr teleLogConfig + topLevelStatusBackend <- managed + $ mkTelemetryLogger @ChainwebStatus mgr teleLogConfig + + logger <- managed + $ L.withLogger (_logConfigLogger logCfg) $ logHandles + [ logFilterHandle (_logConfigFilter logCfg) + , logHandler monitorBackend + , logHandler p2pInfoBackend + , logHandler rtsBackend + , logHandler counterBackend + , logHandler newBlockBackend + , logHandler orphanedBlockBackend + , logHandler miningStatsBackend + , logHandler requestLogBackend + , logHandler queueStatsBackend + , logHandler traceBackend + , logHandler blockUpdateBackend + , logHandler dbStatsBackend + , logHandler 
+mkTelemetryLogger
+    :: forall a b
+    . Typeable a
+    => ToJSON a
+    => HTTP.Manager
+    -> EnableConfig BackendConfig
+    -> (Backend (JsonLog a) -> IO b)
+    -> IO b
+mkTelemetryLogger mgr = configureHandler
+    $ withJsonHandleBackend @a (sshow $ typeRep $ Proxy @a) mgr pkgInfoScopes
+
+-- -------------------------------------------------------------------------- --
+-- Service Date
+
+newtype ServiceDate = ServiceDate Text
+
+instance Show ServiceDate where
+    show (ServiceDate t) = "Service interval end: " <> T.unpack t
+
+instance Exception ServiceDate where
+    fromException = asyncExceptionFromException
+    toException = asyncExceptionToException
+
+withServiceDate
+    :: ChainwebVersion
+    -> (LogLevel -> Text -> IO ())
+    -> Maybe UTCTime
+    -> IO a
+    -> IO a
+withServiceDate v lf msd inner = case msd of
+    Nothing -> do
+        inner
+    Just sd -> do
+        if _versionCode v == _versionCode mainnet || _versionCode v == _versionCode testnet
+        then do
+            race (timer sd) inner >>= \case
+                Left () -> error "Service date thread terminated unexpectedly"
+                Right a -> return a
+        else do
+            inner
+  where
+    timer t = runForever lf "ServiceDate" $ do
+        now <- getCurrentTime
+        when (now >= t) $ do
+            lf Error shutdownMessage
+            throw $ ServiceDate shutdownMessage
+
+        let w = diffUTCTime t now
+        let micros = round $ w * 1_000_000
+        lf Warn warning
+        threadDelay $ min (10 * 60 * 1_000_000) micros
+
+      where
+        warning :: Text
+        warning = T.concat
+            [ "This version of chainweb node will stop working at " <> sshow t <> "."
+            , " Please upgrade to a new version before that date."
+            ]
+
+        shutdownMessage :: Text
+        shutdownMessage = T.concat
+            [ "Shutting down. This version of chainweb was only valid until " <> sshow t <> "."
+            , " Please upgrade to a new version."
+            ]
+
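+-- A hedged usage sketch for 'withServiceDate' (the concrete values are only
+-- illustrative and not part of this patch): guard an action with a service
+-- date one minute in the future, logging messages as plain text:
+--
+-- > example :: IO ()
+-- > example = do
+-- >     sd <- addUTCTime 60 <$> getCurrentTime
+-- >     withServiceDate mainnet (\_ msg -> putStrLn (T.unpack msg)) (Just sd) $
+-- >         threadDelay maxBound
+--
+-- Once the date has passed, the timer thread throws the asynchronous
+-- 'ServiceDate' exception, 'race' cancels the guarded action, and the node
+-- shuts down through its normal exception handling.
+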
+-- -------------------------------------------------------------------------- --
+-- Encode Package Info into Log message scopes
+
+pkgInfoScopes :: [(Text, Text)]
+pkgInfoScopes =
+    [ ("revision", revision)
+    , ("branch", branch)
+    , ("compiler", compiler)
+    , ("optimisation", optimisation)
+    , ("architecture", arch)
+    , ("package", package)
+    ]
+
+-- -------------------------------------------------------------------------- --
+-- main
+
+mainInfo :: ProgramInfo BeaconNodeConfiguration
+mainInfo = programInfoValidate
+    "Chainweb Beacon Node"
+    pBeaconNodeConfiguration
+    defaultBeaconNodeConfiguration
+    validateBeaconNodeConfiguration
+
+handles :: [Handler a] -> IO a -> IO a
+handles = flip catches
+
+main :: IO ()
+main = do
+    installFatalSignalHandlers [ sigHUP, sigTERM, sigXCPU, sigXFSZ ]
+    hSetBuffering stderr LineBuffering
+    checkRLimits
+    runWithPkgInfoConfiguration mainInfo pkgInfo $ \conf -> do
+        let v = _configChainwebVersion $ _nodeConfigBeacon conf
+        registerVersion v
+        withNodeLogger (_nodeConfigLog conf) (_nodeConfigBeacon conf) v $ \logger -> do
+            logFunctionJson logger Info ProcessStarted
+            handles
+                [ Handler $ \(e :: SomeAsyncException) ->
+                    logFunctionJson logger Info (ProcessDied $ show e) >> throwIO e
+                , Handler $ \(e :: SomeException) ->
+                    logFunctionJson logger Error (ProcessDied $ show e) >> throwIO e
+                ] $ do
+                    kt <- mapM iso8601ParseM (_versionServiceDate v)
+                    withServiceDate (_configChainwebVersion (_nodeConfigChainweb conf)) (logFunctionText logger) kt
+                        $ void
+                        $ race (node conf logger) (gcRunner (logFunctionText logger))
+  where
+    gcRunner lf = runForever lf "GarbageCollect" $ do
+        performMajorGC
+        threadDelay (30 * 1_000_000)
diff --git a/chainweb-beacon/src/Utils/CheckRLimits.hs b/chainweb-beacon/src/Utils/CheckRLimits.hs
new file mode 100644
index 0000000000..0442684ff3
--- /dev/null
+++ b/chainweb-beacon/src/Utils/CheckRLimits.hs
@@ -0,0 +1,52 @@
+{-# language ScopedTypeVariables #-}
+{-# language TypeApplications #-}
+{-# language CApiFFI #-}
+
+module Utils.CheckRLimits(checkRLimits) where
+
+import Control.Exception
+import Control.Monad
+import Foreign hiding (void)
+import Foreign.C.Error
+import Foreign.C.Types
+import System.Exit
+import System.IO
+
+data UInt64Pair
+foreign import ccall "rlim_utils.c get_open_file_limits"
+    c_getOpenFileLimits :: Ptr UInt64Pair -> IO CInt
+foreign import ccall "rlim_utils.c set_open_file_limits"
+    c_setOpenFileLimits :: Ptr UInt64Pair -> IO CInt
+
+getOpenFileLimits :: IO (Word64, Word64)
+getOpenFileLimits = allocaBytes (8 * 2) $ \pairPtr -> do
+    err <- c_getOpenFileLimits (castPtr pairPtr)
+    if err /= 0 then do
+        Errno errno <- getErrno
+        error $ "getOpenFileLimits: errno not equal to 0: " <> show errno
+    else (,) <$> peek pairPtr <*> peek (pairPtr `plusPtr` 8)
+
+setOpenFileLimits :: (Word64, Word64) -> IO ()
+setOpenFileLimits (soft, hard) = allocaBytes (8 * 2) $ \pairPtr -> do
+    poke pairPtr soft
+    poke (pairPtr `plusPtr` 8) hard
+    err <- c_setOpenFileLimits (castPtr pairPtr)
+    when (err /= 0) $ do
+        Errno errno <- getErrno
+        error $ "setOpenFileLimits: errno not equal to 0: " <> show errno
+
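+-- The C helpers in rlim_utils.c are assumed to exchange a pair of consecutive
+-- uint64_t values (soft limit first, hard limit second), which is why 16 bytes
+-- are allocated above and the second field lives at byte offset 8. A hedged,
+-- slightly more explicit marshalling sketch of the same read (the primed name
+-- is only illustrative):
+--
+-- > getOpenFileLimits' :: IO (Word64, Word64)
+-- > getOpenFileLimits' = allocaArray 2 $ \(ptr :: Ptr Word64) -> do
+-- >     err <- c_getOpenFileLimits (castPtr ptr)
+-- >     when (err /= 0) $ throwErrno "get_open_file_limits"
+-- >     [soft, hard] <- peekArray 2 ptr
+-- >     pure (soft, hard)
+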
+checkRLimits :: IO ()
+checkRLimits = void $ try @IOException $ do
+    (soft, hard) <- getOpenFileLimits
+    when (hard < 32768) $ do
+        hPutStrLn stderr $
+            "This process is only able to open " <> show hard <> " file descriptors at once, "
+            <> "which is not enough to run chainweb-node.\n"
+            <> "Set the open file limit higher than 32767 using the ulimit command or contact an administrator."
+        exitFailure
+    when (soft < 32768) $ do
+        setOpenFileLimits (hard, hard)
+        (soft', hard') <- getOpenFileLimits
+        when ((soft', hard') /= (hard, hard)) $
+            hPutStrLn stderr $
+                "Failed to set open file limit. This is an internal error. Continuing."
diff --git a/chainweb-beacon/src/Utils/InstallSignalHandlers.hs b/chainweb-beacon/src/Utils/InstallSignalHandlers.hs
new file mode 100644
index 0000000000..b11da022b3
--- /dev/null
+++ b/chainweb-beacon/src/Utils/InstallSignalHandlers.hs
@@ -0,0 +1,122 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE DeriveGeneric #-}
+
+-- |
+-- Module: Utils.InstallSignalHandlers
+-- Copyright: Copyright © 2020 Kadena LLC.
+-- License: MIT
+-- Maintainer: Lars Kuhtz
+-- Stability: experimental
+--
+-- Install signal handlers for SIGTERM and other signals that cause a "normal"
+-- termination of the service.
+--
+-- The implementation of 'installFatalSignalHandlers' is copied from
+-- .
+--
+-- The Windows (mingw32_HOST_OS) implementation of 'installHandler' is an
+-- adaptation of , which is
+-- Copyright (c) 2015 Piotr Mlodawski.
+--
+--
+module Utils.InstallSignalHandlers
+( installFatalSignalHandlers
+, installHandlerCross
+, sigHUP, sigTERM, sigUSR1, sigUSR2, sigXCPU, sigXFSZ
+) where
+
+import Control.Concurrent
+import Control.Exception hiding (Handler)
+import Control.Monad
+
+import Foreign.C.Types
+
+import GHC.Generics
+
+import System.Mem.Weak
+
+#if !mingw32_HOST_OS
+import System.Posix.Signals
+#else
+import Foreign.Ptr
+#endif
+
+-- -------------------------------------------------------------------------- --
+-- Windows (mingw) implementation of 'installHandler'
+
+#if mingw32_HOST_OS
+-- The Windows (mingw32_HOST_OS) implementation of 'installHandler' is an
+-- adaptation of , which is
+-- Copyright (c) 2015 Piotr Mlodawski.
+
+type Signal = CInt
+
+sigHUP, sigTERM, sigUSR1, sigUSR2, sigXCPU, sigXFSZ :: Signal
+sigHUP = 1
+sigTERM = 15
+sigUSR1 = 16
+sigUSR2 = 17
+sigXCPU = 30
+sigXFSZ = 31
+
+type Handler = Signal -> IO ()
+
+foreign import ccall "wrapper"
+    genHandler :: Handler -> IO (FunPtr Handler)
+
+foreign import ccall safe "signal.h signal"
+    install :: Signal -> FunPtr Handler -> IO Signal
+
+installHandler :: Signal -> Handler -> IO ()
+installHandler signal handler = do
+    result <- install signal =<< genHandler handler
+    return $ assert (result == 0) ()
+#endif
+
+-- -------------------------------------------------------------------------- --
+-- Install Signal Handlers
+
+newtype SignalException = SignalException Signal deriving (Show, Eq, Generic)
+instance Exception SignalException
+
+installHandlerCross :: Signal -> (Signal -> IO ()) -> IO ()
+installHandlerCross s h =
+#ifdef mingw32_HOST_OS
+    installHandler s h
+#else
+    void $ installHandler s (Catch (h s)) Nothing
+#endif
+
+-- | Handle SIGTERM (and other signals) that are supposed to terminate the
+-- program. By default GHC's RTS only installs a handler for SIGINT (Ctrl-C).
+-- This function installs handlers that raise an exception on the main
+-- thread when a signal is received. This causes the execution of finalization
+-- logic in brackets.
+--
+-- This is particularly important for SQLite, because it resets the WAL
+-- files. Otherwise the files would never be deallocated and would remain at
+-- their maximum size forever. Graceful shutdown will also result in better
+-- logging of issues and prevent data corruption or missing data in database
+-- backends.
+--
+-- The implementation is copied from
+-- , which also explains
+-- details.
+--
+-- This assumes that threads are managed properly. Threads that are spawned by
+-- just calling forkIO won't be notified and will terminate without executing
+-- termination logic.
+--
+installFatalSignalHandlers :: [Signal] -> IO ()
+installFatalSignalHandlers signals = do
+    main_thread_id <- myThreadId
+    weak_tid <- mkWeakThreadId main_thread_id
+    forM_ signals $ \sig ->
+        installHandlerCross sig (send_exception weak_tid)
+  where
+    send_exception weak_tid sig = do
+        m <- deRefWeak weak_tid
+        case m of
+            Nothing -> return ()
+            Just tid -> throwTo tid (toException $ SignalException sig)
+
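+-- A hedged usage sketch for 'installFatalSignalHandlers' (the resource
+-- functions are only illustrative): install the handlers first thing in
+-- 'main', so that a delivered SIGTERM is turned into a 'SignalException' on
+-- the main thread and 'bracket' finalizers get a chance to run:
+--
+-- > main :: IO ()
+-- > main = do
+-- >     installFatalSignalHandlers [sigHUP, sigTERM, sigXCPU, sigXFSZ]
+-- >     bracket acquireResources releaseResources runService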