diff --git a/Makefile b/Makefile
index f6daa0bab5..cb2303ddab 100644
--- a/Makefile
+++ b/Makefile
@@ -87,11 +87,14 @@ PORTAL_TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(FLUFFY_TOOLS))
 OS_PLATFORM = $(shell $(CC) -dumpmachine)
 ifneq (, $(findstring darwin, $(OS_PLATFORM)))
   SHAREDLIBEXT = dylib
+  STATICLIBEXT = a
 else ifneq (, $(findstring mingw, $(OS_PLATFORM))$(findstring cygwin, $(OS_PLATFORM))$(findstring msys, $(OS_PLATFORM)))
   SHAREDLIBEXT = dll
+  STATICLIBEXT = lib
 else
   SHAREDLIBEXT = so
+  STATICLIBEXT = a
 endif
 endif

@@ -165,7 +168,7 @@ all: | $(TOOLS) nimbus nimbus_execution_client

 # "-d:release" cannot be added to config.nims
-NIM_PARAMS += -d:release
+NIM_PARAMS += -d:debug
 ifneq ($(if $(ENABLE_LINE_NUMBERS),$(ENABLE_LINE_NUMBERS),0),0)
 NIM_PARAMS += -d:chronicles_line_numbers:1
 endif

@@ -348,7 +351,8 @@ nimbus-verified-proxy-test: | build deps

 libverifproxy: | build deps
+	echo -e $(BUILD_MSG) "build/$@" && \
 		$(ENV_SCRIPT) nim --version && \
-		$(ENV_SCRIPT) nim c --app:lib -d:"libp2p_pki_schemes=secp256k1" --noMain:on --threads:on --nimcache:nimcache/libverifproxy -o:$(VERIF_PROXY_OUT_PATH)/$@.$(SHAREDLIBEXT) $(NIM_PARAMS) nimbus_verified_proxy/libverifproxy/verifproxy.nim
+		echo $(NIM_PARAMS) && \
+		$(ENV_SCRIPT) nim c --app:staticlib -d:"libp2p_pki_schemes=secp256k1" --noMain:on --out:$(VERIF_PROXY_OUT_PATH)/$@.$(STATICLIBEXT) $(NIM_PARAMS) nimbus_verified_proxy/libverifproxy/verifproxy.nim
 	cp nimbus_verified_proxy/libverifproxy/verifproxy.h $(VERIF_PROXY_OUT_PATH)/
 	echo -e $(BUILD_END_MSG) "build/$@"
diff --git a/nimbus_verified_proxy/c_frontend.nim b/nimbus_verified_proxy/c_frontend.nim
deleted file mode 100644
index 0daad6cba0..0000000000
--- a/nimbus_verified_proxy/c_frontend.nim
+++ /dev/null
@@ -1,197 +0,0 @@
-# nimbus_verified_proxy
-# Copyright (c) 2025 Status Research & Development GmbH
-# Licensed and distributed under either of
-# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
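[Editor's note] The switch from --app:lib to --app:staticlib changes the embedding contract: with --noMain:on the archive ships no C entry point, so the host program must call NimMain() before any exported symbol (example.c later in this diff does exactly that). A minimal, dependency-free sketch of the export pattern the library relies on; mini.nim, mini_add and libmini.a are illustrative names, not part of this PR:

# mini.nim -- sketch of a C-callable proc, assuming flags as in the
# Makefile target above:
#   nim c --app:staticlib --noMain:on --out:libmini.a mini.nim
# The C side must run the compiler-emitted NimMain() once before calling in.
proc mini_add(a, b: cint): cint {.exportc, cdecl, raises: [].} =
  a + b

Linking is then an ordinary static link, e.g. cc main.c libmini.a.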
-
-{.push raises: [], gcsafe.}
-
-import
-  stint,
-  std/strutils,
-  json_rpc/[rpcserver, rpcproxy],
-  web3/[eth_api, eth_api_types],
-  ../execution_chain/rpc/cors,
-  ./engine/types,
-  ./nimbus_verified_proxy_conf
-
-type JsonRpcServer* = ref object
-  case kind*: ClientKind #we reuse clientKind for servers also
-  of Http:
-    httpServer: RpcHttpServer
-  of WebSocket:
-    wsServer: RpcWebSocketServer
-
-proc init*(
-    T: type JsonRpcServer, url: Web3Url
-): JsonRpcServer {.raises: [JsonRpcError, ValueError, TransportAddressError].} =
-  let
-    auth = @[httpCors(@[])] # TODO: for now we serve all cross origin requests
-    parsedUrl = parseUri(url.web3Url)
-    hostname = if parsedUrl.hostname == "": "127.0.0.1" else: parsedUrl.hostname
-    port =
-      if parsedUrl.port == "":
-        8545
-      else:
-        parseInt(parsedUrl.port)
-    listenAddress = initTAddress(hostname, port)
-
-  case url.kind
-  of HttpUrl:
-    JsonRpcServer(
-      kind: Http, httpServer: newRpcHttpServer([listenAddress], RpcRouter.init(), auth)
-    )
-  of WsUrl:
-    let server =
-      JsonRpcServer(kind: WebSocket, wsServer: newRpcWebSocketServer(listenAddress))
-
-    server.wsServer.router = RpcRouter.init()
-    server
-
-func getServer(server: JsonRpcServer): RpcServer =
-  case server.kind
-  of Http: server.httpServer
-  of WebSocket: server.wsServer
-
-proc start*(server: JsonRpcServer): Result[void, string] =
-  try:
-    case server.kind
-    of Http:
-      server.httpServer.start()
-    of WebSocket:
-      server.wsServer.start()
-  except CatchableError as e:
-    return err(e.msg)
-
-  ok()
-
-proc injectEngineFrontend*(server: JsonRpcServer, frontend: EthApiFrontend) =
-  server.getServer().rpc("eth_blockNumber") do() -> uint64:
-    await frontend.eth_blockNumber()
-
-  server.getServer().rpc("eth_getBalance") do(
-    address: Address, quantityTag: BlockTag
-  ) -> UInt256:
-    await frontend.eth_getBalance(address, quantityTag)
-
-  server.getServer().rpc("eth_getStorageAt") do(
-    address: Address, slot: UInt256, quantityTag: BlockTag
-  ) -> FixedBytes[32]:
-    await frontend.eth_getStorageAt(address, slot, quantityTag)
-
-  server.getServer().rpc("eth_getTransactionCount") do(
-    address: Address, quantityTag: BlockTag
-  ) -> Quantity:
-    await frontend.eth_getTransactionCount(address, quantityTag)
-
-  server.getServer().rpc("eth_getCode") do(
-    address: Address, quantityTag: BlockTag
-  ) -> seq[byte]:
-    await frontend.eth_getCode(address, quantityTag)
-
-  server.getServer().rpc("eth_getBlockByHash") do(
-    blockHash: Hash32, fullTransactions: bool
-  ) -> BlockObject:
-    await frontend.eth_getBlockByHash(blockHash, fullTransactions)
-
-  server.getServer().rpc("eth_getBlockByNumber") do(
-    blockTag: BlockTag, fullTransactions: bool
-  ) -> BlockObject:
-    await frontend.eth_getBlockByNumber(blockTag, fullTransactions)
-
-  server.getServer().rpc("eth_getUncleCountByBlockNumber") do(
-    blockTag: BlockTag
-  ) -> Quantity:
-    await frontend.eth_getUncleCountByBlockNumber(blockTag)
-
-  server.getServer().rpc("eth_getUncleCountByBlockHash") do(
-    blockHash: Hash32
-  ) -> Quantity:
-    await frontend.eth_getUncleCountByBlockHash(blockHash)
-
-  server.getServer().rpc("eth_getBlockTransactionCountByNumber") do(
-    blockTag: BlockTag
-  ) -> Quantity:
-    await frontend.eth_getBlockTransactionCountByNumber(blockTag)
-
-  server.getServer().rpc("eth_getBlockTransactionCountByHash") do(
-    blockHash: Hash32
-  ) -> Quantity:
-    await frontend.eth_getBlockTransactionCountByHash(blockHash)
-
-  server.getServer().rpc("eth_getTransactionByBlockNumberAndIndex") do(
-    blockTag: BlockTag, index: Quantity
-  ) -> TransactionObject:
-    await frontend.eth_getTransactionByBlockNumberAndIndex(blockTag, index)
-
-  server.getServer().rpc("eth_getTransactionByBlockHashAndIndex") do(
-    blockHash: Hash32, index: Quantity
-  ) -> TransactionObject:
-    await frontend.eth_getTransactionByBlockHashAndIndex(blockHash, index)
-
-  server.getServer().rpc("eth_call") do(
-    tx: TransactionArgs, blockTag: BlockTag, optimisticStateFetch: Opt[bool]
-  ) -> seq[byte]:
-    await frontend.eth_call(tx, blockTag, optimisticStateFetch.get(true))
-
-  server.getServer().rpc("eth_createAccessList") do(
-    tx: TransactionArgs, blockTag: BlockTag, optimisticStateFetch: Opt[bool]
-  ) -> AccessListResult:
-    await frontend.eth_createAccessList(tx, blockTag, optimisticStateFetch.get(true))
-
-  server.getServer().rpc("eth_estimateGas") do(
-    tx: TransactionArgs, blockTag: BlockTag, optimisticStateFetch: Opt[bool]
-  ) -> Quantity:
-    await frontend.eth_estimateGas(tx, blockTag, optimisticStateFetch.get(true))
-
-  server.getServer().rpc("eth_getTransactionByHash") do(
-    txHash: Hash32
-  ) -> TransactionObject:
-    await frontend.eth_getTransactionByHash(txHash)
-
-  server.getServer().rpc("eth_getBlockReceipts") do(
-    blockTag: BlockTag
-  ) -> Opt[seq[ReceiptObject]]:
-    await frontend.eth_getBlockReceipts(blockTag)
-
-  server.getServer().rpc("eth_getTransactionReceipt") do(
-    txHash: Hash32
-  ) -> ReceiptObject:
-    await frontend.eth_getTransactionReceipt(txHash)
-
-  server.getServer().rpc("eth_getLogs") do(
-    filterOptions: FilterOptions
-  ) -> seq[LogObject]:
-    await frontend.eth_getLogs(filterOptions)
-
-  server.getServer().rpc("eth_newFilter") do(filterOptions: FilterOptions) -> string:
-    await frontend.eth_newFilter(filterOptions)
-
-  server.getServer().rpc("eth_uninstallFilter") do(filterId: string) -> bool:
-    await frontend.eth_uninstallFilter(filterId)
-
-  server.getServer().rpc("eth_getFilterLogs") do(filterId: string) -> seq[LogObject]:
-    await frontend.eth_getFilterLogs(filterId)
-
-  server.getServer().rpc("eth_getFilterChanges") do(filterId: string) -> seq[LogObject]:
-    await frontend.eth_getFilterChanges(filterId)
-
-  server.getServer().rpc("eth_blobBaseFee") do() -> UInt256:
-    await frontend.eth_blobBaseFee()
-
-  server.getServer().rpc("eth_gasPrice") do() -> Quantity:
-    await frontend.eth_gasPrice()
-
-  server.getServer().rpc("eth_maxPriorityFeePerGas") do() -> Quantity:
-    await frontend.eth_maxPriorityFeePerGas()
-
-proc stop*(server: JsonRpcServer) {.async: (raises: [CancelledError]).} =
-  try:
-    case server.kind
-    of Http:
-      await server.httpServer.closeWait()
-    of WebSocket:
-      await server.wsServer.closeWait()
-  except CatchableError as e:
-    raise newException(CancelledError, e.msg)
diff --git a/nimbus_verified_proxy/lc/lc.nim b/nimbus_verified_proxy/lc/lc.nim
new file mode 100644
index 0000000000..eaf6614822
--- /dev/null
+++ b/nimbus_verified_proxy/lc/lc.nim
@@ -0,0 +1,207 @@
+# nimbus_verified_proxy
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
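[Editor's note] The file below replaces gossip-driven header delivery with user-settable callbacks: internal hooks handed to LightClientProcessor forward to the callbacks only when they are set. Reduced to a dependency-free sketch (MiniClient, MiniHeader and fire are illustrative names, not part of this PR):

type
  MiniHeader = object
    slot: uint64
  MiniClient = ref object
    latest: MiniHeader
    onHeader: proc(c: MiniClient, h: MiniHeader)

proc fire(c: MiniClient) =
  # analogue of the internal onFinalizedHeader/onOptimisticHeader hooks:
  # forward only when a consumer has registered interest
  if c.onHeader != nil:
    c.onHeader(c, c.latest)

when isMainModule:
  let c = MiniClient()
  c.fire()                                 # no callback yet: a no-op
  c.onHeader = proc(c: MiniClient, h: MiniHeader) =
    echo "header at slot ", h.slot         # real code feeds the engine header store
  c.latest = MiniHeader(slot: 42)
  c.fire()                                 # prints: header at slot 42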
+
+{.push raises: [].}
+
+import
+  chronicles,
+  chronos,
+  eth/common/keys, # used for keys.rng
+  beacon_chain/gossip_processing/light_client_processor,
+  beacon_chain/[beacon_clock, conf],
+  ./lc_manager # use the modified light client manager
+
+type
+  LightClientHeaderCallback* = proc(
+    lightClient: LightClient, header: ForkedLightClientHeader
+  ) {.gcsafe, raises: [].}
+
+  LightClient* = ref object
+    cfg*: RuntimeConfig
+    forkDigests*: ref ForkDigests
+    getBeaconTime*: GetBeaconTimeFn
+    store*: ref ForkedLightClientStore
+    processor*: ref LightClientProcessor
+    manager: LightClientManager
+    onFinalizedHeader*, onOptimisticHeader*: LightClientHeaderCallback
+    trustedBlockRoot*: Option[Eth2Digest]
+
+func getFinalizedHeader*(lightClient: LightClient): ForkedLightClientHeader =
+  withForkyStore(lightClient.store[]):
+    when lcDataFork > LightClientDataFork.None:
+      var header = ForkedLightClientHeader(kind: lcDataFork)
+      header.forky(lcDataFork) = forkyStore.finalized_header
+      header
+    else:
+      default(ForkedLightClientHeader)
+
+func getOptimisticHeader*(lightClient: LightClient): ForkedLightClientHeader =
+  withForkyStore(lightClient.store[]):
+    when lcDataFork > LightClientDataFork.None:
+      var header = ForkedLightClientHeader(kind: lcDataFork)
+      header.forky(lcDataFork) = forkyStore.optimistic_header
+      header
+    else:
+      default(ForkedLightClientHeader)
+
+proc new*(
+    T: type LightClient,
+    rng: ref HmacDrbgContext,
+    cfg: RuntimeConfig,
+    forkDigests: ref ForkDigests,
+    getBeaconTime: GetBeaconTimeFn,
+    genesis_validators_root: Eth2Digest,
+    finalizationMode: LightClientFinalizationMode,
+): T =
+  let lightClient = LightClient(
+    cfg: cfg,
+    forkDigests: forkDigests,
+    getBeaconTime: getBeaconTime,
+    store: (ref ForkedLightClientStore)(),
+  )
+
+  func getTrustedBlockRoot(): Option[Eth2Digest] =
+    lightClient.trustedBlockRoot
+
+  proc onStoreInitialized() =
+    discard
+
+  proc onFinalizedHeader() =
+    if lightClient.onFinalizedHeader != nil:
+      lightClient.onFinalizedHeader(lightClient, lightClient.getFinalizedHeader)
+
+  proc onOptimisticHeader() =
+    if lightClient.onOptimisticHeader != nil:
+      lightClient.onOptimisticHeader(lightClient, lightClient.getOptimisticHeader)
+
+  const
+    dumpEnabled = false
+    dumpDirInvalid = "."
+    dumpDirIncoming = "."
+
+  # initialize without dumping
+  lightClient.processor = LightClientProcessor.new(
+    dumpEnabled, dumpDirInvalid, dumpDirIncoming, cfg, genesis_validators_root,
+    finalizationMode, lightClient.store, getBeaconTime, getTrustedBlockRoot,
+    onStoreInitialized, onFinalizedHeader, onOptimisticHeader,
+  )
+
+  proc lightClientVerifier(
+      obj: SomeForkedLightClientObject
+  ): Future[Result[void, LightClientVerifierError]] {.
+      async: (raises: [CancelledError], raw: true)
+  .} =
+    let resfut = Future[Result[void, LightClientVerifierError]]
+      .Raising([CancelledError])
+      .init("lightClientVerifier")
+    lightClient.processor[].addObject(MsgSource.gossip, obj, resfut)
+    resfut
+
+  proc bootstrapVerifier(obj: ForkedLightClientBootstrap): auto =
+    lightClientVerifier(obj)
+
+  proc updateVerifier(obj: ForkedLightClientUpdate): auto =
+    lightClientVerifier(obj)
+
+  proc finalityVerifier(obj: ForkedLightClientFinalityUpdate): auto =
+    lightClientVerifier(obj)
+
+  proc optimisticVerifier(obj: ForkedLightClientOptimisticUpdate): auto =
+    lightClientVerifier(obj)
+
+  func isLightClientStoreInitialized(): bool =
+    lightClient.store[].kind > LightClientDataFork.None
+
+  func isNextSyncCommitteeKnown(): bool =
+    withForkyStore(lightClient.store[]):
+      when lcDataFork > LightClientDataFork.None:
+        forkyStore.is_next_sync_committee_known
+      else:
+        false
+
+  func getFinalizedSlot(): Slot =
+    withForkyStore(lightClient.store[]):
+      when lcDataFork > LightClientDataFork.None:
+        forkyStore.finalized_header.beacon.slot
+      else:
+        GENESIS_SLOT
+
+  func getOptimisticSlot(): Slot =
+    withForkyStore(lightClient.store[]):
+      when lcDataFork > LightClientDataFork.None:
+        forkyStore.optimistic_header.beacon.slot
+      else:
+        GENESIS_SLOT
+
+  lightClient.manager = LightClientManager.init(
+    rng, cfg.timeParams, getTrustedBlockRoot, bootstrapVerifier, updateVerifier,
+    finalityVerifier, optimisticVerifier, isLightClientStoreInitialized,
+    isNextSyncCommitteeKnown, getFinalizedSlot, getOptimisticSlot, getBeaconTime,
+  )
+
+  lightClient
+
+proc new*(
+    T: type LightClient, chain: Option[string], trustedBlockRoot: Option[Eth2Digest]
+): T =
+  let metadata = loadEth2Network(chain)
+
+  # just for shorthand convenience
+  template cfg(): auto =
+    metadata.cfg
+
+  # initialize beacon node genesis data, beacon clock and forkDigests
+  let
+    genesisState =
+      try:
+        template genesisData(): auto =
+          metadata.genesis.bakedBytes
+
+        newClone(
+          readSszForkedHashedBeaconState(
+            cfg, genesisData.toOpenArray(genesisData.low, genesisData.high)
+          )
+        )
+      except CatchableError as err:
+        raiseAssert "Invalid baked-in state: " & err.msg
+
+    # getStateField reads info directly from a byte array
+    # get genesis time and instantiate the beacon clock
+    genesisTime = getStateField(genesisState[], genesis_time)
+    beaconClock = BeaconClock.init(cfg.timeParams, genesisTime).valueOr:
+      error "Invalid genesis time in state", genesisTime
+      quit QuitFailure
+
+    # get the function that itself gets the current beacon time
+    getBeaconTime = beaconClock.getBeaconTimeFn()
+    genesis_validators_root = getStateField(genesisState[], genesis_validators_root)
+    forkDigests = newClone ForkDigests.init(cfg, genesis_validators_root)
+
+    rng = keys.newRng()
+
+    # light client is set to optimistic finalization mode
+    lightClient = LightClient.new(
+      rng, cfg, forkDigests, getBeaconTime, genesis_validators_root,
+      LightClientFinalizationMode.Optimistic,
+    )
+
+  lightClient.trustedBlockRoot = trustedBlockRoot
+  lightClient
+
+proc setBackend*(lightClient: LightClient, backend: EthLCBackend) =
+  lightClient.manager.backend = backend
+
+proc start*(lightClient: LightClient) {.async: (raises: [CancelledError]).} =
+  info "Starting beacon light client", trusted_block_root = lightClient.trustedBlockRoot
+  await lightClient.manager.start()
+
+proc resetToFinalizedHeader*(
+    lightClient: LightClient,
+    header: ForkedLightClientHeader,
+    current_sync_committee: SyncCommittee,
+) =
+  lightClient.processor[].resetToFinalizedHeader(header, current_sync_committee)
diff --git a/nimbus_verified_proxy/lc/lc_manager.nim b/nimbus_verified_proxy/lc/lc_manager.nim
new file mode 100644
index 0000000000..2da773d16a
--- /dev/null
+++ b/nimbus_verified_proxy/lc/lc_manager.nim
@@ -0,0 +1,391 @@
+# nimbus_verified_proxy
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import chronos, chronicles
+import
+  beacon_chain/beacon_clock,
+  beacon_chain/networking/peer_scores,
+  beacon_chain/sync/[light_client_sync_helpers, sync_manager]
+
+logScope:
+  topics = "lcman"
+
+const MAX_REQUEST_LIGHT_CLIENT_UPDATES = 128
+
+type
+  Nothing = object
+  ResponseError = object of CatchableError
+  Endpoint[K, V] = (K, V) # https://github.com/nim-lang/Nim/issues/19531
+  Bootstrap = Endpoint[Eth2Digest, ForkedLightClientBootstrap]
+  UpdatesByRange = Endpoint[
+    tuple[startPeriod: SyncCommitteePeriod, count: uint64], ForkedLightClientUpdate
+  ]
+  FinalityUpdate = Endpoint[Nothing, ForkedLightClientFinalityUpdate]
+  OptimisticUpdate = Endpoint[Nothing, ForkedLightClientOptimisticUpdate]
+
+  NetRes*[T] = Result[T, void]
+  ValueVerifier[V] = proc(v: V): Future[Result[void, LightClientVerifierError]] {.
+    async: (raises: [CancelledError])
+  .}
+  BootstrapVerifier* = ValueVerifier[ForkedLightClientBootstrap]
+  UpdateVerifier* = ValueVerifier[ForkedLightClientUpdate]
+  FinalityUpdateVerifier* = ValueVerifier[ForkedLightClientFinalityUpdate]
+  OptimisticUpdateVerifier* = ValueVerifier[ForkedLightClientOptimisticUpdate]
+
+  GetTrustedBlockRootCallback* = proc(): Option[Eth2Digest] {.gcsafe, raises: [].}
+  GetBoolCallback* = proc(): bool {.gcsafe, raises: [].}
+  GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].}
+
+  LightClientUpdatesByRangeResponse* = NetRes[seq[ForkedLightClientUpdate]]
+
+  LightClientBootstrapProc = proc(
+    id: uint64, blockRoot: Eth2Digest
+  ): Future[NetRes[ForkedLightClientBootstrap]] {.async: (raises: [CancelledError]).}
+  LightClientUpdatesByRangeProc = proc(
+    id: uint64, startPeriod: SyncCommitteePeriod, count: uint64
+  ): Future[LightClientUpdatesByRangeResponse] {.async: (raises: [CancelledError]).}
+  LightClientFinalityUpdateProc = proc(
+    id: uint64
+  ): Future[NetRes[ForkedLightClientFinalityUpdate]] {.
+    async: (raises: [CancelledError])
+  .}
+  LightClientOptimisticUpdateProc = proc(
+    id: uint64
+  ): Future[NetRes[ForkedLightClientOptimisticUpdate]] {.
+    async: (raises: [CancelledError])
+  .}
+  UpdateScoreProc = proc(id: uint64, value: int) {.gcsafe, raises: [].}
+
+  EthLCBackend* = object
+    getLightClientBootstrap*: LightClientBootstrapProc
+    getLightClientUpdatesByRange*: LightClientUpdatesByRangeProc
+    getLightClientFinalityUpdate*: LightClientFinalityUpdateProc
+    getLightClientOptimisticUpdate*: LightClientOptimisticUpdateProc
+    updateScore*: UpdateScoreProc
+
+  LightClientManager* = object
+    rng: ref HmacDrbgContext
+    backend*: EthLCBackend
+    timeParams: TimeParams
+    getTrustedBlockRoot: GetTrustedBlockRootCallback
+    bootstrapVerifier: BootstrapVerifier
+    updateVerifier: UpdateVerifier
+    finalityUpdateVerifier: FinalityUpdateVerifier
+    optimisticUpdateVerifier: OptimisticUpdateVerifier
+    isLightClientStoreInitialized: GetBoolCallback
+    isNextSyncCommitteeKnown: GetBoolCallback
+    getFinalizedSlot: GetSlotCallback
+    getOptimisticSlot: GetSlotCallback
+    getBeaconTime: GetBeaconTimeFn
+    loopFuture: Future[void].Raising([CancelledError])
+
+func init*(
+    T: type LightClientManager,
+    rng: ref HmacDrbgContext,
+    timeParams: TimeParams,
+    getTrustedBlockRoot: GetTrustedBlockRootCallback,
+    bootstrapVerifier: BootstrapVerifier,
+    updateVerifier: UpdateVerifier,
+    finalityUpdateVerifier: FinalityUpdateVerifier,
+    optimisticUpdateVerifier: OptimisticUpdateVerifier,
+    isLightClientStoreInitialized: GetBoolCallback,
+    isNextSyncCommitteeKnown: GetBoolCallback,
+    getFinalizedSlot: GetSlotCallback,
+    getOptimisticSlot: GetSlotCallback,
+    getBeaconTime: GetBeaconTimeFn,
+): LightClientManager =
+  ## Initialize light client manager.
+  LightClientManager(
+    rng: rng,
+    timeParams: timeParams,
+    getTrustedBlockRoot: getTrustedBlockRoot,
+    bootstrapVerifier: bootstrapVerifier,
+    updateVerifier: updateVerifier,
+    finalityUpdateVerifier: finalityUpdateVerifier,
+    optimisticUpdateVerifier: optimisticUpdateVerifier,
+    isLightClientStoreInitialized: isLightClientStoreInitialized,
+    isNextSyncCommitteeKnown: isNextSyncCommitteeKnown,
+    getFinalizedSlot: getFinalizedSlot,
+    getOptimisticSlot: getOptimisticSlot,
+    getBeaconTime: getBeaconTime,
+  )
+
+# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.1/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
+proc doRequest(
+    e: typedesc[Bootstrap], backend: EthLCBackend, reqId: uint64, blockRoot: Eth2Digest
+): Future[NetRes[ForkedLightClientBootstrap]] {.
+    async: (raises: [CancelledError], raw: true)
+.} =
+  backend.getLightClientBootstrap(reqId, blockRoot)
+
+# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.1/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
+proc doRequest(
+    e: typedesc[UpdatesByRange],
+    backend: EthLCBackend,
+    reqId: uint64,
+    key: tuple[startPeriod: SyncCommitteePeriod, count: uint64],
+): Future[LightClientUpdatesByRangeResponse] {.
+    async: (raises: [ResponseError, CancelledError])
+.} =
+  let (startPeriod, count) = key
+  doAssert count > 0 and count <= MAX_REQUEST_LIGHT_CLIENT_UPDATES
+  let response = await backend.getLightClientUpdatesByRange(reqId, startPeriod, count)
+  if response.isOk:
+    let e = distinctBase(response.get).checkLightClientUpdates(startPeriod, count)
+    if e.isErr:
+      raise newException(ResponseError, e.error)
+  return response
+
+# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.1/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
+proc doRequest(
+    e: typedesc[FinalityUpdate], backend: EthLCBackend, reqId: uint64
+): Future[NetRes[ForkedLightClientFinalityUpdate]] {.
+    async: (raises: [CancelledError], raw: true)
+.} =
+  backend.getLightClientFinalityUpdate(reqId)
+
+# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.1/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
+proc doRequest(
+    e: typedesc[OptimisticUpdate], backend: EthLCBackend, reqId: uint64
+): Future[NetRes[ForkedLightClientOptimisticUpdate]] {.
+    async: (raises: [CancelledError], raw: true)
+.} =
+  backend.getLightClientOptimisticUpdate(reqId)
+
+template valueVerifier[E](
+    self: LightClientManager, e: typedesc[E]
+): ValueVerifier[E.V] =
+  when E.V is ForkedLightClientBootstrap:
+    self.bootstrapVerifier
+  elif E.V is ForkedLightClientUpdate:
+    self.updateVerifier
+  elif E.V is ForkedLightClientFinalityUpdate:
+    self.finalityUpdateVerifier
+  elif E.V is ForkedLightClientOptimisticUpdate:
+    self.optimisticUpdateVerifier
+  else:
+    static:
+      doAssert false
+
+# NOTE: Do not export this iterator; it is just for shorthand convenience
+iterator values(v: auto): auto =
+  ## Local helper for `workerTask` to share the same implementation for both
+  ## scalar and aggregate values, by treating scalars as 1-length aggregates.
+  when v is seq:
+    for i in v:
+      yield i
+  else:
+    yield v
+
+proc workerTask[E](
+    self: LightClientManager, e: typedesc[E], key: E.K
+): Future[bool] {.async: (raises: [CancelledError]).} =
+  var
+    didProgress = false
+    reqId: uint64
+  try:
+    self.rng[].generate(reqId)
+
+    let value =
+      when E.K is Nothing:
+        await E.doRequest(self.backend, reqId)
+      else:
+        await E.doRequest(self.backend, reqId, key)
+    if value.isOk:
+      var applyReward = false
+      for val in value.get().values:
+        let res = await self.valueVerifier(E)(val)
+        if res.isErr:
+          case res.error
+          of LightClientVerifierError.MissingParent:
+            # Stop, requires different request to progress
+            return didProgress
+          of LightClientVerifierError.Duplicate:
+            # Ignore, a concurrent request may have already fulfilled this
+            when E.V is ForkedLightClientBootstrap:
+              didProgress = true
+            else:
+              discard
+          of LightClientVerifierError.UnviableFork:
+            # Descore, peer is on an incompatible fork version
+            withForkyObject(val):
+              when lcDataFork > LightClientDataFork.None:
+                notice "Received value from an unviable fork",
+                  value = forkyObject, endpoint = E.name
+              else:
+                notice "Received value from an unviable fork", endpoint = E.name
+            self.backend.updateScore(reqId, PeerScoreUnviableFork)
+            return didProgress
+          of LightClientVerifierError.Invalid:
+            # Descore, received data is malformed
+            withForkyObject(val):
+              when lcDataFork > LightClientDataFork.None:
+                warn "Received invalid value",
+                  value = forkyObject.shortLog, endpoint = E.name
+              else:
+                warn "Received invalid value", endpoint = E.name
+            self.backend.updateScore(reqId, PeerScoreBadValues)
+            return didProgress
+        else:
+          # Reward, peer returned something useful
+          applyReward = true
+          didProgress = true
+      if applyReward:
+        self.backend.updateScore(reqId, PeerScoreGoodValues)
+    else:
+      self.backend.updateScore(reqId, PeerScoreNoValues)
+      debug "Failed to receive value on request", value, endpoint = E.name
+  except ResponseError as exc:
+    self.backend.updateScore(reqId, PeerScoreBadValues)
+    warn "Received invalid response", error = exc.msg, endpoint = E.name
+  except CancelledError as exc:
+    raise exc
+
+  return didProgress
+
+proc query[E](
+    self: LightClientManager, e: typedesc[E], key: E.K
+): Future[bool] {.async: (raises: [CancelledError]).} =
+  const NUM_WORKERS = 2
+  var workers: array[NUM_WORKERS, Future[bool]]
+
+  let progressFut =
+    Future[void].Raising([CancelledError]).init("lcmanProgress")
+  var
+    numCompleted = 0
+    success = false
+    maxCompleted = workers.len
+
+  proc handleFinishedWorker(future: pointer) =
+    try:
+      let didProgress = cast[Future[bool]](future).read()
+      if didProgress and not progressFut.finished:
+        progressFut.complete()
+        success = true
+    except CatchableError:
+      discard
+    finally:
+      inc numCompleted
+      if numCompleted == maxCompleted:
+        progressFut.cancelSoon()
+
+  # Start concurrent workers
+  for i in 0 ..< workers.len:
+    try:
+      workers[i] = self.workerTask(e, key)
+      workers[i].addCallback(handleFinishedWorker)
+    except CancelledError as exc:
+      raise exc
+    except CatchableError:
+      workers[i] = newFuture[bool]()
+      workers[i].complete(false)
+
+  # Wait for any worker to report progress, or for all workers to finish
+  try:
+    await progressFut
+  except CancelledError:
+    discard # cancellation only occurs when all workers have failed
+
+  # cancel all workers
+  for i in 0 ..< NUM_WORKERS:
+    workers[i].cancelSoon()
+
+  return success
+
+template query[E](
+    self: LightClientManager, e: typedesc[E]
+): Future[bool].Raising([CancelledError]) =
+  self.query(e, Nothing())
+
+# https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.1/specs/altair/light-client/light-client.md#light-client-sync-process
+proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} =
+  # try at least twice
+  let
+    NUM_RETRIES = 2
+    RETRY_TIMEOUT =
+      chronos.seconds(int64(self.timeParams.SECONDS_PER_SLOT) div (NUM_RETRIES + 1))
+
+  while true:
+    let
+      wallTime = self.getBeaconTime()
+      current = wallTime.slotOrZero(self.timeParams)
+      finalized = self.getFinalizedSlot()
+      optimistic = self.getOptimisticSlot()
+
+    # Obtain bootstrap data once a trusted block root is supplied
+    if not self.isLightClientStoreInitialized():
+      let trustedBlockRoot = self.getTrustedBlockRoot()
+
+      if trustedBlockRoot.isNone:
+        debug "TrustedBlockRoot unavailable, re-attempting bootstrap download"
+        await sleepAsync(RETRY_TIMEOUT)
+        continue
+
+      let didProgress = await self.query(Bootstrap, trustedBlockRoot.get)
+
+      if not didProgress:
+        debug "Re-attempting bootstrap download"
+        await sleepAsync(RETRY_TIMEOUT)
+
+      continue
+
+    # check and download sync committee updates
+    if finalized.sync_committee_period == optimistic.sync_committee_period and
+        not self.isNextSyncCommitteeKnown():
+      if finalized.sync_committee_period >= current.sync_committee_period:
+        debug "Downloading light client sync committee updates",
+          start_period = finalized.sync_committee_period, count = 1
+        discard await self.query(
+          UpdatesByRange,
+          (startPeriod: finalized.sync_committee_period, count: uint64(1)),
+        )
+      else:
+        let count = min(
+          current.sync_committee_period - finalized.sync_committee_period,
+          MAX_REQUEST_LIGHT_CLIENT_UPDATES,
+        )
+        debug "Downloading light client sync committee updates",
+          start_period = finalized.sync_committee_period, count = count
+        discard await self.query(
+          UpdatesByRange,
+          (startPeriod: finalized.sync_committee_period, count: uint64(count)),
+        )
+    elif finalized.sync_committee_period + 1 < current.sync_committee_period:
+      let count = min(
+        current.sync_committee_period - (finalized.sync_committee_period + 1),
+        MAX_REQUEST_LIGHT_CLIENT_UPDATES,
+      )
+      debug "Downloading light client sync committee updates",
+        start_period = finalized.sync_committee_period, count = count
+      discard await self.query(
+        UpdatesByRange,
+        (startPeriod: finalized.sync_committee_period, count: uint64(count)),
+      )
+
+    # check and download optimistic update
+    if optimistic < current:
+      debug "Downloading light client optimistic updates", slot = current
+      let didProgress = await self.query(OptimisticUpdate)
+      if not didProgress:
+        await sleepAsync(RETRY_TIMEOUT)
+        continue
+
+    # check and download finality update
+    if current.epoch > finalized.epoch + 2:
+      debug "Downloading light client finality updates", slot = current
+      let didProgress = await self.query(FinalityUpdate)
+      if not didProgress:
+        await sleepAsync(RETRY_TIMEOUT)
+        continue
+
+    # check for updates every slot
+    await sleepAsync(chronos.seconds(int64(self.timeParams.SECONDS_PER_SLOT)))
+
+proc start*(self: LightClientManager) {.async: (raises: [CancelledError]).} =
+  ## Start light client manager's loop.
+  await self.loop()
diff --git a/nimbus_verified_proxy/lc_backend.nim b/nimbus_verified_proxy/lc_backend.nim
new file mode 100644
index 0000000000..54c552fb98
--- /dev/null
+++ b/nimbus_verified_proxy/lc_backend.nim
@@ -0,0 +1,158 @@
+# nimbus_verified_proxy
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [], gcsafe.}
+
+import
+  stint,
+  chronos,
+  chronicles,
+  presto/client,
+  beacon_chain/spec/eth2_apis/rest_light_client_calls,
+  beacon_chain/spec/presets,
+  beacon_chain/spec/forks,
+  ./lc/lc_manager,
+  ./nimbus_verified_proxy_conf
+
+logScope:
+  topics = "LCRestClientPool"
+
+const
+  MaxMessageBodyBytes* = 128 * 1024 * 1024 # 128 MB (JSON encoded)
+  BASE_URL = "/eth/v1/beacon/light_client"
+
+type
+  LCRestClient = ref object
+    score: int
+    restClient: RestClientRef
+
+  LCRestClientPool* = ref object
+    cfg: RuntimeConfig
+    forkDigests: ref ForkDigests
+    clients: seq[LCRestClient]
+    idMap: Table[uint64, LCRestClient]
+    urls: seq[string]
+
+func new*(
+    T: type LCRestClientPool, cfg: RuntimeConfig, forkDigests: ref ForkDigests
+): LCRestClientPool =
+  LCRestClientPool(cfg: cfg, forkDigests: forkDigests, clients: @[])
+
+proc addEndpoints*(pool: LCRestClientPool, urlList: UrlList) {.raises: [ValueError].} =
+  for endpoint in urlList.urls:
+    if endpoint in pool.urls:
+      continue
+
+    let restClient = RestClientRef.new(endpoint).valueOr:
+      raise newException(ValueError, $error)
+
+    pool.clients.add(LCRestClient(score: 0, restClient: restClient))
+    pool.urls.add(endpoint)
+
+proc closeAll*(pool: LCRestClientPool) {.async: (raises: []).} =
+  for client in pool.clients:
+    await client.restClient.closeWait()
+
+  pool.clients.setLen(0)
+  pool.urls.setLen(0)
+
+proc getClientForReqId(pool: LCRestClientPool, reqId: uint64): LCRestClient =
+  if pool.idMap.contains(reqId):
+    return pool.idMap.getOrDefault(reqId)
+
+  let client = pool.clients[reqId mod pool.clients.lenu64]
+  pool.idMap[reqId] = client
+
+  client
+
+proc getEthLCBackend*(pool: LCRestClientPool): EthLCBackend =
+  let
+    getLCBootstrapProc = proc(
+        reqId: uint64, blockRoot: Eth2Digest
+    ): Future[NetRes[ForkedLightClientBootstrap]] {.async: (raises: [CancelledError]).} =
+      let
+        client = pool.getClientForReqId(reqId)
+        res =
+          try:
+            await client.restClient.getLightClientBootstrap(
+              blockRoot, pool.cfg, pool.forkDigests
+            )
+          except CancelledError as e:
+            raise e
+          except CatchableError as e:
+            return err()
+
+      ok(res)
+
+    getLCUpdatesProc = proc(
+        reqId: uint64, startPeriod: SyncCommitteePeriod, count: uint64
+    ): Future[LightClientUpdatesByRangeResponse] {.async: (raises: [CancelledError]).} =
+      let
+        client = pool.getClientForReqId(reqId)
+        res =
+          try:
+            await client.restClient.getLightClientUpdatesByRange(
+              startPeriod, count, pool.cfg, pool.forkDigests
+            )
+          except CancelledError as e:
+            raise e
+          except CatchableError as e:
+            return err()
+
+      ok(res)
+
+    getLCFinalityProc = proc(
+        reqId: uint64
+    ): Future[NetRes[ForkedLightClientFinalityUpdate]] {.
+        async: (raises: [CancelledError])
+    .} =
+      let
+        client = pool.getClientForReqId(reqId)
+        res =
+          try:
+            await client.restClient.getLightClientFinalityUpdate(
+              pool.cfg, pool.forkDigests
+            )
+          except CancelledError as e:
+            raise e
+          except CatchableError as e:
+            return err()
+
+      ok(res)
+
+    getLCOptimisticProc = proc(
+        reqId: uint64
+    ): Future[NetRes[ForkedLightClientOptimisticUpdate]] {.
+        async: (raises: [CancelledError])
+    .} =
+      let
+        client = pool.getClientForReqId(reqId)
+        res =
+          try:
+            await client.restClient.getLightClientOptimisticUpdate(
+              pool.cfg, pool.forkDigests
+            )
+          except CancelledError as e:
+            raise e
+          except CatchableError as e:
+            return err()
+
+      ok(res)
+
+    updateScoreProc = proc(reqId: uint64, value: int) =
+      let client = pool.getClientForReqId(reqId)
+      client.score += value
+
+      pool.idMap.del(reqId)
+
+  EthLCBackend(
+    getLightClientBootstrap: getLCBootstrapProc,
+    getLightClientUpdatesByRange: getLCUpdatesProc,
+    getLightClientFinalityUpdate: getLCFinalityProc,
+    getLightClientOptimisticUpdate: getLCOptimisticProc,
+    updateScore: updateScoreProc,
+  )
diff --git a/nimbus_verified_proxy/libverifproxy/example.c b/nimbus_verified_proxy/libverifproxy/example.c
new file mode 100644
index 0000000000..1ff0d74e38
--- /dev/null
+++ b/nimbus_verified_proxy/libverifproxy/example.c
@@ -0,0 +1,70 @@
+/**
+ * nimbus_verified_proxy
+ * Copyright (c) 2025 Status Research & Development GmbH
+ * Licensed and distributed under either of
+ *  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+ *  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+ * at your option. This file may not be copied, modified, or distributed except according to those terms.
+ */
+
+#include "./verifproxy.h"
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+static bool waitOver = true;
+
+void onBlockNumber(Context *ctx, int status, char *res) {
+  printf("Blocknumber: %s\n", res);
+  freeResponse(res);
+}
+
+void onStart(Context *ctx, int status, char *res) {
+  printf("Verified Proxy started successfully\n");
+  printf("status: %d\n", status);
+  printf("response: %s\n", res);
+  if (status < 0) stopVerifProxy(ctx);
+  freeResponse(res);
+}
+
+void onBalance(Context *ctx, int status, char *res) {
+  printf("Balance: %s\n", res);
+  freeResponse(res);
+}
+
+void waitIsOver(Context *ctx, int status, char *res) {
+  printf("waiting finished successfully\n");
+  printf("status: %d\n", status);
+
+  eth_blockNumber(ctx, onBlockNumber);
+  eth_getBalance(ctx, "0x954a86C613fd1fBaC9C7A43a071A68254C75E4AC", "latest", onBalance);
+  waitOver = true;
+
+  freeResponse(res);
+}
+
+int main() {
+  NimMain();
+  Context *ctx = createAsyncTaskContext();
+
+  char* jsonConfig =
+    "{"
+      "\"eth2Network\": \"mainnet\","
+      "\"trustedBlockRoot\": \"0xd9e4f5b2e7a8e50f9348a1890114ae522d3771ddfb44d8b7e7e2978c21869e91\","
+      "\"backendUrl\": \"https://eth.blockrazor.xyz\","
+      "\"beaconApiUrls\": \"http://testing.mainnet.beacon-api.nimbus.team,http://www.lightclientdata.org\","
+      "\"logLevel\": \"FATAL\","
+      "\"logStdout\": \"None\""
+    "}";
+
+  startVerifProxy(ctx, jsonConfig, onStart);
+
+  while(true) {
+    if (waitOver) {
+      waitOver = false;
+      nonBusySleep(ctx, 10, waitIsOver);
+    }
+    pollAsyncTaskEngine(ctx);
+  }
+  freeContext(ctx);
+}
diff --git a/nimbus_verified_proxy/libverifproxy/verifproxy.h b/nimbus_verified_proxy/libverifproxy/verifproxy.h
index db92f37d01..762dce225f 100644
--- a/nimbus_verified_proxy/libverifproxy/verifproxy.h
+++ b/nimbus_verified_proxy/libverifproxy/verifproxy.h
@@ -1,6 +1,6 @@
 /**
  * nimbus_verified_proxy
- * Copyright (c) 2024 Status Research & Development GmbH
+ * Copyright (c) 2025 Status Research & Development GmbH
  * Licensed and distributed under either of
  *  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
  *  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
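[Editor's note] The header being rewritten below implies a completion-queue design: every exported call enqueues a task inside the Context, the Nim async engine fills in status/response when the future settles, and pollAsyncTaskEngine() drains finished tasks into the C callbacks. A dependency-free Nim sketch of that queue (MiniTask, MiniCtx, submit and pollTasks are illustrative names, not the library API):

type
  MiniTask = ref object
    status: int
    response: string
    finished: bool
    cb: proc(status: int, res: string)

  MiniCtx = ref object
    tasks: seq[MiniTask]

proc submit(ctx: MiniCtx, cb: proc(status: int, res: string)): MiniTask =
  # every exported call enqueues one task; the async engine completes it later
  result = MiniTask(cb: cb)
  ctx.tasks.add(result)

proc pollTasks(ctx: MiniCtx) =
  # drain finished tasks into their callbacks, keep the rest queued
  var pending: seq[MiniTask]
  for t in ctx.tasks:
    if t.finished:
      t.cb(t.status, t.response)
    else:
      pending.add(t)
  ctx.tasks = pending

when isMainModule:
  let ctx = MiniCtx()
  let t = ctx.submit(proc(status: int, res: string) =
    echo "done: ", status, " ", res)
  ctx.pollTasks()       # not finished yet: nothing happens
  t.status = 0
  t.response = "0x10"
  t.finished = true
  ctx.pollTasks()       # prints: done: 0 0x10

This is why example.c above can stay single-threaded: the host drives the event loop by calling pollAsyncTaskEngine() in its own loop instead of the library spawning a thread.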
@@ -10,12 +10,43 @@ #ifndef __verifproxy__ #define __verifproxy__ -typedef struct VerifProxyContext VerifProxyContext; -typedef void (*onHeaderCallback)(const char* s, int t); +#ifdef __cplusplus +extern "C" { +#endif -void quit(void); +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif -VerifProxyContext* startVerifProxy(const char* configJson, onHeaderCallback onHeader); -void stopVerifProxy(VerifProxyContext*); +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if __has_attribute(warn_unused_result) +#define ETH_RESULT_USE_CHECK __attribute__((warn_unused_result)) +#else +#define ETH_RESULT_USE_CHECK +#endif + +void NimMain(void); + +typedef struct Context Context; + +ETH_RESULT_USE_CHECK Context *createAsyncTaskContext(); + +typedef void (*CallBackProc) (Context *ctx, int status, char *res); + +void eth_getBalance(Context *ctx, char *address, char *blockTag, CallBackProc onBalance); +void eth_blockNumber(Context *ctx, CallBackProc cb); +void freeResponse(char *res); +void freeContext(Context *ctx); +void nonBusySleep(Context *ctx, int secs, CallBackProc cb); +void startVerifProxy(Context *ctx, char* configJson, CallBackProc onstart); +void stopVerifProxy(Context *ctx); +void pollAsyncTaskEngine(Context *ctx); + +#ifdef __cplusplus +} +#endif #endif /* __verifproxy__ */ diff --git a/nimbus_verified_proxy/libverifproxy/verifproxy.nim b/nimbus_verified_proxy/libverifproxy/verifproxy.nim index 5a70b0cfad..4718c6dbbb 100644 --- a/nimbus_verified_proxy/libverifproxy/verifproxy.nim +++ b/nimbus_verified_proxy/libverifproxy/verifproxy.nim @@ -6,15 +6,39 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, json, net], + algorithm, + json_serialization, + chronos, eth/net/nat, + std/[atomics, locks, json, net, strutils], beacon_chain/spec/[digest, network], beacon_chain/nimbus_binary_common, + web3/[eth_api_types, conversions], + ../engine/types, + ../engine/engine, + ../lc/lc, + ../lc_backend, ../nimbus_verified_proxy, - ../nimbus_verified_proxy_conf + ../nimbus_verified_proxy_conf, + ../json_rpc_backend -proc quit*() {.exportc, dynlib.} = - echo "Quitting" +{.pragma: exported, cdecl, exportc, dynlib, raises: [].} +{.pragma: exportedConst, exportc, dynlib.} + +type + Task = ref object + status: int + response: string + finished: bool + cb: CallBackProc + + Context = object + tasks: seq[Task] + stop: bool + frontend: EthApiFrontend + + CallBackProc = + proc(ctx: ptr Context, status: int, res: cstring) {.cdecl, gcsafe, raises: [].} proc NimMain() {.importc, exportc, dynlib.} @@ -30,63 +54,261 @@ proc initLib() = locals = addr(locals) nimGC_setStackBottom(locals) -proc runContext(ctx: ptr Context) {.thread.} = - const defaultListenAddress = (static parseIpAddress("0.0.0.0")) - let str = $ctx.configJson - try: - let jsonNode = parseJson(str) - - let rpcAddr = jsonNode["RpcAddress"].getStr() - let myConfig = VerifiedProxyConf( - listenAddress: some(defaultListenAddress), - eth2Network: some(jsonNode["Eth2Network"].getStr()), - trustedBlockRoot: Eth2Digest.fromHex(jsonNode["TrustedBlockRoot"].getStr()), - backendUrl: parseCmdArg(Web3Url, jsonNode["Web3Url"].getStr()), - frontendUrl: parseCmdArg(Web3Url, jsonNode["Web3Url"].getStr()), - logLevel: jsonNode["LogLevel"].getStr(), - maxPeers: 160, - nat: NatConfig(hasExtIp: false, nat: NatAny), - logStdout: StdoutLogKind.Auto, - dataDirFlag: none(OutDir), - tcpPort: Port(defaultEth2TcpPort), - udpPort: Port(defaultEth2TcpPort), - agentString: "nimbus", - 
discv5Enabled: true, - ) +proc toUnmanagedPtr[T](x: ref T): ptr T = + GC_ref(x) + addr x[] - run(myConfig, ctx) - except Exception as err: - echo "Exception when running ", getCurrentExceptionMsg(), err.getStackTrace() - ctx.onHeader(getCurrentExceptionMsg(), 3) - ctx.cleanup() +func asRef[T](x: ptr T): ref T = + cast[ref T](x) + +proc destroy[T](x: ptr T) = + x[].reset() + GC_unref(asRef(x)) + +proc createAsyncTaskContext(): ptr Context {.exported.} = + let ctx = Context.new() + ctx.toUnmanagedPtr() + +proc createTask(cb: CallBackProc): Task = + let task = Task() + task.finished = false + task.cb = cb + task + +proc freeResponse(res: cstring) {.exported.} = + deallocShared(res) + +proc freeContext(ctx: ptr Context) {.exported.} = + ctx.destroy() + +proc alloc(str: string): cstring = + var ret = cast[cstring](allocShared(str.len + 1)) + let s = cast[seq[char]](str) + for i in 0 ..< str.len: + ret[i] = s[i] + ret[str.len] = '\0' + return ret + +# NOTE: this is not the C callback. This is just a callback for the future +template callbackToC(ctx: ptr Context, cb: CallBackProc, asyncCall: untyped) = + let task = createTask(cb) + ctx.tasks.add(task) + + let fut = asyncCall + + fut.addCallback proc(_: pointer) {.gcsafe.} = + if fut.cancelled(): + task.response = Json.encode(fut.error()) + task.finished = true + task.status = -2 + elif fut.failed(): + task.response = Json.encode(fut.error()) + task.finished = true + task.status = -1 + else: + task.response = Json.encode(fut.value()) + task.status = 0 + task.finished = true + +proc eth_blockNumber(ctx: ptr Context, cb: CallBackProc) {.exported.} = + callbackToC(ctx, cb): + ctx.frontend.eth_blockNumber() + +proc eth_getBalance( + ctx: ptr Context, address: cstring, blockTag: cstring, cb: CallBackProc +) {.exported.} = + let + addressTyped = + try: + Address.fromHex($address) + except ValueError as e: + cb(ctx, -3, alloc(e.msg)) + return + + blockTagTyped = + try: + BlockTag(kind: bidNumber, number: Quantity(parseBiggestUInt($blockTag))) + except ValueError: + BlockTag(kind: bidAlias, alias: $blockTag) + + callbackToC(ctx, cb): + ctx.frontend.eth_getBalance(addressTyped, blockTagTyped) + +proc pollAsyncTaskEngine(ctx: ptr Context) {.exported.} = + var delList: seq[int] = @[] + + let taskLen = ctx.tasks.len + for idx in 0 ..< taskLen: + let task = ctx.tasks[idx] + if task.finished: + task.cb(ctx, task.status, alloc(task.response)) + delList.add(idx) - #[let node = parseConfigAndRun(ctx.configJson) + # sequence changes as we delete so delting in descending order + for i in delList.sorted(SortOrder.Descending): + ctx.tasks.delete(i) - while not ctx[].stop: # and node.running: - let timeout = sleepAsync(100.millis) - waitFor timeout + if ctx.tasks.len > 0: + poll() - # do cleanup - node.stop()]# +proc load( + T: type VerifiedProxyConf, configJson: string +): T {.raises: [CatchableError, ValueError].} = + let jsonNode = parseJson($configJson) -proc startVerifProxy*( - configJson: cstring, onHeader: OnHeaderCallback -): ptr Context {.exportc, dynlib.} = - initLib() + let + eth2Network = some(jsonNode.getOrDefault("eth2Network").getStr("mainnet")) + trustedBlockRoot = + if jsonNode.contains("trustedBlockRoot"): + Eth2Digest.fromHex(jsonNode["trustedBlockRoot"].getStr()) + else: + raise + newException(ValueError, "`trustedBlockRoot` not specified in JSON config") + backendUrl = + if jsonNode.contains("backendUrl"): + parseCmdArg(Web3Url, jsonNode["backendUrl"].getStr()) + else: + raise newException(ValueError, "`backendUrl` not specified in JSON config") + 
beaconApiUrls = + if jsonNode.contains("beaconApiUrls"): + parseCmdArg(UrlList, jsonNode["beaconApiUrls"].getStr()) + else: + raise newException(ValueError, "`beaconApiUrls` not specified in JSON config") + logLevel = jsonNode.getOrDefault("logLevel").getStr("INFO") + logStdout = + case jsonNode.getOrDefault("logStdout").getStr("None") + of "Colors": StdoutLogKind.Colors + of "NoColors": StdoutLogKind.NoColors + of "Json": StdoutLogKind.Json + of "Auto": StdoutLogKind.Auto + else: StdoutLogKind.None + maxBlockWalk = jsonNode.getOrDefault("maxBlockWalk").getInt(1000) + headerStoreLen = jsonNode.getOrDefault("headerStoreLen").getInt(256) + storageCacheLen = jsonNode.getOrDefault("storageCacheLen").getInt(256) + codeCacheLen = jsonNode.getOrDefault("codeCacheLen").getInt(64) + accountCacheLen = jsonNode.getOrDefault("accountCacheLen").getInt(128) - let ctx = createShared(Context, 1) - ctx.configJson = cast[cstring](allocShared0(len(configJson) + 1)) - ctx.onHeader = onHeader - copyMem(ctx.configJson, configJson, len(configJson)) + return VerifiedProxyConf( + eth2Network: eth2Network, + trustedBlockRoot: trustedBlockRoot, + backendUrl: backendUrl, + beaconApiUrls: beaconApiUrls, + logLevel: logLevel, + logStdout: logStdout, + dataDirFlag: none(OutDir), + maxBlockWalk: uint64(maxBlockWalk), + headerStoreLen: headerStoreLen, + storageCacheLen: storageCacheLen, + codeCacheLen: codeCacheLen, + accountCacheLen: accountCacheLen, + ) +proc run( + ctx: ptr Context, configJson: string +) {.async: (raises: [ValueError, CancelledError, CatchableError]).} = try: - createThread(ctx.thread, runContext, ctx) + initLib() except Exception as err: - echo "Exception when attempting to invoke createThread ", - getCurrentExceptionMsg(), err.getStackTrace() - ctx.onHeader(getCurrentExceptionMsg(), 3) - ctx.cleanup() - return ctx + raise newException(CancelledError, err.msg) -proc stopVerifProxy*(ctx: ptr Context) {.exportc, dynlib.} = + let config = VerifiedProxyConf.load($configJson) + + setupLogging(config.logLevel, config.logStdout) + + let + engineConf = RpcVerificationEngineConf( + chainId: getConfiguredChainId(config.eth2Network), + maxBlockWalk: config.maxBlockWalk, + headerStoreLen: config.headerStoreLen, + accountCacheLen: config.accountCacheLen, + codeCacheLen: config.codeCacheLen, + storageCacheLen: config.storageCacheLen, + ) + engine = RpcVerificationEngine.init(engineConf) + lc = LightClient.new(config.eth2Network, some config.trustedBlockRoot) + + # initialize backend for JSON-RPC + jsonRpcClient = JsonRpcClient.init(config.backendUrl) + + # initialize backend for light client updates + lcRestClientPool = LCRestClientPool.new(lc.cfg, lc.forkDigests) + + # connect light client to LC by registering on header methods + # to use engine header store + connectLCToEngine(lc, engine) + + # add light client backend + lc.setBackend(lcRestClientPool.getEthLCBackend()) + + # the backend only needs the url to connect to + engine.backend = jsonRpcClient.getEthApiBackend() + + # inject the frontend into c context + ctx.frontend = engine.frontend + + # start backend + var status = await jsonRpcClient.start() + if status.isErr(): + raise newException(ValueError, status.error) + + # adding endpoints will also start the backend + lcRestClientPool.addEndpoints(config.beaconApiUrls) + + # this starts the light client manager which is + # an endless loop + await lc.start() + +# TODO: if frontend is accessed if this fails then it throws a sefault +# TODO: there is log leakage(at WARN level) even when logging is set to 
FATAL and stdout is set to None +proc startVerifProxy( + ctx: ptr Context, configJson: cstring, cb: CallBackProc +) {.exported.} = + let task = createTask(cb) + + ctx.tasks.add(task) + + let fut = run(ctx, $configJson) + + fut.addCallback proc(udata: pointer) {.gcsafe.} = + if fut.cancelled(): + task.response = Json.encode(fut.error()) + task.finished = true + task.status = -2 + elif fut.failed(): + task.response = Json.encode(fut.error()) + task.finished = true + task.status = -1 + else: + task.response = "success" #result is void hence we just provide a string + task.status = 0 + task.finished = true + +proc stopVerifProxy(ctx: ptr Context) {.exported.} = ctx.stop = true + +# C-callable: downloads a page and returns a heap-allocated C string. +proc nonBusySleep(ctx: ptr Context, secs: cint, cb: CallBackProc) {.exported.} = + let task = createTask(cb) + + ctx.tasks.add(task) + + let fut = sleepAsync((secs).seconds) + + fut.addCallback proc(_: pointer) {.gcsafe.} = + if fut.cancelled: + task.response = "cancelled" + task.finished = true + task.status = -2 + elif fut.failed(): + task.response = "failed" + task.finished = true + task.status = -1 + else: + try: + task.response = "slept" + task.status = 0 + except CatchableError as e: + task.response = e.msg + task.status = -1 + finally: + task.finished = true diff --git a/nimbus_verified_proxy/nimbus_verified_proxy.nim b/nimbus_verified_proxy/nimbus_verified_proxy.nim index 43460dc519..ccd8abad1c 100644 --- a/nimbus_verified_proxy/nimbus_verified_proxy.nim +++ b/nimbus_verified_proxy/nimbus_verified_proxy.nim @@ -14,33 +14,24 @@ import confutils, eth/common/[keys, eth_types_rlp], json_rpc/rpcproxy, - beacon_chain/gossip_processing/optimistic_processor, + beacon_chain/gossip_processing/light_client_processor, beacon_chain/networking/network_metadata, - beacon_chain/networking/topic_params, beacon_chain/spec/beaconstate, - beacon_chain/[beacon_clock, buildinfo, light_client, nimbus_binary_common], + beacon_chain/conf, + beacon_chain/[beacon_clock, buildinfo, nimbus_binary_common], ../execution_chain/common/common, ./nimbus_verified_proxy_conf, ./engine/engine, ./engine/header_store, ./engine/utils, ./engine/types, + ./lc/lc, + ./lc_backend, ./json_rpc_backend, ./json_rpc_frontend, ../execution_chain/version_info -type OnHeaderCallback* = proc(s: cstring, t: int) {.cdecl, raises: [], gcsafe.} -type Context* = object - thread*: Thread[ptr Context] - configJson*: cstring - stop*: bool - onHeader*: OnHeaderCallback - -proc cleanup*(ctx: ptr Context) = - dealloc(ctx.configJson) - freeShared(ctx) - -proc verifyChaindId( +proc verifyChainId( engine: RpcVerificationEngine ): Future[void] {.async: (raises: []).} = let providerId = @@ -57,129 +48,15 @@ proc verifyChaindId( expectedChain = engine.chainId, providerChain = providerId quit 1 -func getConfiguredChainId(networkMetadata: Eth2NetworkMetadata): UInt256 = - if networkMetadata.eth1Network.isSome(): - let - net = networkMetadata.eth1Network.get() - chainId = - case net - of mainnet: 1.u256 - of sepolia: 11155111.u256 - of hoodi: 560048.u256 - return chainId - else: - return networkMetadata.cfg.DEPOSIT_CHAIN_ID.u256 - -proc run*( - config: VerifiedProxyConf, ctx: ptr Context -) {.raises: [CatchableError], gcsafe.} = - {.gcsafe.}: - setupLogging(config.logLevel, config.logStdout) - - try: - notice "Launching Nimbus verified proxy", - version = fullVersionStr, cmdParams = commandLineParams(), config - except Exception: - notice "commandLineParams() exception" - - # load constants and metadata for the 
selected chain - let metadata = loadEth2Network(config.eth2Network) - - let - engineConf = RpcVerificationEngineConf( - chainId: getConfiguredChainId(metadata), - maxBlockWalk: config.maxBlockWalk, - headerStoreLen: config.headerStoreLen, - accountCacheLen: config.accountCacheLen, - codeCacheLen: config.codeCacheLen, - storageCacheLen: config.storageCacheLen, - ) - engine = RpcVerificationEngine.init(engineConf) - jsonRpcClient = JsonRpcClient.init(config.backendUrl) - jsonRpcServer = JsonRpcServer.init(config.frontendUrl) - - # the backend only needs the url to connect to - engine.backend = jsonRpcClient.getEthApiBackend() - - # inject frontend - jsonRpcServer.injectEngineFrontend(engine.frontend) - - # start frontend and backend - var status = waitFor jsonRpcClient.start() - if status.isErr(): - raise newException(ValueError, status.error) - - status = jsonRpcServer.start() - if status.isErr(): - raise newException(ValueError, status.error) - - # just for short hand convenience - template cfg(): auto = - metadata.cfg - - # initialize beacon node genesis data, beacon clock and forkDigests - let - genesisState = - try: - template genesisData(): auto = - metadata.genesis.bakedBytes - - newClone( - readSszForkedHashedBeaconState( - cfg, genesisData.toOpenArray(genesisData.low, genesisData.high) - ) - ) - except CatchableError as err: - raiseAssert "Invalid baked-in state: " & err.msg - - # getStateField reads seeks info directly from a byte array - # get genesis time and instantiate the beacon clock - genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(cfg.timeParams, genesisTime).valueOr: - error "Invalid genesis time in state", genesisTime - quit QuitFailure - - # get the function that itself get the current beacon time - getBeaconTime = beaconClock.getBeaconTimeFn() - - genesis_validators_root = getStateField(genesisState[], genesis_validators_root) - forkDigests = newClone ForkDigests.init(cfg, genesis_validators_root) - - genesisBlockRoot = get_initial_beacon_block(genesisState[]).root - - # transform the config to fit as a light client config and as a p2p node(Eth2Node) config - var lcConfig = config.asLightClientConf() - for node in metadata.bootstrapNodes: - lcConfig.bootstrapNodes.add node - - # create new network keys, create a p2p node(Eth2Node) and create a light client - let - rng = keys.newRng() - - netKeys = getRandomNetKeys(rng[]) - - network = createEth2Node( - rng, lcConfig, netKeys, cfg, forkDigests, getBeaconTime, genesis_validators_root - ) - - # light client is set to optimistic finalization mode - lightClient = createLightClient( - network, rng, lcConfig, cfg, forkDigests, getBeaconTime, genesis_validators_root, - LightClientFinalizationMode.Optimistic, - ) - - # registerbasic p2p protocols for maintaing peers ping/status/get_metadata/... etc. 
- network.registerProtocol( - PeerSync, - PeerSync.NetworkState.init(cfg, forkDigests, genesisBlockRoot, getBeaconTime), - ) - - # start the p2p network and rpcProxy - waitFor network.startListening() - waitFor network.start() - # verify chain id that the proxy is connected to - waitFor engine.verifyChaindId() +func getConfiguredChainId*(chain: Option[string]): UInt256 = + let net = chain.get("mainnet").toLowerAscii() + case net + of "mainnet": 1.u256 + of "sepolia": 11155111.u256 + of "hoodi": 560048.u256 + else: 1.u256 +proc connectLCToEngine*(lightClient: LightClient, engine: RpcVerificationEngine) = proc onFinalizedHeader( lightClient: LightClient, finalizedHeader: ForkedLightClientHeader ) = @@ -190,12 +67,6 @@ proc run*( if res.isErr(): error "finalized header update error", error = res.error() - - if ctx != nil: - try: - ctx.onHeader(cstring(Json.encode(forkyHeader)), 0) - except SerializationError as e: - error "finalizedHeaderCallback exception", error = e.msg else: error "pre-bellatrix light client headers do not have the execution payload header" @@ -209,107 +80,78 @@ proc run*( if res.isErr(): error "header store add error", error = res.error() - - if ctx != nil: - try: - ctx.onHeader(cstring(Json.encode(forkyHeader)), 1) - except SerializationError as e: - error "optimisticHeaderCallback exception", error = e.msg else: error "pre-bellatrix light client headers do not have the execution payload header" lightClient.onFinalizedHeader = onFinalizedHeader lightClient.onOptimisticHeader = onOptimisticHeader - lightClient.trustedBlockRoot = some config.trustedBlockRoot - lightClient.installMessageValidators() - - func shouldSyncOptimistically(wallSlot: Slot): bool = - let optimisticHeader = lightClient.optimisticHeader - withForkyHeader(optimisticHeader): - when lcDataFork > LightClientDataFork.None: - # Check whether light client has synced sufficiently close to wall slot - const maxAge = 2 * SLOTS_PER_EPOCH - forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge - else: - false - - var blocksGossipState: GossipState - proc updateBlocksGossipStatus(slot: Slot) = - let - isBehind = not shouldSyncOptimistically(slot) - targetGossipState = getTargetGossipState(slot.epoch, cfg, isBehind) +proc run( + config: VerifiedProxyConf +) {.async: (raises: [ValueError, CatchableError]), gcsafe.} = + {.gcsafe.}: + setupLogging(config.logLevel, config.logStdout) - template currentGossipState(): auto = - blocksGossipState + try: + notice "Launching Nimbus verified proxy", + version = FullVersionStr, cmdParams = commandLineParams(), config + except Exception: + notice "commandLineParams() exception" - if currentGossipState == targetGossipState: - return + let + engineConf = RpcVerificationEngineConf( + chainId: getConfiguredChainId(config.eth2Network), + maxBlockWalk: config.maxBlockWalk, + headerStoreLen: config.headerStoreLen, + accountCacheLen: config.accountCacheLen, + codeCacheLen: config.codeCacheLen, + storageCacheLen: config.storageCacheLen, + ) + engine = RpcVerificationEngine.init(engineConf) + lc = LightClient.new(config.eth2Network, some config.trustedBlockRoot) - if currentGossipState.card == 0 and targetGossipState.card > 0: - debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState - elif currentGossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling blocks topic subscriptions", wallSlot = slot - else: - # Individual forks added / removed - discard + #initialize frontend and backend for JSON-RPC + jsonRpcClient = 
+    jsonRpcServer = JsonRpcServer.init(config.frontendUrl)
-
-    let
-      newGossipEpochs = targetGossipState - currentGossipState
-      oldGossipEpochs = currentGossipState - targetGossipState
+
+    # initialize backend for light client updates
+    lcRestClientPool = LCRestClientPool.new(lc.cfg, lc.forkDigests)
-
-    for gossipEpoch in oldGossipEpochs:
-      let forkDigest = forkDigests[].atEpoch(gossipEpoch, cfg)
-      network.unsubscribe(getBeaconBlocksTopic(forkDigest))
+
+  # connect the light client to the engine by registering on-header callbacks
+  # that use the engine's header store
+  connectLCToEngine(lc, engine)
+  lc.trustedBlockRoot = some config.trustedBlockRoot
-
-    for gossipEpoch in newGossipEpochs:
-      let forkDigest = forkDigests[].atEpoch(gossipEpoch, cfg)
-      network.subscribe(
-        getBeaconBlocksTopic(forkDigest),
-        getBlockTopicParams(cfg.timeParams),
-        enableTopicMetrics = true,
-      )
+
+  # add light client backend
+  lc.setBackend(lcRestClientPool.getEthLCBackend())
-
-    blocksGossipState = targetGossipState
+  # the backend only needs the URL of the RPC provider
+  engine.backend = jsonRpcClient.getEthApiBackend()
+  # inject the engine frontend into the JSON-RPC server
+  jsonRpcServer.injectEngineFrontend(engine.frontend)
-
-  proc updateGossipStatus(time: Moment) =
-    let wallSlot = getBeaconTime().slotOrZero(cfg.timeParams)
-    updateBlocksGossipStatus(wallSlot + 1)
-    lightClient.updateGossipStatus(wallSlot + 1)
+
+  # start frontend and backend for JSON-RPC
+  var status = await jsonRpcClient.start()
+  if status.isErr():
+    raise newException(ValueError, status.error)
-
-  # updates gossip status every second every second
-  proc runOnSecondLoop() {.async.} =
-    let sleepTime = chronos.seconds(1)
-    while true:
-      let start = chronos.now(chronos.Moment)
-      await chronos.sleepAsync(sleepTime)
-      let afterSleep = chronos.now(chronos.Moment)
-      let sleepTime = afterSleep - start
-      updateGossipStatus(start)
-      let finished = chronos.now(chronos.Moment)
-      let processingTime = finished - afterSleep
-      trace "onSecond task completed", sleepTime, processingTime
+
+  status = jsonRpcServer.start()
+  if status.isErr():
+    raise newException(ValueError, status.error)
-
-  # update gossip status before starting the light client
-  updateGossipStatus(Moment.now())
-  # start the light client
-  lightClient.start()
+
+  # adding endpoints will also start the backend
+  lcRestClientPool.addEndpoints(config.beaconApiUrls)
-
-  # launch a async routine
-  asyncSpawn runOnSecondLoop()
+
+  # verify chain id that the proxy is connected to
+  await engine.verifyChainId()
-
-  # run an infinite loop and wait for a stop signal
-  while true:
-    poll()
-    if ctx != nil and ctx.stop:
-      # Cleanup
-      waitFor network.stop()
-      waitFor jsonRpcClient.stop()
-      waitFor jsonRpcServer.stop()
-      ctx.cleanup()
-      # Notify client that cleanup is finished
-      ctx.onHeader(nil, 2)
-      break
+
+  # start the light client manager, which runs
+  # as an endless loop
+  try:
+    await lc.start()
+  except CancelledError as e:
+    debug "light client cancelled"
+    raise e
 
 # noinline to keep it in stack traces
 proc main() {.noinline, raises: [CatchableError].} =
@@ -322,7 +164,7 @@ proc main() {.noinline, raises: [CatchableError].} =
       writePanicLine error # Logging not yet set up
       quit QuitFailure
 
-  run(config, nil)
+  waitFor run(config)
 
 when isMainModule:
   main()
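As a quick sanity check of the chain-id mapping introduced above, the following standalone sketch reuses the body of getConfiguredChainId verbatim; the doAssert lines are illustrative and not part of the diff:

import std/[options, strutils]
import stint

func getConfiguredChainId(chain: Option[string]): UInt256 =
  # body copied from the diff: unknown networks fall back to mainnet
  let net = chain.get("mainnet").toLowerAscii()
  case net
  of "mainnet": 1.u256
  of "sepolia": 11155111.u256
  of "hoodi": 560048.u256
  else: 1.u256

doAssert getConfiguredChainId(none(string)) == 1.u256          # default is mainnet
doAssert getConfiguredChainId(some "Sepolia") == 11155111.u256 # case-insensitive
doAssert getConfiguredChainId(some "gnosis") == 1.u256         # silent fallback

Note the last case: an unrecognised --network value silently maps to chain id 1, so a typo in the flag yields mainnet verification rather than an error; a warning log on that fallback path may be worth considering.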
diff --git a/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim b/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim
index 3579a2fe2a..0825af6008 100644
--- a/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim
+++ b/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim
@@ -9,10 +9,11 @@
 import
   std/os,
+  std/strutils,
   json_rpc/rpcproxy, # must be early (compilation annoyance)
   json_serialization/std/net,
   beacon_chain/conf_light_client,
   beacon_chain/nimbus_binary_common
 
 export net
 
@@ -25,37 +26,45 @@
     kind*: Web3UrlKind
     web3Url*: string
 
+  UrlList* = object
+    urls*: seq[string]
+
 #!fmt: off
 type VerifiedProxyConf* = object
   # Config
   configFile* {.
-    desc: "Loads the configuration from a TOML file"
-    name: "config-file" .}: Option[InputFile]
+    desc: "Loads the configuration from a TOML file",
+    name: "config-file"
+  .}: Option[InputFile]
 
   # Logging
   logLevel* {.
-    desc: "Sets the log level"
-    defaultValue: "INFO"
-    name: "log-level" .}: string
+    desc: "Sets the log level",
+    defaultValue: "INFO",
+    name: "log-level"
+  .}: string
 
   logStdout* {.
-    hidden
-    desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
-    defaultValueDesc: "auto"
-    defaultValue: StdoutLogKind.Auto
-    name: "log-format" .}: StdoutLogKind
+    hidden,
+    desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)",
+    defaultValueDesc: "auto",
+    defaultValue: StdoutLogKind.Auto,
+    name: "log-format"
+  .}: StdoutLogKind
 
   # Storage
   dataDirFlag* {.
-    desc: "The directory where nimbus will store all blockchain data"
-    abbr: "d"
-    name: "data-dir" .}: Option[OutDir]
+    desc: "The directory where nimbus will store all blockchain data",
+    abbr: "d",
+    name: "data-dir"
+  .}: Option[OutDir]
 
   # Network
   eth2Network* {.
-    desc: "The Eth2 network to join"
-    defaultValueDesc: "mainnet"
-    name: "network" .}: Option[string]
+    desc: "The Eth2 network to join",
+    defaultValueDesc: "mainnet",
+    name: "network"
+  .}: Option[string]
 
   accountCacheLen* {.
     hidden,
@@ -95,8 +104,9 @@ type VerifiedProxyConf* = object
   # Consensus light sync
   # No default - Needs to be provided by the user
   trustedBlockRoot* {.
-    desc: "Recent trusted finalized block root to initialize light client from"
-    name: "trusted-block-root" .}: Eth2Digest
+    desc: "Recent trusted finalized block root to initialize light client from",
+    name: "trusted-block-root"
+  .}: Eth2Digest
 
   # (Untrusted) web3 provider
   # No default - Needs to be provided by the user
@@ -114,70 +124,12 @@ type VerifiedProxyConf* = object
     name: "frontend-url" .}: Web3Url
 
-  # Libp2p
-  bootstrapNodes* {.
-    desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
-    abbr: "b"
-    name: "bootstrap-node" .}: seq[string]
-
-  bootstrapNodesFile* {.
-    desc: "Specifies a line-delimited file of bootstrap Ethereum network addresses"
-    defaultValue: ""
-    name: "bootstrap-file" .}: InputFile
-
-  listenAddress* {.
-    desc: "Listening address for the Ethereum LibP2P and Discovery v5 traffic"
-    name: "listen-address" .}: Option[IpAddress]
-
-  tcpPort* {.
-    desc: "Listening TCP port for Ethereum LibP2P traffic"
-    defaultValue: defaultEth2TcpPort
-    defaultValueDesc: $defaultEth2TcpPortDesc
-    name: "tcp-port" .}: Port
-
-  udpPort* {.
-    desc: "Listening UDP port for node discovery"
-    defaultValue: defaultEth2TcpPort
-    defaultValueDesc: $defaultEth2TcpPortDesc
-    name: "udp-port" .}: Port
-
-  # TODO: Select a lower amount of peers.
-  maxPeers* {.
-    desc: "The target number of peers to connect to",
-    defaultValue: 160, # 5 (fanout) * 64 (subnets) / 2 (subs) for a healthy mesh
-    name: "max-peers"
-  .}: int
-
-  hardMaxPeers* {.
-    desc: "The maximum number of peers to connect to. Defaults to maxPeers * 1.5"
-    name: "hard-max-peers" .}: Option[int]
-
-  nat* {.
- desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:" - defaultValue: NatConfig(hasExtIp: false, nat: NatAny) - defaultValueDesc: "any" - name: "nat" .}: NatConfig - - enrAutoUpdate* {. - desc: "Discovery can automatically update its ENR with the IP address " & - "and UDP port as seen by other nodes it communicates with. " & - "This option allows to enable/disable this functionality" - defaultValue: false - name: "enr-auto-update" .}: bool - - agentString* {. - defaultValue: "nimbus", - desc: "Node agent string which is used as identifier in the LibP2P network", - name: "agent-string" - .}: string - - discv5Enabled* {.desc: "Enable Discovery v5", defaultValue: true, name: "discv5".}: - bool - - directPeers* {. - desc: "The list of priviledged, secure and known peers to connect and maintain the connection to, this requires a not random netkey-file. In the complete multiaddress format like: /ip4/
/tcp//p2p/. Peering agreements are established out of band and must be reciprocal."
-    name: "direct-peer" .}: seq[string]
+  # (Untrusted) beacon API providers
+  # No default - Needs to be provided by the user
+  beaconApiUrls* {.
+    desc: "Comma-separated URLs of the light client data providers",
+    name: "external-beacon-api-urls"
+  .}: UrlList
 
 #!fmt: on
 
@@ -195,34 +147,24 @@ proc parseCmdArg*(T: type Web3Url, p: string): T {.raises: [ValueError].} =
       ValueError, "Web3 url should have defined scheme (http/https/ws/wss)"
     )
 
+proc parseCmdArg*(T: type UrlList, p: string): T {.raises: [ValueError].} =
+  let urls = p.split(',')
+
+  for u in urls:
+    let
+      parsed = parseUri(u)
+      normalizedScheme = parsed.scheme.toLowerAscii()
+
+    if not (normalizedScheme == "http" or normalizedScheme == "https"):
+      raise newException(ValueError, "Light Client Endpoint should be an http(s) URL")
+
+  UrlList(urls: urls)
+
 proc completeCmdArg*(T: type Web3Url, val: string): seq[string] =
-  return @[]
-
-func asLightClientConf*(pc: VerifiedProxyConf): LightClientConf =
-  return LightClientConf(
-    configFile: pc.configFile,
-    logLevel: pc.logLevel,
-    logStdout: pc.logStdout,
-    logFile: none(OutFile),
-    dataDirFlag: pc.dataDirFlag,
-    eth2Network: pc.eth2Network,
-    bootstrapNodes: pc.bootstrapNodes,
-    bootstrapNodesFile: pc.bootstrapNodesFile,
-    listenAddress: pc.listenAddress,
-    tcpPort: pc.tcpPort,
-    udpPort: pc.udpPort,
-    maxPeers: pc.maxPeers,
-    hardMaxPeers: pc.hardMaxPeers,
-    nat: pc.nat,
-    enrAutoUpdate: pc.enrAutoUpdate,
-    agentString: pc.agentString,
-    discv5Enabled: pc.discv5Enabled,
-    directPeers: pc.directPeers,
-    trustedBlockRoot: pc.trustedBlockRoot,
-    web3Urls: @[EngineApiUrlConfigValue(url: pc.backendUrl.web3Url)],
-    jwtSecret: none(InputFile),
-    stopAtEpoch: 0,
-  )
+  @[]
+
+proc completeCmdArg*(T: type UrlList, val: string): seq[string] =
+  @[]
 
 # TODO: Cannot use ClientConfig in VerifiedProxyConf due to the fact that
 # it contain `set[TLSFlags]` which does not have proper toml serialization
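For reference, a standalone sketch of how the new --external-beacon-api-urls value is parsed; the UrlList type and the parseCmdArg body are copied from the diff above, while the sample URLs are hypothetical:

import std/[strutils, uri]

type UrlList = object
  urls: seq[string]

proc parseCmdArg(T: type UrlList, p: string): T {.raises: [ValueError].} =
  # copied from the diff: split on commas, accept only http(s) endpoints
  # (std/uri's UriParseError is a subtype of ValueError, so the raises list holds)
  let urls = p.split(',')
  for u in urls:
    let
      parsed = parseUri(u)
      normalizedScheme = parsed.scheme.toLowerAscii()
    if not (normalizedScheme == "http" or normalizedScheme == "https"):
      raise newException(ValueError, "Light Client Endpoint should be an http(s) URL")
  UrlList(urls: urls)

doAssert parseCmdArg(UrlList, "https://a.example.org,https://b.example.org").urls.len == 2

Note that entries are not stripped before validation, so a value like "https://a.example.org, https://b.example.org" (with a space after the comma) is rejected because " https" does not parse as a scheme; stripping each entry before parseUri would make the flag more forgiving.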