diff --git a/.gitmodules b/.gitmodules index 068376f93a..d4dc20fe33 100644 --- a/.gitmodules +++ b/.gitmodules @@ -200,7 +200,6 @@ [submodule "vendor/gnosis-chain-configs"] path = vendor/gnosis-chain-configs url = https://github.com/gnosischain/configs.git -[submodule "vendor/capella-testnets"] - path = vendor/capella-testnets +[submodule "vendor/withdrawals-testnets"] + path = vendor/withdrawals-testnets url = https://github.com/ethpandaops/withdrawals-testnet.git - branch = master diff --git a/Makefile b/Makefile index 325cf1cd91..b132dfea60 100644 --- a/Makefile +++ b/Makefile @@ -673,13 +673,13 @@ sepolia-dev-deposit: | sepolia-build deposit_contract clean-sepolia: $(call CLEAN_NETWORK,sepolia) -### Capella devnets +### Withdrawals testnets -capella-devnet-3: - tmuxinator start -p scripts/tmuxinator-el-cl-pair-in-devnet.yml network="vendor/capella-testnets/withdrawal-devnet-3/custom_config_data" +zhejiang: + tmuxinator start -p scripts/tmuxinator-el-cl-pair-in-devnet.yml network="vendor/withdrawals-testnets/zhejiang-testnet/custom_config_data" -clean-capella-devnet-3: - scripts/clean-devnet-dir.sh vendor/capella-testnets/withdrawal-devnet-3/custom_config_data +clean-zhejiang: + scripts/clean-devnet-dir.sh vendor/withdrawals-testnets/zhejiang-testnet/custom_config_data ### ### Gnosis chain binary diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 1043c573be..1644a7863b 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -68,7 +68,7 @@ type syncCommitteeMsgPool*: ref SyncCommitteeMsgPool lightClientPool*: ref LightClientPool validatorChangePool*: ref ValidatorChangePool - eth1Monitor*: Eth1Monitor + elManager*: ELManager payloadBuilderRestClient*: RestClientRef restServer*: RestServerRef keymanagerHost*: ref KeymanagerHost @@ -90,7 +90,6 @@ type restKeysCache*: Table[ValidatorPubKey, ValidatorIndex] validatorMonitor*: ref ValidatorMonitor stateTtlCache*: StateTtlCache - nextExchangeTransitionConfTime*: Moment router*: ref MessageRouter dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore externalBuilderRegistrations*: diff --git a/beacon_chain/beacon_node_light_client.nim b/beacon_chain/beacon_node_light_client.nim index f01e4e9ce6..4107845d1d 100644 --- a/beacon_chain/beacon_node_light_client.nim +++ b/beacon_chain/beacon_node_light_client.nim @@ -8,14 +8,12 @@ {.push raises: [].} import - chronicles, + chronicles, web3/engine_api_types, ./beacon_node logScope: topics = "beacnde" func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool = - if node.eth1Monitor == nil: - return false let optimisticHeader = node.lightClient.optimisticHeader withForkyHeader(optimisticHeader): when lcDataFork > LightClientDataFork.None: @@ -41,7 +39,7 @@ proc initLightClient*( let optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): - Future[void] {.async.} = + Future[void] {.async.} = info "New LC optimistic block", opt = signedBlock.toBlockId(), dag = node.dag.head.bid, @@ -51,10 +49,9 @@ proc initLightClient*( if blck.message.is_execution_block: template payload(): auto = blck.message.body.execution_payload - let eth1Monitor = node.eth1Monitor - if eth1Monitor != nil and not payload.block_hash.isZero: + if not payload.block_hash.isZero: # engine_newPayloadV1 - discard await eth1Monitor.newExecutionPayload(payload) + discard await node.elManager.newExecutionPayload(payload) # Retain optimistic head for other `forkchoiceUpdated` callers. 
# May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth: @@ -67,7 +64,7 @@ proc initLightClient*( # engine_forkchoiceUpdatedV1 let beaconHead = node.attestationPool[].getBeaconHead(nil) - discard await eth1Monitor.runForkchoiceUpdated( + discard await node.elManager.forkchoiceUpdated( headBlockHash = payload.block_hash, safeBlockHash = beaconHead.safeExecutionPayloadHash, finalizedBlockHash = beaconHead.finalizedExecutionPayloadHash) diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim index 556d618109..3238ac6190 100644 --- a/beacon_chain/conf.nim +++ b/beacon_chain/conf.nim @@ -26,6 +26,7 @@ import ./spec/datatypes/base, ./networking/network_metadata, ./validators/slashing_protection_common, + ./eth1/el_conf, ./filepath from consensus_object_pools/block_pools_types_light_client @@ -35,7 +36,7 @@ export uri, nat, enr, defaultEth2TcpPort, enabledLogLevel, ValidIpAddress, defs, parseCmdArg, completeCmdArg, network_metadata, - network, BlockHashOrNumber, + el_conf, network, BlockHashOrNumber, confTomlDefs, confTomlNet, confTomlUri declareGauge network_name, "network name", ["name"] @@ -176,14 +177,17 @@ type name: "era-dir" .}: Option[InputDir] web3Urls* {. - desc: "One or more execution layer Web3 provider URLs" - name: "web3-url" .}: seq[string] + desc: "One or more execution layer Engine API URLs" + name: "web3-url" .}: seq[EngineApiUrlConfigValue] - web3ForcePolling* {. - hidden + elUrls* {. + desc: "One or more execution layer Engine API URLs" + name: "el" .}: seq[EngineApiUrlConfigValue] + + noEl* {. defaultValue: false - desc: "Force the use of polling when determining the head block of Eth1" - name: "web3-force-polling" .}: bool + desc: "Don't use an EL. The node will remain optimistically synced and won't be able to perform validator duties" + name: "no-el" .}: bool optimistic* {. hidden # deprecated > 22.12 @@ -234,7 +238,7 @@ type # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/authentication.md#key-distribution jwtSecret* {. desc: "A file containing the hex-encoded 256 bit secret key to be used for verifying/generating JWT tokens" - name: "jwt-secret" .}: Option[string] + name: "jwt-secret" .}: Option[InputFile] case cmd* {. command @@ -1302,7 +1306,7 @@ func defaultFeeRecipient*(conf: AnyConf): Eth1Address = proc loadJwtSecret*( rng: var HmacDrbgContext, dataDir: string, - jwtSecret: Option[string], + jwtSecret: Option[InputFile], allowCreate: bool): Option[seq[byte]] = # Some Web3 endpoints aren't compatible with JWT, but if explicitly chosen, # use it regardless. @@ -1317,8 +1321,18 @@ proc loadJwtSecret*( else: none(seq[byte]) -template loadJwtSecret*( +proc loadJwtSecret*( rng: var HmacDrbgContext, config: BeaconNodeConf, allowCreate: bool): Option[seq[byte]] = rng.loadJwtSecret(string(config.dataDir), config.jwtSecret, allowCreate) + +proc engineApiUrls*(config: BeaconNodeConf): seq[EngineApiUrl] = + let elUrls = if config.noEl: + return newSeq[EngineApiUrl]() + elif config.elUrls.len == 0 and config.web3Urls.len == 0: + @[defaultEngineApiUrl] + else: + config.elUrls + + (elUrls & config.web3Urls).toFinalEngineApiUrls(config.jwtSecret) diff --git a/beacon_chain/conf_light_client.nim b/beacon_chain/conf_light_client.nim index c9b3d59a3a..6a61a62a6a 100644 --- a/beacon_chain/conf_light_client.nim +++ b/beacon_chain/conf_light_client.nim @@ -123,12 +123,21 @@ type LightClientConf* = object # Execution layer web3Urls* {. 
- desc: "One or more execution layer Web3 provider URLs" - name: "web3-url" .}: seq[string] + desc: "One or more execution layer Engine API URLs" + name: "web3-url" .}: seq[EngineApiUrlConfigValue] + + elUrls* {. + desc: "One or more execution layer Engine API URLs" + name: "el" .}: seq[EngineApiUrlConfigValue] + + noEl* {. + defaultValue: false + desc: "Don't use an EL. The node will remain optimistically synced and won't be able to perform validator duties" + name: "no-el" .}: bool jwtSecret* {. desc: "A file containing the hex-encoded 256 bit secret key to be used for verifying/generating JWT tokens" - name: "jwt-secret" .}: Option[string] + name: "jwt-secret" .}: Option[InputFile] # Testing stopAtEpoch* {. @@ -145,3 +154,13 @@ template loadJwtSecret*( config: LightClientConf, allowCreate: bool): Option[seq[byte]] = rng.loadJwtSecret(string(config.dataDir), config.jwtSecret, allowCreate) + +proc engineApiUrls*(config: LightClientConf): seq[EngineApiUrl] = + let elUrls = if config.noEl: + return newSeq[EngineApiUrl]() + elif config.elUrls.len == 0 and config.web3Urls.len == 0: + @[defaultEngineApiUrl] + else: + config.elUrls + + (elUrls & config.web3Urls).toFinalEngineApiUrls(config.jwtSecret) diff --git a/beacon_chain/consensus_object_pools/consensus_manager.nim b/beacon_chain/consensus_object_pools/consensus_manager.nim index 3c27fa5b6a..dae559fd73 100644 --- a/beacon_chain/consensus_object_pools/consensus_manager.nim +++ b/beacon_chain/consensus_object_pools/consensus_manager.nim @@ -8,10 +8,11 @@ {.push raises: [].} import - chronicles, chronos, + chronicles, chronos, web3/[ethtypes, engine_api_types], ../spec/datatypes/base, ../consensus_object_pools/[blockchain_dag, block_quarantine, attestation_pool], - ../eth1/eth1_monitor + ../eth1/eth1_monitor, + ../beacon_clock from ../spec/beaconstate import get_expected_withdrawals from ../spec/datatypes/capella import Withdrawal @@ -22,15 +23,6 @@ from ../validators/keystore_management import from ../validators/action_tracker import ActionTracker, getNextProposalSlot type - ForkChoiceUpdatedInformation* = object - payloadId*: PayloadID - headBlockRoot*: Eth2Digest - safeBlockRoot*: Eth2Digest - finalizedBlockRoot*: Eth2Digest - timestamp*: uint64 - feeRecipient*: Eth1Address - withdrawals*: Opt[seq[Withdrawal]] - ConsensusManager* = object expectedSlot: Slot expectedBlockReceived: Future[bool] @@ -46,7 +38,7 @@ type # Execution layer integration # ---------------------------------------------------------------- - eth1Monitor*: Eth1Monitor + elManager*: ELManager # Allow determination of whether there's an upcoming proposal # ---------------------------------------------------------------- @@ -61,7 +53,6 @@ type # Tracking last proposal forkchoiceUpdated payload information # ---------------------------------------------------------------- - forkchoiceUpdatedInfo*: Opt[ForkchoiceUpdatedInformation] optimisticHead: tuple[bid: BlockId, execution_block_hash: Eth2Digest] # Initialization @@ -71,7 +62,7 @@ func new*(T: type ConsensusManager, dag: ChainDAGRef, attestationPool: ref AttestationPool, quarantine: ref Quarantine, - eth1Monitor: Eth1Monitor, + elManager: ELManager, actionTracker: ActionTracker, dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore, validatorsDir: string, @@ -82,11 +73,10 @@ func new*(T: type ConsensusManager, dag: dag, attestationPool: attestationPool, quarantine: quarantine, - eth1Monitor: eth1Monitor, + elManager: elManager, actionTracker: actionTracker, dynamicFeeRecipientsStore: dynamicFeeRecipientsStore, 
validatorsDir: validatorsDir, - forkchoiceUpdatedInfo: Opt.none ForkchoiceUpdatedInformation, defaultFeeRecipient: defaultFeeRecipient, defaultGasLimit: defaultGasLimit ) @@ -124,7 +114,8 @@ proc expectBlock*(self: var ConsensusManager, expectedSlot: Slot): Future[bool] from eth/async_utils import awaitWithTimeout from web3/engine_api_types import - ForkchoiceUpdatedResponse, PayloadExecutionStatus, PayloadStatusV1 + ForkchoiceUpdatedResponse, + PayloadExecutionStatus, PayloadStatusV1, PayloadAttributesV1 func `$`(h: BlockHash): string = $h.asEth2Digest @@ -146,8 +137,6 @@ func shouldSyncOptimistically*( true func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool = - if self.eth1Monitor == nil: - return false if self.optimisticHead.execution_block_hash.isZero: return false @@ -167,62 +156,8 @@ func setOptimisticHead*( bid: BlockId, execution_block_hash: Eth2Digest) = self.optimisticHead = (bid: bid, execution_block_hash: execution_block_hash) -proc runForkchoiceUpdated*( - eth1Monitor: Eth1Monitor, - headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest): - Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} = - # Allow finalizedBlockHash to be 0 to avoid sync deadlocks. - # - # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events - # has "Before the first finalized block occurs in the system the finalized - # block hash provided by this event is stubbed with - # `0x0000000000000000000000000000000000000000000000000000000000000000`." - # and - # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/bellatrix/validator.md#executionpayload - # notes "`finalized_block_hash` is the hash of the latest finalized execution - # payload (`Hash32()` if none yet finalized)" - doAssert not headBlockHash.isZero - - try: - # Minimize window for Eth1 monitor to shut down connection - await eth1Monitor.ensureDataProvider() - - let fcuR = awaitWithTimeout( - forkchoiceUpdated( - eth1Monitor, headBlockHash, safeBlockHash, finalizedBlockHash), - FORKCHOICEUPDATED_TIMEOUT): - debug "runForkchoiceUpdated: forkchoiceUpdated timed out", - headBlockHash = shortLog(headBlockHash), - safeBlockHash = shortLog(safeBlockHash), - finalizedBlockHash = shortLog(finalizedBlockHash) - ForkchoiceUpdatedResponse( - payloadStatus: PayloadStatusV1( - status: PayloadExecutionStatus.syncing)) - - debug "runForkchoiceUpdated: ran forkchoiceUpdated", - headBlockHash, safeBlockHash, finalizedBlockHash, - payloadStatus = $fcuR.payloadStatus.status, - latestValidHash = $fcuR.payloadStatus.latestValidHash, - validationError = $fcuR.payloadStatus.validationError - - return (fcuR.payloadStatus.status, fcuR.payloadStatus.latestValidHash) - except CatchableError as err: - warn "forkchoiceUpdated failed - check execution client", - err = err.msg, - headBlockHash = shortLog(headBlockHash), - safeBlockHash = shortLog(safeBlockHash), - finalizedBlockHash = shortLog(finalizedBlockHash) - return (PayloadExecutionStatus.syncing, none BlockHash) - -from ../beacon_clock import GetBeaconTimeFn -from ../fork_choice/fork_choice import mark_root_invalid - -proc updateExecutionClientHead( - self: ref ConsensusManager, newHead: BeaconHead): - Future[Opt[void]] {.async.} = - if self.eth1Monitor.isNil: - return Opt[void].ok() - +proc updateExecutionClientHead(self: ref ConsensusManager, + newHead: BeaconHead): Future[Opt[void]] {.async.} = let headExecutionPayloadHash = self.dag.loadExecutionBlockRoot(newHead.blck) if headExecutionPayloadHash.isZero: @@ -232,7 +167,7 @@ proc 
updateExecutionClientHead( # Can't use dag.head here because it hasn't been updated yet let (payloadExecutionStatus, latestValidHash) = - await self.eth1Monitor.runForkchoiceUpdated( + await self.elManager.forkchoiceUpdated( headExecutionPayloadHash, newHead.safeExecutionPayloadHash, newHead.finalizedExecutionPayloadHash) @@ -400,34 +335,37 @@ proc runProposalForkchoiceUpdated*( else: Opt.none(seq[Withdrawal]) beaconHead = self.attestationPool[].getBeaconHead(self.dag.head) - headBlockRoot = self.dag.loadExecutionBlockRoot(beaconHead.blck) + headBlockHash = self.dag.loadExecutionBlockRoot(beaconHead.blck) - if headBlockRoot.isZero: + if headBlockHash.isZero: return + let + payloadAttributes = withState(self.dag.headState): + when stateFork >= ConsensusFork.Capella: + ForkedPayloadAttributes( + kind: ForkedPayloadAttributesKind.v2, + v2: PayloadAttributesV2( + timestamp: Quantity timestamp, + prevRandao: FixedBytes[32] randomData, + suggestedFeeRecipient: feeRecipient, + withdrawals: toEngineWithdrawals get_expected_withdrawals(forkyState.data))) + else: + ForkedPayloadAttributes( + kind: ForkedPayloadAttributesKind.v1, + v1: PayloadAttributesV1( + timestamp: Quantity timestamp, + prevRandao: FixedBytes[32] randomData, + suggestedFeeRecipient: feeRecipient)) try: - let fcResult = awaitWithTimeout( - forkchoiceUpdated( - self.eth1Monitor, headBlockRoot, beaconHead.safeExecutionPayloadHash, - beaconHead.finalizedExecutionPayloadHash, timestamp, randomData, - feeRecipient, withdrawals), - FORKCHOICEUPDATED_TIMEOUT): - debug "runProposalForkchoiceUpdated: forkchoiceUpdated timed out" - ForkchoiceUpdatedResponse( - payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing)) - - if fcResult.payloadStatus.status != PayloadExecutionStatus.valid or - fcResult.payloadId.isNone: - return - - self.forkchoiceUpdatedInfo = Opt.some ForkchoiceUpdatedInformation( - payloadId: bellatrix.PayloadID(fcResult.payloadId.get), - headBlockRoot: headBlockRoot, - safeBlockRoot: beaconHead.safeExecutionPayloadHash, - finalizedBlockRoot: beaconHead.finalizedExecutionPayloadHash, - timestamp: timestamp, - feeRecipient: feeRecipient, - withdrawals: withdrawals) + let + safeBlockHash = beaconHead.safeExecutionPayloadHash + (status, _) = await self.elManager.forkchoiceUpdated( + headBlockHash, + safeBlockHash, + beaconHead.finalizedExecutionPayloadHash, + payloadAttributes = payloadAttributes) + debug "Fork-choice updated for proposal", status except CatchableError as err: error "Engine API fork-choice update failed", err = err.msg diff --git a/beacon_chain/eth1/deposit_contract.nim b/beacon_chain/eth1/deposit_contract.nim index efcc3f4c84..cd7c3037be 100644 --- a/beacon_chain/eth1/deposit_contract.nim +++ b/beacon_chain/eth1/deposit_contract.nim @@ -209,6 +209,13 @@ proc main() {.async.} = mnemonic = generateMnemonic(rng[]) seed = getSeed(mnemonic, KeystorePass.init "") cfg = getRuntimeConfig(conf.eth2Network) + threshold = if conf.remoteSignersUrls.len > 0: conf.threshold + else: 0 + + if conf.remoteValidatorsCount > 0 and + conf.remoteSignersUrls.len == 0: + fatal "Please specify at least one remote signer URL" + quit 1 if (let res = secureCreatePath(string conf.outValidatorsDir); res.isErr): warn "Could not create validators folder", @@ -226,7 +233,7 @@ proc main() {.async.} = string conf.outValidatorsDir, string conf.outSecretsDir, conf.remoteSignersUrls, - conf.threshold, + threshold, conf.remoteValidatorsCount, KeystoreMode.Fast) diff --git a/beacon_chain/eth1/el_conf.nim b/beacon_chain/eth1/el_conf.nim 
new file mode 100644 index 0000000000..c3aefa9fc3 --- /dev/null +++ b/beacon_chain/eth1/el_conf.nim @@ -0,0 +1,174 @@ +import + std/[options, strutils, uri], + stew/results, chronicles, confutils, + json_serialization, # for logging + toml_serialization, toml_serialization/lexer, + ../spec/engine_authentication + +type + EngineApiRole* = enum + DepositSyncing = "sync-deposits" + BlockValidation = "validate-blocks" + BlockProduction = "produce-blocks" + + EngineApiRoles* = set[EngineApiRole] + + EngineApiUrl* = object + url: string + jwtSecret: Option[seq[byte]] + roles: EngineApiRoles + + EngineApiUrlConfigValue* = object + url*: string # TODO: Use the URI type here + jwtSecret*: Option[string] + jwtSecretFile*: Option[InputFile] + roles*: Option[EngineApiRoles] + +const + defaultEngineApiRoles* = { DepositSyncing, BlockValidation, BlockProduction } + + # https://github.com/ethereum/execution-apis/pull/302 + defaultJwtSecret* = "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" + + defaultEngineApiUrl* = EngineApiUrlConfigValue( + url: "http://127.0.0.1:8551", + jwtSecret: some defaultJwtSecret) + +chronicles.formatIt EngineApiUrl: + it.url + +proc init*(T: type EngineApiUrl, + url: string, + jwtSecret = none seq[byte], + roles = defaultEngineApiRoles): T = + T(url: url, jwtSecret: jwtSecret, roles: roles) + +func url*(engineUrl: EngineApiUrl): string = + engineUrl.url + +func jwtSecret*(engineUrl: EngineApiUrl): Option[seq[byte]] = + engineUrl.jwtSecret + +func roles*(engineUrl: EngineApiUrl): EngineApiRoles = + engineUrl.roles + +func unknownRoleMsg(role: string): string = + "'" & role & "' is not a valid EL function" + +template raiseError(reader: var TomlReader, msg: string) = + raiseTomlErr(reader.lex, msg) + +template raiseError(reader: var JsonReader, msg: string) = + raiseTomlErr(reader.lex, msg) + +proc readValue*(reader: var TomlReader, value: var EngineApiRoles) + {.raises: [Defect, SerializationError, IOError].} = + let roles = reader.readValue seq[string] + if roles.len == 0: + reader.raiseError "At least one role should be provided" + for role in roles: + case role.toLowerAscii + of $DepositSyncing: + value.incl DepositSyncing + of $BlockValidation: + value.incl BlockValidation + of $BlockProduction: + value.incl BlockProduction + else: + reader.raiseError(unknownRoleMsg role) + +proc writeValue*(writer: var JsonWriter, roles: EngineApiRoles) + {.raises: [Defect, SerializationError, IOError].} = + var strRoles: seq[string] + + for role in EngineApiRole: + if role in roles: strRoles.add $role + + writer.writeValue strRoles + +proc parseCmdArg*(T: type EngineApiUrlConfigValue, input: string): T + {.raises: [ValueError, Defect].} = + var + uri = parseUri(input) + jwtSecret: Option[string] + jwtSecretFile: Option[InputFile] + roles: Option[EngineApiRoles] + + if uri.anchor != "": + for key, value in decodeQuery(uri.anchor): + case key + of "jwtSecret": + jwtSecret = some value + of "jwtSecretFile": + jwtSecretFile = some InputFile.parseCmdArg(value) + of "roles": + var uriRoles: EngineApiRoles = {} + for role in split(value, ","): + case role.toLowerAscii + of $DepositSyncing: + uriRoles.incl DepositSyncing + of $BlockValidation: + uriRoles.incl BlockValidation + of $BlockProduction: + uriRoles.incl BlockProduction + else: + raise newException(ValueError, unknownRoleMsg role) + if uriRoles == {}: + raise newException(ValueError, "The list of roles should not be empty") + roles = some uriRoles + else: + raise newException(ValueError, "'" & key & "' is not a 
recognized Engine URL property") + uri.anchor = "" + + EngineApiUrlConfigValue( + url: $uri, + jwtSecret: jwtSecret, + jwtSecretFile: jwtSecretFile, + roles: roles) + +proc toFinalUrl*(confValue: EngineApiUrlConfigValue, + confJwtSecret: Option[seq[byte]]): Result[EngineApiUrl, cstring] = + if confValue.jwtSecret.isSome and confValue.jwtSecretFile.isSome: + return err "The options `jwtSecret` and `jwtSecretFile` should not be specified together" + + let jwtSecret = if confValue.jwtSecret.isSome: + some(? parseJwtTokenValue(confValue.jwtSecret.get)) + elif confValue.jwtSecretFile.isSome: + some(? loadJwtSecretFile(confValue.jwtSecretFile.get)) + else: + confJwtSecret + + ok EngineApiUrl.init( + url = confValue.url, + jwtSecret = jwtSecret, + roles = confValue.roles.get(defaultEngineApiRoles)) + +proc loadJwtSecret*(jwtSecret: Option[InputFile]): Option[seq[byte]] = + if jwtSecret.isSome: + let res = loadJwtSecretFile(jwtSecret.get) + if res.isOk: + some res.value + else: + fatal "Failed to load JWT secret file", err = res.error + quit 1 + else: + none seq[byte] + +proc toFinalEngineApiUrls*(elUrls: seq[EngineApiUrlConfigValue], + confJwtSecret: Option[InputFile]): seq[EngineApiUrl] = + let jwtSecret = loadJwtSecret confJwtSecret + + for elUrl in elUrls: + let engineApiUrl = elUrl.toFinalUrl(jwtSecret).valueOr: + fatal "Invalid EL configuration", err = error + quit 1 + result.add engineApiUrl + +proc fixupWeb3Urls*(web3Url: var string) = + var normalizedUrl = toLowerAscii(web3Url) + if not (normalizedUrl.startsWith("https://") or + normalizedUrl.startsWith("http://") or + normalizedUrl.startsWith("wss://") or + normalizedUrl.startsWith("ws://")): + warn "The Web3 URL does not specify a protocol. Assuming a WebSocket server", web3Url + web3Url = "ws://" & web3Url diff --git a/beacon_chain/eth1/eth1_monitor.nim b/beacon_chain/eth1/eth1_monitor.nim index cb10717dc7..385c4e73ae 100644 --- a/beacon_chain/eth1/eth1_monitor.nim +++ b/beacon_chain/eth1/eth1_monitor.nim @@ -12,25 +12,26 @@ import typetraits, uri, json], # Nimble packages: chronos, metrics, chronicles/timings, stint/endians2, - web3, web3/ethtypes as web3Types, web3/ethhexstrings, web3/engine_api, - eth/common/eth_types, + json_rpc/[client, errors], + web3, web3/ethhexstrings, web3/engine_api, + eth/common/[eth_types, transaction], eth/async_utils, stew/[byteutils, objects, results, shims/hashes], # Local modules: ../spec/[deposit_snapshots, eth2_merkleization, forks, helpers], ../spec/datatypes/[base, phase0, bellatrix, deneb], ../networking/network_metadata, ../consensus_object_pools/block_pools_types, - ".."/[beacon_chain_db, beacon_node_status, beacon_clock], - ./merkle_minimal + ".."/[beacon_chain_db, beacon_node_status, beacon_clock, future_combinators], + "."/[merkle_minimal, el_conf] from std/times import getTime, inSeconds, initTime, `-` from ../spec/engine_authentication import getSignedIatToken export - web3Types, deques, base, DepositTreeSnapshot + el_conf, engine_api, deques, base, DepositTreeSnapshot logScope: - topics = "eth1" + topics = "elmon" type PubKeyBytes = DynamicBytes[48, 48] @@ -54,15 +55,24 @@ contract(DepositContract): index: Int64LeBytes) {.event.} const - web3Timeouts = 60.seconds hasDepositRootChecks = defined(has_deposit_root_checks) targetBlocksPerLogsRequest = 5000'u64 # This is roughly a day of Eth1 blocks + # Engine API timeouts + engineApiConnectionTimeout = 5.seconds # How much we wait before giving up connecting to the Engine API + web3RequestsTimeout* = 8.seconds # How much we wait for eth_* 
requests (e.g. eth_getBlockByHash) + + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/specification.md#request-2 + GETPAYLOAD_TIMEOUT = 1.seconds + + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/experimental/blob-extension.md#engine_getblobsbundlev1 + GETBLOBS_TIMEOUT = 1.seconds + type Eth1BlockNumber* = uint64 Eth1BlockTimestamp* = uint64 - Eth1BlockHeader = web3Types.BlockHeader + Eth1BlockHeader = engine_api.BlockHeader GenesisStateRef = ref phase0.BeaconState @@ -102,44 +112,96 @@ type hasConsensusViolation: bool ## The local chain contradicts the observed consensus on the network - Eth1MonitorState = enum - Initialized - Started - ReadyToRestartToPrimary - Failed - Stopping - Stopped - - Eth1Monitor* = ref object - state: Eth1MonitorState - startIdx: int - web3Urls: seq[string] + ForkedPayloadAttributesKind* {.pure.} = enum + v1 + v2 + + ForkedPayloadAttributes* = ref object + case kind*: ForkedPayloadAttributesKind + of ForkedPayloadAttributesKind.v1: + v1*: PayloadAttributesV1 + of ForkedPayloadAttributesKind.v2: + v2*: PayloadAttributesV2 + + NextExpectedPayloadParams* = object + headBlockHash*: Eth2Digest + safeBlockHash*: Eth2Digest + finalizedBlockHash*: Eth2Digest + payloadAttributes: ForkedPayloadAttributes + + ELManager* = ref object eth1Network: Option[Eth1Network] + ## If this value is supplied the EL monitor will check whether + ## all configured EL nodes are connected to the same network. + depositContractAddress*: Eth1Address - depositContractDeployedAt: BlockHashOrNumber - forcePolling: bool - jwtSecret: Option[seq[byte]] + depositContractBlockNumber: uint64 + depositContractBlockHash: BlockHash + blocksPerLogsRequest: uint64 + ## This value is used to dynamically adjust the number of + ## blocks we are trying to download at once during deposit + ## syncing. By default, the value is set to the constant + ## `targetBlocksPerLogsRequest`, but if the EL is failing + ## to serve this number of blocks per single `eth_getLogs` + ## request, we temporarily lower the value until the request + ## succeeds. The failures are generally expected only in + ## periods in the history with very high deposit density. + + elConnections: seq[ELConnection] + ## All active EL connections + + eth1Chain: Eth1Chain + ## At larger distances, this chain consists of all blocks + ## with deposits. Within the relevant voting period, it + ## also includes blocks without deposits because we must + ## vote for a block only if it's part of our known history. + + syncTargetBlock: Option[Eth1BlockNumber] + + chainSyncingLoopFut: Future[void] + exchangeTransitionConfigurationLoopFut: Future[void] + stopFut: Future[void] - dataProvider: Web3DataProviderRef - latestEth1Block: Option[FullBlockId] + nextExpectedPayloadParams*: Option[NextExpectedPayloadParams] - depositsChain: Eth1Chain - eth1Progress: AsyncEvent + EtcStatus {.pure.} = enum + notExchangedYet + exchangeError + mismatch + match - exchangedConfiguration*: bool + DepositContractSyncStatus {.pure.} = enum + unknown + notSynced + synced - runFut: Future[void] - stopFut: Future[void] - getBeaconTime: GetBeaconTimeFn + ConnectionState = enum + NeverTested + Working + Degraded + + ELConnection* = ref object + engineUrl: EngineApiUrl + + web3: Option[Web3] + ## This will be `none` before connecting and while we are + ## reconnecting after a lost connection. You can wait on + ## the future below for the moment the connection is active.
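The `ELConnection` object modelled here keeps `web3` as `none` until a connection succeeds and funnels every caller through a single in-flight connection attempt. Below is a minimal, self-contained chronos sketch of that single-flight pattern; `Conn`, `dial` and `connected` are illustrative stand-ins for `ELConnection`, `establishEngineApiConnection` and `connectedRpcClient`, not code from this PR:

```nim
import std/options, chronos

type
  Conn = ref object
    endpoint: string
    session: Option[string]        # stands in for `web3: Option[Web3]`
    connectingFut: Future[string]  # the single in-flight attempt

proc dial(endpoint: string): Future[string] {.async.} =
  # Placeholder for `establishEngineApiConnection`.
  await sleepAsync(10.milliseconds)
  return "session-for-" & endpoint

proc connected(c: Conn): Future[string] {.async.} =
  # Mirrors the idea behind `connectedRpcClient`: reuse a live session;
  # otherwise make sure only one `dial` runs no matter how many callers wait.
  while c.session.isNone:
    if c.connectingFut.isNil or c.connectingFut.finished:
      c.connectingFut = dial(c.endpoint)
    c.session = some(await c.connectingFut)
  return c.session.get

when isMainModule:
  let c = Conn(endpoint: "http://127.0.0.1:8551")
  echo waitFor c.connected()
```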
+ + connectingFut: Future[Result[Web3, string]] + ## This future will be replaced when the connection is lost. + + etcStatus: EtcStatus + ## The latest status of the `exchangeTransitionConfiguration` + ## exchange. + + state: ConnectionState - Web3DataProvider* = object - url: string - web3: Web3 - ns: Sender[DepositContract] - blockHeadersSubscription: Subscription + depositContractSyncStatus: DepositContractSyncStatus + ## Are we sure that this EL has synced the deposit contract? - Web3DataProviderRef* = ref Web3DataProvider + lastPayloadId: Option[engine_api.PayloadID] FullBlockId* = object number: Eth1BlockNumber @@ -164,6 +226,21 @@ type deposits*: seq[Deposit] hasMissingDeposits*: bool + BellatrixExecutionPayloadWithValue* = object + executionPayload*: ExecutionPayloadV1 + blockValue*: UInt256 + + CancunExecutionPayloadAndBlobs* = object + executionPayload*: ExecutionPayloadV3 + blockValue*: UInt256 + kzgs*: seq[engine_api.KZGCommitment] + blobs*: seq[engine_api.Blob] + + SomeEnginePayloadWithValue = + BellatrixExecutionPayloadWithValue | + GetPayloadV2Response | + CancunExecutionPayloadAndBlobs + declareCounter failed_web3_requests, "Failed web3 requests" @@ -182,26 +259,83 @@ declareGauge eth1_finalized_deposits, declareGauge eth1_chain_len, "The length of the in-memory chain of Eth1 blocks" -template cfg(m: Eth1Monitor): auto = - m.depositsChain.cfg +declareCounter engine_api_responses, + "Number of successful requests to the newPayload Engine API end-point", + labels = ["url", "request", "status"] + +declareCounter engine_api_timeouts, + "Number of timed-out requests to Engine API end-point", + labels = ["url", "request"] + +declareCounter engine_api_last_minute_forkchoice_updates_sent, + "Number of last minute requests to the forkchoiceUpdated Engine API end-point just before block proposals", + labels = ["url"] + +proc trackEngineApiRequest(connection: ELConnection, + request: FutureBase, requestName: string, + deadline: Future[void]) = + deadline.addCallback do (udata: pointer) {.gcsafe, raises: [Defect].}: + if not request.finished: + request.cancel() + engine_api_timeouts.inc(1, [connection.engineUrl.url, requestName]) + else: + let statusCode = if not request.failed: + 200 + elif request.error of ErrorResponse: + ((ref ErrorResponse) request.error).status + else: + 0 + + if request.failed: + case connection.state + of NeverTested, Working: + warn "Connection to EL node degraded", + url = url(connection.engineUrl), + failedRequest = requestName, + statusCode + of Degraded: + discard + connection.state = Degraded + else: + case connection.state + of Degraded: + info "Connection to EL node restored", + url = url(connection.engineUrl) + of NeverTested, Working: + discard + connection.state = Working + + engine_api_responses.inc(1, [connection.engineUrl.url, requestName, $statusCode]) + +template awaitOrRaiseOnTimeout[T](fut: Future[T], + timeout: Duration): T = + awaitWithTimeout(fut, timeout): + raise newException(DataProviderTimeout, "Timeout") + +template cfg(m: ELManager): auto = + m.eth1Chain.cfg + +template db(m: ELManager): BeaconChainDB = + m.eth1Chain.db -template depositChainBlocks*(m: Eth1Monitor): Deque[Eth1Block] = - m.depositsChain.blocks +func hasJwtSecret*(m: ELManager): bool = + for c in m.elConnections: + if c.engineUrl.jwtSecret.isSome: + return true -template finalizedDepositsMerkleizer(m: Eth1Monitor): auto = - m.depositsChain.finalizedDepositsMerkleizer +func isSynced*(m: ELManager): bool = + m.syncTargetBlock.isSome and + m.eth1Chain.blocks.len > 0 and + 
m.syncTargetBlock.get <= m.eth1Chain.blocks[^1].number -template headMerkleizer(m: Eth1Monitor): auto = - m.depositsChain.headMerkleizer +template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] = + m.eth1Chain.blocks -proc fixupWeb3Urls*(web3Url: var string) = - var normalizedUrl = toLowerAscii(web3Url) - if not (normalizedUrl.startsWith("https://") or - normalizedUrl.startsWith("http://") or - normalizedUrl.startsWith("wss://") or - normalizedUrl.startsWith("ws://")): - warn "The Web3 URL does not specify a protocol. Assuming a WebSocket server", web3Url - web3Url = "ws://" & web3Url +template finalizedDepositsMerkleizer(m: ELManager): auto = + m.eth1Chain.finalizedDepositsMerkleizer + +template headMerkleizer(m: ELManager): auto = + m.eth1Chain.headMerkleizer template toGaugeValue(x: Quantity): int64 = toGaugeValue(distinctBase x) @@ -236,14 +370,12 @@ func asEth2Digest*(x: BlockHash): Eth2Digest = template asBlockHash*(x: Eth2Digest): BlockHash = BlockHash(x.data) -from ../spec/datatypes/capella import ExecutionPayload, Withdrawal - func asConsensusWithdrawal(w: WithdrawalV1): capella.Withdrawal = capella.Withdrawal( index: w.index.uint64, validator_index: w.validatorIndex.uint64, address: ExecutionAddress(data: w.address.distinctBase), - amount: w.amount.uint64) + amount: GWei w.amount) func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 = WithdrawalV1( @@ -252,7 +384,7 @@ func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 = address: Address(w.address.data), amount: Quantity(w.amount)) -func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV1): +func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1): bellatrix.ExecutionPayload = template getTransaction(tt: TypedTransaction): bellatrix.Transaction = bellatrix.Transaction.init(tt.distinctBase) @@ -275,7 +407,16 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV1): transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init( mapIt(rpcExecutionPayload.transactions, it.getTransaction))) -func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV2): +func asConsensusType*(payloadWithValue: BellatrixExecutionPayloadWithValue): + bellatrix.ExecutionPayloadForSigning = + bellatrix.ExecutionPayloadForSigning( + executionPayload: payloadWithValue.executionPayload.asConsensusType, + blockValue: payloadWithValue.blockValue) + +template maybeDeref[T](o: Option[T]): T = o.get +template maybeDeref[V](v: V): V = v + +func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPayloadV2): capella.ExecutionPayload = template getTransaction(tt: TypedTransaction): bellatrix.Transaction = bellatrix.Transaction.init(tt.distinctBase) @@ -298,9 +439,15 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV2): transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init( mapIt(rpcExecutionPayload.transactions, it.getTransaction)), withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init( - mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal))) + mapIt(maybeDeref rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal))) + +func asConsensusType*(payloadWithValue: engine_api.GetPayloadV2Response): + capella.ExecutionPayloadForSigning = + capella.ExecutionPayloadForSigning( + executionPayload: payloadWithValue.executionPayload.asConsensusType, + blockValue: payloadWithValue.blockValue) -func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV3): +func 
asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3): deneb.ExecutionPayload = template getTransaction(tt: TypedTransaction): bellatrix.Transaction = bellatrix.Transaction.init(tt.distinctBase) @@ -326,6 +473,19 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV3): withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init( mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal))) +func asConsensusType*(cancunPayload: CancunExecutionPayloadAndBlobs): + deneb.ExecutionPayloadForSigning = + deneb.ExecutionPayloadForSigning( + executionPayload: cancunPayload.executionPayload.asConsensusType, + blockValue: cancunPayload.blockValue, + # TODO + # The `mapIt` calls below are necessary only because we use different distinct + # types for KZG commitments and Blobs in the `web3` and the `deneb` spec types. + # Both are defined as `array[N, byte]` under the hood. + kzgs: KZGCommitments cancunPayload.kzgs.mapIt(it.bytes), + blobs: Blobs cancunPayload.blobs.mapIt(it.bytes) + ) + func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload): ExecutionPayloadV1 = template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = @@ -348,11 +508,17 @@ func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload): blockHash: executionPayload.block_hash.asBlockHash, transactions: mapIt(executionPayload.transactions, it.getTypedTransaction)) +template toEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 = + WithdrawalV1( + index: Quantity(w.index), + validatorIndex: Quantity(w.validator_index), + address: Address(w.address.data), + amount: Quantity(w.amount)) + func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload): ExecutionPayloadV2 = template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = TypedTransaction(tt.distinctBase) - engine_api.ExecutionPayloadV2( parentHash: executionPayload.parent_hash.asBlockHash, feeRecipient: Address(executionPayload.fee_recipient.data), @@ -369,7 +535,7 @@ func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload): baseFeePerGas: executionPayload.base_fee_per_gas, blockHash: executionPayload.block_hash.asBlockHash, transactions: mapIt(executionPayload.transactions, it.getTypedTransaction), - withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal)) + withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal)) func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload): ExecutionPayloadV3 = @@ -445,254 +611,745 @@ func toVoteData(blk: Eth1Block): Eth1Data = func hash*(x: Eth1Data): Hash = hash(x.block_hash) -template awaitWithRetries*[T](lazyFutExpr: Future[T], - retries = 3, - timeout = web3Timeouts): untyped = - const - reqType = astToStr(lazyFutExpr) - var - retryDelayMs = 16000 - f: Future[T] - attempts = 0 +proc close(connection: ELConnection): Future[void] {.async.} = + if connection.web3.isSome: + awaitWithTimeout(connection.web3.get.close(), 30.seconds): + debug "Failed to close data provider in time" - while true: - f = lazyFutExpr - yield f or sleepAsync(timeout) - if not f.finished: - await cancelAndWait(f) - elif f.failed: - when not (f.error of CatchableError): - static: doAssert false, "f.error not CatchableError" - debug "Web3 request failed", req = reqType, err = f.error.msg - inc failed_web3_requests +proc isConnected(connection: ELConnection): bool = + connection.web3.isSome + +proc getJsonRpcRequestHeaders(jwtSecret: Option[seq[byte]]): + auto = + if jwtSecret.isSome: + 
let secret = jwtSecret.get + (proc(): seq[(string, string)] = + # https://www.rfc-editor.org/rfc/rfc6750#section-6.1.1 + @[("Authorization", "Bearer " & getSignedIatToken( + secret, (getTime() - initTime(0, 0)).inSeconds))]) + else: + (proc(): seq[(string, string)] = @[]) + +proc newWeb3*(engineUrl: EngineApiUrl): Future[Web3] = + newWeb3(engineUrl.url, getJsonRpcRequestHeaders(engineUrl.jwtSecret)) + +proc establishEngineApiConnection*(url: EngineApiUrl): + Future[Result[Web3, string]] {.async.} = + let web3Fut = newWeb3(url) + yield web3Fut or sleepAsync(engineApiConnectionTimeout) + + if (not web3Fut.finished) or web3Fut.failed: + await cancelAndWait(web3Fut) + if web3Fut.failed: + return err "Failed to setup Engine API connection: " & web3Fut.readError.msg else: - break + return err "Failed to setup Engine API connection" + else: + return ok web3Fut.read - inc attempts - if attempts >= retries: - var errorMsg = reqType & " failed " & $retries & " times" - if f.failed: errorMsg &= ". Last error: " & f.error.msg - raise newException(DataProviderFailure, errorMsg) +proc tryConnecting(connection: ELConnection): Future[bool] {.async.} = + if connection.isConnected: + return true - await sleepAsync(chronos.milliseconds(retryDelayMs)) - retryDelayMs *= 2 + if connection.connectingFut == nil: + connection.connectingFut = establishEngineApiConnection(connection.engineUrl) - read(f) + let web3Res = await connection.connectingFut + if web3Res.isErr: + return false + else: + connection.web3 = some web3Res.get + return true -proc close(p: Web3DataProviderRef): Future[void] {.async.} = - if p.blockHeadersSubscription != nil: - try: - awaitWithRetries(p.blockHeadersSubscription.unsubscribe()) - except CatchableError: - debug "Failed to clean up block headers subscription properly" +proc connectedRpcClient(connection: ELConnection): Future[RpcClient] {.async.} = + while not connection.isConnected: + if not await connection.tryConnecting(): + await sleepAsync(chronos.seconds(10)) - awaitWithTimeout(p.web3.close(), 30.seconds): - debug "Failed to close data provider in time" + return connection.web3.get.provider -proc getBlockByHash(p: Web3DataProviderRef, hash: BlockHash): - Future[BlockObject] = - return p.web3.provider.eth_getBlockByHash(hash, false) +proc getBlockByHash(rpcClient: RpcClient, hash: BlockHash): Future[BlockObject] = + rpcClient.eth_getBlockByHash(hash, false) -proc getBlockByNumber*(p: Web3DataProviderRef, +proc getBlockByNumber*(rpcClient: RpcClient, number: Eth1BlockNumber): Future[BlockObject] = - let hexNumber = try: &"0x{number:X}" # No leading 0's! - except ValueError as exc: raiseAssert exc.msg # Never fails - p.web3.provider.eth_getBlockByNumber(hexNumber, false) - -proc getPayloadV1*( - p: Eth1Monitor, payloadId: bellatrix.PayloadID): - Future[engine_api.ExecutionPayloadV1] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.isNil or p.dataProvider.isNil: - let epr = newFuture[engine_api.ExecutionPayloadV1]("getPayload") - epr.complete(default(engine_api.ExecutionPayloadV1)) - return epr - - p.dataProvider.web3.provider.engine_getPayloadV1(FixedBytes[8] payloadId) - -proc getPayloadV2*( - p: Eth1Monitor, payloadId: bellatrix.PayloadID): - Future[engine_api.ExecutionPayloadV2] {.async.} = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. 
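The `getJsonRpcRequestHeaders` closure above is what attaches the RFC 6750 bearer token to every Engine API request, signing a fresh `iat` claim each time so the token never goes stale. A hedged, self-contained sketch of the same shape, with `signIatToken` as a hypothetical stand-in for the PR's `getSignedIatToken`:

```nim
import std/times

# Stand-in for `getSignedIatToken` from ../spec/engine_authentication;
# the real proc returns an HS256-signed JWT carrying {"iat": <unix-seconds>}.
proc signIatToken(secret: seq[byte], time: int64): string =
  "signed-" & $secret.len & "-bytes-at-" & $time

proc headersFor(jwtSecret: seq[byte]): proc(): seq[(string, string)] =
  # https://www.rfc-editor.org/rfc/rfc6750#section-6.1.1
  result = proc(): seq[(string, string)] =
    @[("Authorization", "Bearer " & signIatToken(jwtSecret, getTime().toUnix))]

when isMainModule:
  let mkHeaders = headersFor(@[0x42'u8])
  echo mkHeaders()  # a fresh `iat` is signed on every call
```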
- if p.isNil or p.dataProvider.isNil: - return default(engine_api.ExecutionPayloadV2) - - return (await p.dataProvider.web3.provider.engine_getPayloadV2( - FixedBytes[8] payloadId)).executionPayload - -proc getPayloadV3*( - p: Eth1Monitor, payloadId: bellatrix.PayloadID): - Future[engine_api.ExecutionPayloadV3] {.async.} = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.isNil or p.dataProvider.isNil: - return default(engine_api.ExecutionPayloadV3) - - return (await p.dataProvider.web3.provider.engine_getPayloadV3( - FixedBytes[8] payloadId)).executionPayload + let hexNumber = try: + &"0x{number:X}" # No leading 0's! + except ValueError as exc: + # Since the format above is valid, failing here should not be possible + raiseAssert exc.msg -proc getBlobsBundleV1*( - p: Eth1Monitor, payloadId: bellatrix.PayloadID): - Future[engine_api.BlobsBundleV1] {.async.} = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.isNil or p.dataProvider.isNil: - return default(engine_api.BlobsBundleV1) - - return (await p.dataProvider.web3.provider.engine_getBlobsBundleV1( - FixedBytes[8] payloadId)) - -proc newPayload*(p: Eth1Monitor, payload: engine_api.ExecutionPayloadV1): - Future[PayloadStatusV1] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.dataProvider.isNil: - let epr = newFuture[PayloadStatusV1]("newPayload") - epr.complete(PayloadStatusV1(status: PayloadExecutionStatus.syncing)) - return epr - - p.dataProvider.web3.provider.engine_newPayloadV1(payload) - -proc newPayload*(p: Eth1Monitor, payload: engine_api.ExecutionPayloadV2): - Future[PayloadStatusV1] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.dataProvider.isNil: - let epr = newFuture[PayloadStatusV1]("newPayload") - epr.complete(PayloadStatusV1(status: PayloadExecutionStatus.syncing)) - return epr - - p.dataProvider.web3.provider.engine_newPayloadV2(payload) - -proc newPayload*(p: Eth1Monitor, payload: engine_api.ExecutionPayloadV3): - Future[PayloadStatusV1] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.dataProvider.isNil: - let epr = newFuture[PayloadStatusV1]("newPayload") - epr.complete(PayloadStatusV1(status: PayloadExecutionStatus.syncing)) - return epr - - p.dataProvider.web3.provider.engine_newPayloadV3(payload) - -proc forkchoiceUpdated*( - p: Eth1Monitor, headBlock, safeBlock, finalizedBlock: Eth2Digest): - Future[engine_api.ForkchoiceUpdatedResponse] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. 
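The per-version wrappers removed in this hunk are subsumed by the `ForkedPayloadAttributes` variant object, so a single `forkchoiceUpdated` can pick the right engine endpoint at runtime. A compact, self-contained model of that dispatch; string fields stand in for `PayloadAttributesV1`/`V2` and all names are illustrative:

```nim
type
  AttrsKind {.pure.} = enum v1, v2

  Attrs = ref object
    case kind: AttrsKind
    of AttrsKind.v1: v1: string  # stands in for PayloadAttributesV1
    of AttrsKind.v2: v2: string  # stands in for PayloadAttributesV2

proc callEngine(attrs: Attrs): string =
  # Mirrors the new `forkchoiceUpdated`: a nil `attrs` means "no payload
  # requested"; otherwise the variant selects the V1 or V2 endpoint.
  if attrs == nil:
    "engine_forkchoiceUpdatedV1(state, none)"
  else:
    case attrs.kind
    of AttrsKind.v1: "engine_forkchoiceUpdatedV1(state, " & attrs.v1 & ")"
    of AttrsKind.v2: "engine_forkchoiceUpdatedV2(state, " & attrs.v2 & ")"

when isMainModule:
  echo callEngine(nil)
  echo callEngine(Attrs(kind: AttrsKind.v2, v2: "attrsV2"))
```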
- if p.isNil or p.dataProvider.isNil or headBlock.isZeroMemory: - let fcuR = - newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated") - fcuR.complete(engine_api.ForkchoiceUpdatedResponse( - payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing))) - return fcuR - - p.dataProvider.web3.provider.engine_forkchoiceUpdatedV1( - ForkchoiceStateV1( - headBlockHash: headBlock.asBlockHash, - safeBlockHash: safeBlock.asBlockHash, - finalizedBlockHash: finalizedBlock.asBlockHash), - none(engine_api.PayloadAttributesV1)) - -proc forkchoiceUpdated*( - p: Eth1Monitor, headBlock, safeBlock, finalizedBlock: Eth2Digest, - timestamp: uint64, randomData: array[32, byte], + rpcClient.eth_getBlockByNumber(hexNumber, false) + +proc getBlock(rpcClient: RpcClient, id: BlockHashOrNumber): Future[BlockObject] = + if id.isHash: + let hash = id.hash.asBlockHash() + return rpcClient.getBlockByHash(hash) + else: + return rpcClient.getBlockByNumber(id.number) + +func areSameAs(expectedParams: Option[NextExpectedPayloadParams], + latestHead, latestSafe, latestFinalized: Eth2Digest, + timestamp: uint64, + randomData: Eth2Digest, + feeRecipient: Eth1Address, + withdrawals: seq[WithdrawalV1]): bool = + if not(expectedParams.isSome and + expectedParams.get.headBlockHash == latestHead and + expectedParams.get.safeBlockHash == latestSafe and + expectedParams.get.finalizedBlockHash == latestFinalized): + return false + + if expectedParams.get.payloadAttributes == nil: + return false + + case expectedParams.get.payloadAttributes.kind + of ForkedPayloadAttributesKind.v1: + expectedParams.get.payloadAttributes.v1.timestamp.uint64 == timestamp and + expectedParams.get.payloadAttributes.v1.prevRandao.bytes == randomData.data and + expectedParams.get.payloadAttributes.v1.suggestedFeeRecipient == feeRecipient and + withdrawals.len == 0 + of ForkedPayloadAttributesKind.v2: + expectedParams.get.payloadAttributes.v2.timestamp.uint64 == timestamp and + expectedParams.get.payloadAttributes.v2.prevRandao.bytes == randomData.data and + expectedParams.get.payloadAttributes.v2.suggestedFeeRecipient == feeRecipient and + expectedParams.get.payloadAttributes.v2.withdrawals == withdrawals + +template makeForkedPayloadAttributes( + GetPayloadResponseType: type BellatrixExecutionPayloadWithValue, + timestamp: uint64, + randomData: Eth2Digest, suggestedFeeRecipient: Eth1Address, - withdrawals: Opt[seq[capella.Withdrawal]]): - Future[engine_api.ForkchoiceUpdatedResponse] = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. 
- if p.isNil or p.dataProvider.isNil or headBlock.isZeroMemory: - let fcuR = - newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated") - fcuR.complete(engine_api.ForkchoiceUpdatedResponse( - payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing))) - return fcuR - - let forkchoiceState = ForkchoiceStateV1( - headBlockHash: headBlock.asBlockHash, - safeBlockHash: safeBlock.asBlockHash, - finalizedBlockHash: finalizedBlock.asBlockHash) - - if withdrawals.isNone: - p.dataProvider.web3.provider.engine_forkchoiceUpdatedV1( - forkchoiceState, - some(engine_api.PayloadAttributesV1( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: suggestedFeeRecipient))) + withdrawals: seq[WithdrawalV1]): ForkedPayloadAttributes = + ForkedPayloadAttributes( + kind: ForkedPayloadAttributesKind.v1, + v1: engine_api.PayloadAttributesV1( + timestamp: Quantity timestamp, + prevRandao: FixedBytes[32] randomData.data, + suggestedFeeRecipient: suggestedFeeRecipient)) + +template makeForkedPayloadAttributes( + GetPayloadResponseType: typedesc[engine_api.GetPayloadV2Response|CancunExecutionPayloadAndBlobs], + timestamp: uint64, + randomData: Eth2Digest, + suggestedFeeRecipient: Eth1Address, + withdrawals: seq[WithdrawalV1]): ForkedPayloadAttributes = + ForkedPayloadAttributes( + kind: ForkedPayloadAttributesKind.v2, + v2: engine_api.PayloadAttributesV2( + timestamp: Quantity timestamp, + prevRandao: FixedBytes[32] randomData.data, + suggestedFeeRecipient: suggestedFeeRecipient, + withdrawals: withdrawals)) + +proc forkchoiceUpdated(rpcClient: RpcClient, + state: ForkchoiceStateV1, + payloadAttributes: ForkedPayloadAttributes): Future[ForkchoiceUpdatedResponse] = + if payloadAttributes == nil: + rpcClient.engine_forkchoiceUpdatedV1(state, none PayloadAttributesV1) else: - p.dataProvider.web3.provider.engine_forkchoiceUpdatedV2( - forkchoiceState, - some(engine_api.PayloadAttributesV2( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: suggestedFeeRecipient, - withdrawals: mapIt(withdrawals.get, it.asEngineWithdrawal)))) + case payloadAttributes.kind + of ForkedPayloadAttributesKind.v1: + rpcClient.engine_forkchoiceUpdatedV1(state, some payloadAttributes.v1) + of ForkedPayloadAttributesKind.v2: + rpcClient.engine_forkchoiceUpdatedV2(state, some payloadAttributes.v2) + +func computeBlockValue(blk: ExecutionPayloadV1): UInt256 {.raises: [RlpError, Defect].} = + for transactionBytes in blk.transactions: + var rlp = rlpFromBytes distinctBase(transactionBytes) + let transaction = rlp.read(eth_types.Transaction) + result += distinctBase(effectiveGasTip(transaction, blk.baseFeePerGas)).u256 + +proc getPayloadFromSingleEL( + connection: ELConnection, + GetPayloadResponseType: type, + isForkChoiceUpToDate: bool, + headBlock, safeBlock, finalizedBlock: Eth2Digest, + timestamp: uint64, + randomData: Eth2Digest, + suggestedFeeRecipient: Eth1Address, + withdrawals: seq[WithdrawalV1]): Future[GetPayloadResponseType] {.async.} = + + let + rpcClient = await connection.connectedRpcClient() + payloadId = if isForkChoiceUpToDate and connection.lastPayloadId.isSome: + connection.lastPayloadId.get + elif not headBlock.isZero: + engine_api_last_minute_forkchoice_updates_sent.inc(1, [connection.engineUrl.url]) + + let response = await rpcClient.forkchoiceUpdated( + ForkchoiceStateV1( + headBlockHash: headBlock.asBlockHash, + safeBlockHash: safeBlock.asBlockHash, + finalizedBlockHash: finalizedBlock.asBlockHash), + 
makeForkedPayloadAttributes( + GetPayloadResponseType, + timestamp, + randomData, + suggestedFeeRecipient, + withdrawals)) + + if response.payloadStatus.status != PayloadExecutionStatus.valid or + response.payloadId.isNone: + raise newException(CatchableError, "Head block is not a valid payload") + + # Give the EL some time to assemble the block + await sleepAsync(chronos.milliseconds 500) + + response.payloadId.get + else: + raise newException(CatchableError, "No confirmed execution head yet") + + when GetPayloadResponseType is CancunExecutionPayloadAndBlobs: + let + response = await engine_api.getPayload(rpcClient, + GetPayloadV3Response, + payloadId) + blobsBundle = await engine_getBlobsBundleV1(rpcClient, payloadId) + # TODO validate the blobs bundle + return CancunExecutionPayloadAndBlobs( + executionPayload: response.executionPayload, + blockValue: response.blockValue, + kzgs: blobsBundle.kzgs, # TODO Avoid the copies here with `move` + blobs: blobsBundle.blobs) + elif GetPayloadResponseType is BellatrixExecutionPayloadWithValue: + let payload = await engine_api.getPayload(rpcClient, ExecutionPayloadV1, payloadId) + return BellatrixExecutionPayloadWithValue( + executionPayload: payload, + blockValue: computeBlockValue payload) + else: + return await engine_api.getPayload(rpcClient, GetPayloadResponseType, payloadId) + +proc cmpGetPayloadResponses(lhs, rhs: SomeEnginePayloadWithValue): int = cmp(distinctBase lhs.blockValue, distinctBase rhs.blockValue) -# TODO can't be defined within exchangeTransitionConfiguration +template EngineApiResponseType*(T: type bellatrix.ExecutionPayloadForSigning): type = + BellatrixExecutionPayloadWithValue + +template EngineApiResponseType*(T: type capella.ExecutionPayloadForSigning): type = + engine_api.GetPayloadV2Response + +template EngineApiResponseType*(T: type deneb.ExecutionPayloadForSigning): type = + CancunExecutionPayloadAndBlobs + +template payload(response: engine_api.ExecutionPayloadV1): engine_api.ExecutionPayloadV1 = + response + +template payload(response: engine_api.GetPayloadV2Response): engine_api.ExecutionPayloadV1OrV2 = + response.executionPayload + +template payload(response: engine_api.GetPayloadV3Response): engine_api.ExecutionPayloadV3 = + response.executionPayload + +template toEngineWithdrawals*(withdrawals: seq[capella.Withdrawal]): seq[WithdrawalV1] = + mapIt(withdrawals, toEngineWithdrawal(it)) + +template toFork(T: type ExecutionPayloadV1): ConsensusFork = + ConsensusFork.Bellatrix + +template toFork(T: typedesc[ExecutionPayloadV1OrV2|ExecutionPayloadV2]): ConsensusFork = + ConsensusFork.Capella + +template toFork(T: type ExecutionPayloadV3): ConsensusFork = + ConsensusFork.Deneb + +proc getPayload*(m: ELManager, + PayloadType: type ForkyExecutionPayloadForSigning, + headBlock, safeBlock, finalizedBlock: Eth2Digest, + timestamp: uint64, + randomData: Eth2Digest, + suggestedFeeRecipient: Eth1Address, + withdrawals: seq[capella.Withdrawal]): + Future[Opt[PayloadType]] {.async.} = + if m.elConnections.len == 0: + return err() + + let + engineApiWithdrawals = toEngineWithdrawals withdrawals + let isFcUpToDate = m.nextExpectedPayloadParams.areSameAs( + headBlock, safeBlock, finalizedBlock, timestamp, + randomData, suggestedFeeRecipient, engineApiWithdrawals) + + let + timeout = when PayloadType is deneb.ExecutionPayloadForSigning: + # TODO We should follow the spec and track the timeouts of + # the individual engine API calls inside `getPayloadFromSingleEL`.
+ GETPAYLOAD_TIMEOUT + GETBLOBS_TIMEOUT + else: + GETPAYLOAD_TIMEOUT + deadline = sleepAsync(timeout) + requests = m.elConnections.mapIt(it.getPayloadFromSingleEL( + EngineApiResponseType(PayloadType), + isFcUpToDate, headBlock, safeBlock, finalizedBlock, + timestamp, randomData, suggestedFeeRecipient, engineApiWithdrawals + )) + requestsCompleted = allFutures(requests) + + await requestsCompleted or deadline + + var bestPayloadIdx = none int + for idx, req in requests: + if not req.finished: + req.cancel() + elif req.failed: + error "Failed to get execution payload from EL", + url = m.elConnections[idx].engineUrl.url, + err = req.error.msg + else: + const payloadFork = PayloadType.toFork + when payloadFork >= ConsensusFork.Capella: + when payloadFork == ConsensusFork.Capella: + # TODO: The engine_api module may offer an alternative API where it is guaranteed + # to return the correct response type (i.e. the rule below will be enforced + # during deserialization). + if req.read.executionPayload.withdrawals.isNone: + warn "Execution client did not return any withdrawals for a post-Shanghai block", + url = m.elConnections[idx].engineUrl.url + continue + + if engineApiWithdrawals != req.read.executionPayload.withdrawals.maybeDeref: + warn "Execution client did not return correct withdrawals", + withdrawals_from_cl = engineApiWithdrawals, + withdrawals_from_el = req.read.executionPayload.withdrawals + + if bestPayloadIdx.isNone: + bestPayloadIdx = some idx + else: + if cmpGetPayloadResponses(req.read, requests[bestPayloadIdx.get].read) > 0: + bestPayloadIdx = some idx + + if bestPayloadIdx.isSome: + return ok requests[bestPayloadIdx.get].read.asConsensusType + else: + return err() + +proc waitELToSyncDeposits(connection: ELConnection, + minimalRequiredBlock: BlockHash) {.async.} = + var rpcClient = await connection.connectedRpcClient() + + if connection.depositContractSyncStatus == DepositContractSyncStatus.synced: + return + + var attempt = 0 + + while true: + try: + discard awaitOrRaiseOnTimeout(rpcClient.getBlockByHash(minimalRequiredBlock), + web3RequestsTimeout) + connection.depositContractSyncStatus = DepositContractSyncStatus.synced + return + except CancelledError as err: + trace "waitELToSyncDepositContract cancelled", + url = connection.engineUrl.url + raise err + except CatchableError as err: + connection.depositContractSyncStatus = DepositContractSyncStatus.notSynced + if attempt == 0: + warn "Failed to obtain the most recent known block from the execution " & + "layer node (the node is probably not synced)", + url = connection.engineUrl.url, + blk = minimalRequiredBlock, + err = err.msg + elif attempt mod 60 == 0: + # This warning will be produced every 30 minutes + warn "Still failing to obtain the most recent known block from the " & + "execution layer node (the node is probably still not synced)", + url = connection.engineUrl.url, + blk = minimalRequiredBlock, + err = err.msg + await sleepAsync(seconds(30)) + rpcClient = await connection.connectedRpcClient() + +proc networkHasDepositContract(m: ELManager): bool = + not m.cfg.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue + +func mostRecentKnownBlock(m: ELManager): BlockHash = + if m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount() > 0: + m.eth1Chain.finalizedBlockHash.asBlockHash + else: + m.depositContractBlockHash + +proc selectConnectionForChainSyncing(m: ELManager): Future[ELConnection] {.async.} = + doAssert m.elConnections.len > 0 + + let connectionsFuts = mapIt( + m.elConnections, + if m.networkHasDepositContract: + 
FutureBase waitELToSyncDeposits(it, m.mostRecentKnownBlock) + else: + FutureBase connectedRpcClient(it)) + + let firstConnected = await firstCompletedFuture(connectionsFuts) + + # TODO: Ideally, the cancellation will be handled automatically + # by a helper like `firstCompletedFuture` + for future in connectionsFuts: + if future != firstConnected: + future.cancel() + + return m.elConnections[find(connectionsFuts, firstConnected)] + +proc getBlobsBundleFromASyncedEL( + m: ELManager, + payloadId: bellatrix.PayloadID): Future[BlobsBundleV1] {.async.} = + let + connection = await m.selectConnectionForChainSyncing() + rpcClient = await connection.connectedRpcClient() + + return await rpcClient.engine_getBlobsBundleV1(FixedBytes[8] payloadId) + +proc getBlobsBundleV1*( + m: ELManager, payloadId: bellatrix.PayloadID): + Future[Opt[BlobsBundleV1]] {.async.} = + if m.elConnections.len == 0: + return Opt.none BlobsBundleV1 + + return Opt.some: + try: + awaitWithTimeout( + m.getBlobsBundleFromASyncedEL(payload_id), + GETBLOBS_TIMEOUT): + # beacon_block_payload_errors.inc() + warn "Getting blobs sidecar from Engine API timed out", payload_id + return Opt.none BlobsBundleV1 + except CatchableError: + return Opt.none BlobsBundleV1 + +proc sendNewPayloadToSingleEL(connection: ELConnection, + payload: engine_api.ExecutionPayloadV1): + Future[PayloadStatusV1] {.async.} = + let rpcClient = await connection.connectedRpcClient() + return await rpcClient.engine_newPayloadV1(payload) + +proc sendNewPayloadToSingleEL(connection: ELConnection, + payload: engine_api.ExecutionPayloadV2): + Future[PayloadStatusV1] {.async.} = + let rpcClient = await connection.connectedRpcClient() + return await rpcClient.engine_newPayloadV2(payload) + +proc sendNewPayloadToSingleEL(connection: ELConnection, + payload: engine_api.ExecutionPayloadV3): + Future[PayloadStatusV1] {.async.} = + let rpcClient = await connection.connectedRpcClient() + return await rpcClient.engine_newPayloadV3(payload) type - EtcStatus {.pure.} = enum - exchangeError - mismatch - match + StatusRelation = enum + newStatusIsPreferable + oldStatusIsOk + disagreement + +proc compareStatuses(prevStatus, newStatus: PayloadExecutionStatus): StatusRelation = + case prevStatus + of PayloadExecutionStatus.syncing: + if newStatus == PayloadExecutionStatus.syncing: + oldStatusIsOk + else: + newStatusIsPreferable + + of PayloadExecutionStatus.valid: + case newStatus + of PayloadExecutionStatus.syncing, + PayloadExecutionStatus.accepted, + PayloadExecutionStatus.valid: + oldStatusIsOk + of PayloadExecutionStatus.invalid_block_hash, + PayloadExecutionStatus.invalid: + disagreement + + of PayloadExecutionStatus.invalid: + case newStatus + of PayloadExecutionStatus.syncing, + PayloadExecutionStatus.invalid: + oldStatusIsOk + of PayloadExecutionStatus.valid, + PayloadExecutionStatus.accepted, + PayloadExecutionStatus.invalid_block_hash: + disagreement + + of PayloadExecutionStatus.accepted: + case newStatus + of PayloadExecutionStatus.accepted, + PayloadExecutionStatus.syncing: + oldStatusIsOk + of PayloadExecutionStatus.valid: + newStatusIsPreferable + of PayloadExecutionStatus.invalid_block_hash, + PayloadExecutionStatus.invalid: + disagreement + + of PayloadExecutionStatus.invalid_block_hash: + if newStatus == PayloadExecutionStatus.invalid_block_hash: + oldStatusIsOk + else: + disagreement + +type + ELConsensusViolationDetector = object + selectedResponse: Option[int] + disagreementAlreadyDetected: bool + +proc init(T: type ELConsensusViolationDetector): T = + 
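+  ## No response selected yet and no disagreement observed;
+  ## `processResponse` updates both fields as EL responses arrive.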
ELConsensusViolationDetector(selectedResponse: none int,
+                               disagreementAlreadyDetected: false)
+
+proc processResponse[ELResponseType](
+    d: var ELConsensusViolationDetector,
+    connections: openArray[ELConnection],
+    requests: openArray[Future[ELResponseType]],
+    idx: int) =
+
+  doAssert requests[idx].completed
+
+  let status = try: requests[idx].read.status
+               except CatchableError: raiseAssert "checked above"
+  if d.selectedResponse.isNone:
+    d.selectedResponse = some idx
+  elif not d.disagreementAlreadyDetected:
+    let prevStatus = try: requests[d.selectedResponse.get].read.status
+                     except CatchableError: raiseAssert "previously checked"
+    case compareStatuses(prevStatus, status)
+    of newStatusIsPreferable:
+      d.selectedResponse = some idx
+    of oldStatusIsOk:
+      discard
+    of disagreement:
+      d.disagreementAlreadyDetected = true
+      error "Execution layer consensus violation detected",
+            responseType = name(ELResponseType),
+            url1 = connections[d.selectedResponse.get].engineUrl.url,
+            status1 = prevStatus,
+            url2 = connections[idx].engineUrl.url,
+            status2 = status
+
+proc sendNewPayload*(m: ELManager,
+                     payload: engine_api.ExecutionPayloadV1 |
+                              engine_api.ExecutionPayloadV2 |
+                              engine_api.ExecutionPayloadV3):
+                     Future[PayloadExecutionStatus] {.async.} =
+  let
+    earlyDeadline = sleepAsync(chronos.seconds 1)
+    deadline = sleepAsync(NEWPAYLOAD_TIMEOUT)
+    requests = m.elConnections.mapIt:
+      let req = sendNewPayloadToSingleEL(it, payload)
+      trackEngineApiRequest(it, req, "newPayload", deadline)
+      req
+
+    requestsCompleted = allFutures(requests)
+
+  # Wait up to one second first: if some EL has already responded by then,
+  # answer immediately instead of waiting out the full NEWPAYLOAD_TIMEOUT.
+  await requestsCompleted or earlyDeadline

-proc exchangeTransitionConfiguration*(p: Eth1Monitor): Future[EtcStatus] {.async.} =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
- if p.isNil: - debug "exchangeTransitionConfiguration: nil Eth1Monitor" - return EtcStatus.exchangeError - - let dataProvider = p.dataProvider - if dataProvider.isNil: - return EtcStatus.exchangeError - - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#engine_exchangetransitionconfigurationv1 - let consensusCfg = TransitionConfigurationV1( - terminalTotalDifficulty: p.depositsChain.cfg.TERMINAL_TOTAL_DIFFICULTY, - terminalBlockHash: p.depositsChain.cfg.TERMINAL_BLOCK_HASH, - terminalBlockNumber: Quantity 0) - let executionCfg = + var + stillPending = newSeq[Future[PayloadStatusV1]]() + responseProcessor = init ELConsensusViolationDetector + + for idx, req in requests: + if not req.finished: + stillPending.add req + elif not req.failed: + responseProcessor.processResponse(m.elConnections, requests, idx) + + if responseProcessor.disagreementAlreadyDetected: + return PayloadExecutionStatus.invalid + elif responseProcessor.selectedResponse.isSome: + return requests[responseProcessor.selectedResponse.get].read.status + + await requestsCompleted or deadline + + for idx, req in requests: + if req.completed and req in stillPending: + responseProcessor.processResponse(m.elConnections, requests, idx) + + return if responseProcessor.disagreementAlreadyDetected: + PayloadExecutionStatus.invalid + elif responseProcessor.selectedResponse.isSome: + requests[responseProcessor.selectedResponse.get].read.status + else: + PayloadExecutionStatus.syncing + +proc forkchoiceUpdatedForSingleEL( + connection: ELConnection, + state: ref ForkchoiceStateV1, + payloadAttributes: ForkedPayloadAttributes): + Future[PayloadStatusV1] {.async.} = + let + rpcClient = await connection.connectedRpcClient() + response = await rpcClient.forkchoiceUpdated(state[], payloadAttributes) + + if response.payloadStatus.status notin {syncing, valid, invalid}: + debug "Invalid fork-choice updated response from the EL", + payloadStatus = response.payloadStatus + return + + if response.payloadStatus.status == PayloadExecutionStatus.valid and + response.payloadId.isSome: + connection.lastPayloadId = response.payloadId + + return response.payloadStatus + +proc forkchoiceUpdated*(m: ELManager, + headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest, + payloadAttributes: ForkedPayloadAttributes = nil): + Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} = + doAssert not headBlockHash.isZero + + # Allow finalizedBlockHash to be 0 to avoid sync deadlocks. + # + # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events + # has "Before the first finalized block occurs in the system the finalized + # block hash provided by this event is stubbed with + # `0x0000000000000000000000000000000000000000000000000000000000000000`." 
+ # and + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/bellatrix/validator.md#executionpayload + # notes "`finalized_block_hash` is the hash of the latest finalized execution + # payload (`Hash32()` if none yet finalized)" + + if m.elConnections.len == 0: + return (PayloadExecutionStatus.syncing, none BlockHash) + + m.nextExpectedPayloadParams = some NextExpectedPayloadParams( + headBlockHash: headBlockHash, + safeBlockHash: safeBlockHash, + finalizedBlockHash: finalizedBlockHash, + payloadAttributes: payloadAttributes) + + let + state = newClone ForkchoiceStateV1( + headBlockHash: headBlockHash.asBlockHash, + safeBlockHash: safeBlockHash.asBlockHash, + finalizedBlockHash: finalizedBlockHash.asBlockHash) + earlyDeadline = sleepAsync(chronos.seconds 1) + deadline = sleepAsync(FORKCHOICEUPDATED_TIMEOUT) + requests = m.elConnections.mapIt: + let req = it.forkchoiceUpdatedForSingleEL(state, payloadAttributes) + trackEngineApiRequest(it, req, "forkchoiceUpdated", deadline) + req + requestsCompleted = allFutures(requests) + + await requestsCompleted or earlyDeadline + + var + stillPending = newSeq[Future[PayloadStatusV1]]() + responseProcessor = init ELConsensusViolationDetector + + for idx, req in requests: + if not req.finished: + stillPending.add req + elif not req.failed: + responseProcessor.processResponse(m.elConnections, requests, idx) + + if responseProcessor.disagreementAlreadyDetected: + return (PayloadExecutionStatus.invalid, none BlockHash) + elif responseProcessor.selectedResponse.isSome: + return (requests[responseProcessor.selectedResponse.get].read.status, + requests[responseProcessor.selectedResponse.get].read.latestValidHash) + + await requestsCompleted or deadline + + for idx, req in requests: + if req.completed and req in stillPending: + responseProcessor.processResponse(m.elConnections, requests, idx) + + return if responseProcessor.disagreementAlreadyDetected: + (PayloadExecutionStatus.invalid, none BlockHash) + elif responseProcessor.selectedResponse.isSome: + (requests[responseProcessor.selectedResponse.get].read.status, + requests[responseProcessor.selectedResponse.get].read.latestValidHash) + else: + (PayloadExecutionStatus.syncing, none BlockHash) + +proc forkchoiceUpdatedNoResult*(m: ELManager, + headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest, + payloadAttributes: ForkedPayloadAttributes = nil) {.async.} = + discard await m.forkchoiceUpdated( + headBlockHash, safeBlockHash, finalizedBlockHash, payloadAttributes) + +# TODO can't be defined within exchangeConfigWithSingleEL +proc `==`(x, y: Quantity): bool {.borrow, noSideEffect.} + +proc exchangeConfigWithSingleEL(m: ELManager, connection: ELConnection) {.async.} = + let rpcClient = await connection.connectedRpcClient() + + if m.eth1Network.isSome and + connection.etcStatus == EtcStatus.notExchangedYet: try: - awaitWithRetries( - dataProvider.web3.provider.engine_exchangeTransitionConfigurationV1( - consensusCfg), + let + providerChain = + awaitOrRaiseOnTimeout(rpcClient.eth_chainId(), web3RequestsTimeout) + + # https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids + expectedChain = case m.eth1Network.get + of mainnet: 1.Quantity + of ropsten: 3.Quantity + of rinkeby: 4.Quantity + of goerli: 5.Quantity + of sepolia: 11155111.Quantity # https://chainid.network/ + if expectedChain != providerChain: + warn "The specified EL client is connected to a different chain", + url = connection.engineUrl, + expectedChain = distinctBase(expectedChain), + actualChain = 
distinctBase(providerChain) + connection.etcStatus = EtcStatus.mismatch + return + except CatchableError as exc: + # Typically because it's not synced through EIP-155, assuming this Web3 + # endpoint has been otherwise working. + debug "Failed to obtain eth_chainId", + error = exc.msg + + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/specification.md#engine_exchangetransitionconfigurationv1 + let + ourConf = TransitionConfigurationV1( + terminalTotalDifficulty: m.eth1Chain.cfg.TERMINAL_TOTAL_DIFFICULTY, + terminalBlockHash: m.eth1Chain.cfg.TERMINAL_BLOCK_HASH, + terminalBlockNumber: Quantity 0) + elConf = try: + awaitOrRaiseOnTimeout( + rpcClient.engine_exchangeTransitionConfigurationV1(ourConf), timeout = 1.seconds) except CatchableError as err: - warn "Failed to exchange transition configuration", err = err.msg - return EtcStatus.exchangeError + error "Failed to exchange transition configuration", + url = connection.engineUrl, err = err.msg + connection.etcStatus = EtcStatus.exchangeError + return - return - if consensusCfg.terminalTotalDifficulty != executionCfg.terminalTotalDifficulty: + connection.etcStatus = + if ourConf.terminalTotalDifficulty != elConf.terminalTotalDifficulty: error "Engine API configured with different terminal total difficulty", - engineAPI_value = executionCfg.terminalTotalDifficulty, - localValue = consensusCfg.terminalTotalDifficulty + engineAPI_value = elConf.terminalTotalDifficulty, + localValue = ourConf.terminalTotalDifficulty EtcStatus.mismatch - elif consensusCfg.terminalBlockNumber != executionCfg.terminalBlockNumber: + elif ourConf.terminalBlockNumber != elConf.terminalBlockNumber: warn "Engine API reporting different terminal block number", - engineAPI_value = executionCfg.terminalBlockNumber.uint64, - localValue = consensusCfg.terminalBlockNumber.uint64 + engineAPI_value = elConf.terminalBlockNumber.uint64, + localValue = ourConf.terminalBlockNumber.uint64 EtcStatus.mismatch - elif consensusCfg.terminalBlockHash != executionCfg.terminalBlockHash: + elif ourConf.terminalBlockHash != elConf.terminalBlockHash: warn "Engine API reporting different terminal block hash", - engineAPI_value = executionCfg.terminalBlockHash, - localValue = consensusCfg.terminalBlockHash + engineAPI_value = elConf.terminalBlockHash, + localValue = ourConf.terminalBlockHash EtcStatus.mismatch else: - if not p.exchangedConfiguration: + if connection.etcStatus == EtcStatus.notExchangedYet: # Log successful engine configuration exchange once at startup - p.exchangedConfiguration = true - info "Exchanged engine configuration", - terminalTotalDifficulty = executionCfg.terminalTotalDifficulty, - terminalBlockHash = executionCfg.terminalBlockHash, - terminalBlockNumber = executionCfg.terminalBlockNumber.uint64 + info "Successfully exchanged engine configuration", + url = connection.engineUrl EtcStatus.match +proc exchangeTransitionConfiguration*(m: ELManager) {.async.} = + if m.elConnections.len == 0: + return + + let + deadline = sleepAsync(3.seconds) + requests = m.elConnections.mapIt(m.exchangeConfigWithSingleEL(it)) + requestsCompleted = allFutures(requests) + + await requestsCompleted or deadline + + for idx, req in requests: + if not req.finished: + m.elConnections[idx].etcStatus = EtcStatus.exchangeError + req.cancel() + template readJsonField(j: JsonNode, fieldName: string, ValueType: type): untyped = var res: ValueType fromJson(j[fieldName], fieldName, res) @@ -701,10 +1358,11 @@ template readJsonField(j: JsonNode, fieldName: string, ValueType: 
type): untyped
   var res: ValueType
   fromJson(j[fieldName], fieldName, res)

 template init[N: static int](T: type DynamicBytes[N, N]): T =
   T newSeq[byte](N)

-proc fetchTimestampWithRetries(blkParam: Eth1Block, p: Web3DataProviderRef) {.async.} =
-  let blk = blkParam
-  let web3block = awaitWithRetries(
-    p.getBlockByHash(blk.hash.asBlockHash))
+proc fetchTimestampWithRetries(rpcClient: RpcClient,
+                               blk: Eth1Block) {.async.} =
+  let web3block = awaitOrRaiseOnTimeout(
+    rpcClient.getBlockByHash(blk.hash.asBlockHash),
+    web3RequestsTimeout)
   blk.timestamp = Eth1BlockTimestamp web3block.timestamp

 func depositEventsToBlocks(depositsList: JsonNode): seq[Eth1Block] {.
@@ -768,20 +1426,16 @@ type
     DepositCountIncorrect
     DepositCountUnavailable

-template awaitOrRaiseOnTimeout[T](fut: Future[T],
-                                  timeout: Duration): T =
-  awaitWithTimeout(fut, timeout):
-    raise newException(DataProviderTimeout, "Timeout")
-
 when hasDepositRootChecks:
   const
     contractCallTimeout = 60.seconds

-  proc fetchDepositContractData(p: Web3DataProviderRef, blk: Eth1Block):
-      Future[DepositContractDataStatus] {.async.} =
+  proc fetchDepositContractData(rpcClient: RpcClient,
+                                depositContract: Sender[DepositContract],
+                                blk: Eth1Block): Future[DepositContractDataStatus] {.async.} =
     let
-      depositRoot = p.ns.get_deposit_root.call(blockNumber = blk.number)
-      rawCount = p.ns.get_deposit_count.call(blockNumber = blk.number)
+      depositRoot = depositContract.get_deposit_root.call(blockNumber = blk.number)
+      rawCount = depositContract.get_deposit_count.call(blockNumber = blk.number)

     try:
       let fetchedRoot = asEth2Digest(
@@ -812,14 +1466,6 @@ when hasDepositRootChecks:
         err = err.msg
       result = DepositCountUnavailable

-proc onBlockHeaders(p: Web3DataProviderRef,
-                    blockHeaderHandler: BlockHeaderHandler,
-                    errorHandler: SubscriptionErrorHandler) {.async.} =
-  info "Waiting for new Eth1 block headers"
-
-  p.blockHeadersSubscription = awaitWithRetries(
-    p.web3.subscribeForBlockHeaders(blockHeaderHandler, errorHandler))
-
 proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) =
   ## Called on block finalization to delete old and now redundant data.
   let initialChunks = chain.finalizedDepositsMerkleizer.getChunkCount
@@ -907,7 +1553,7 @@ proc trackFinalizedState(chain: var Eth1Chain,
                          finalizedEth1Data: Eth1Data,
                          finalizedStateDepositIndex: uint64,
                          blockProposalExpected = false): bool =
-  ## This function will return true if the Eth1Monitor is synced
+  ## This function will return true if the ELManager is synced
   ## to the finalization point.
if chain.blocks.len == 0: @@ -946,10 +1592,10 @@ proc trackFinalizedState(chain: var Eth1Chain, if result: chain.pruneOldBlocks(finalizedStateDepositIndex) -template trackFinalizedState*(m: Eth1Monitor, +template trackFinalizedState*(m: ELManager, finalizedEth1Data: Eth1Data, finalizedStateDepositIndex: uint64): bool = - trackFinalizedState(m.depositsChain, finalizedEth1Data, finalizedStateDepositIndex) + trackFinalizedState(m.eth1Chain, finalizedEth1Data, finalizedStateDepositIndex) # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#get_eth1_data proc getBlockProposalData*(chain: var Eth1Chain, @@ -1032,44 +1678,19 @@ proc getBlockProposalData*(chain: var Eth1Chain, else: result.hasMissingDeposits = true -template getBlockProposalData*(m: Eth1Monitor, +template getBlockProposalData*(m: ELManager, state: ForkedHashedBeaconState, finalizedEth1Data: Eth1Data, finalizedStateDepositIndex: uint64): BlockProposalEth1Data = getBlockProposalData( - m.depositsChain, state, finalizedEth1Data, finalizedStateDepositIndex) + m.eth1Chain, state, finalizedEth1Data, finalizedStateDepositIndex) -proc getJsonRpcRequestHeaders(jwtSecret: Option[seq[byte]]): - auto = - if jwtSecret.isSome: - let secret = jwtSecret.get - (proc(): seq[(string, string)] = - # https://www.rfc-editor.org/rfc/rfc6750#section-6.1.1 - @[("Authorization", "Bearer " & getSignedIatToken( - secret, (getTime() - initTime(0, 0)).inSeconds))]) - else: - (proc(): seq[(string, string)] = @[]) - -proc new*(T: type Web3DataProvider, - depositContractAddress: Eth1Address, - web3Url: string, - jwtSecret: Option[seq[byte]]): - Future[Result[Web3DataProviderRef, string]] {.async.} = - let web3Fut = newWeb3(web3Url, getJsonRpcRequestHeaders(jwtSecret)) - yield web3Fut or sleepAsync(10.seconds) - if (not web3Fut.finished) or web3Fut.failed: - await cancelAndWait(web3Fut) - if web3Fut.failed: - return err "Failed to setup web3 connection: " & web3Fut.readError.msg - else: - return err "Failed to setup web3 connection" - - let - web3 = web3Fut.read - ns = web3.contractSender(DepositContract, depositContractAddress) - - return ok Web3DataProviderRef(url: web3Url, web3: web3, ns: ns) +proc new*(T: type ELConnection, + engineUrl: EngineApiUrl): T = + ELConnection( + engineUrl: engineUrl, + depositContractSyncStatus: DepositContractSyncStatus.unknown) template getOrDefault[T, E](r: Result[T, E]): T = type TT = T @@ -1105,54 +1726,27 @@ proc init*(T: type Eth1Chain, finalizedDepositsMerkleizer: m, headMerkleizer: copy m) -proc getBlock(provider: Web3DataProviderRef, id: BlockHashOrNumber): - Future[BlockObject] = - if id.isHash: - let hash = id.hash.asBlockHash() - return provider.getBlockByHash(hash) - else: - return provider.getBlockByNumber(id.number) - -proc currentEpoch(m: Eth1Monitor): Epoch = - if m.getBeaconTime != nil: - m.getBeaconTime().slotOrZero.epoch - else: - Epoch 0 +proc new*(T: type ELManager, + cfg: RuntimeConfig, + depositContractBlockNumber: uint64, + depositContractBlockHash: Eth2Digest, + db: BeaconChainDB, + engineApiUrls: seq[EngineApiUrl], + eth1Network: Option[Eth1Network]): T = + let + eth1Chain = Eth1Chain.init( + cfg, db, depositContractBlockNumber, depositContractBlockHash) -proc init*(T: type Eth1Monitor, - cfg: RuntimeConfig, - depositContractBlockNumber: uint64, - depositContractBlockHash: Eth2Digest, - db: BeaconChainDB, - getBeaconTime: GetBeaconTimeFn, - web3Urls: seq[string], - eth1Network: Option[Eth1Network], - forcePolling: bool, - jwtSecret: Option[seq[byte]]): T = - doAssert 
web3Urls.len > 0 - var web3Urls = web3Urls - for url in mitems(web3Urls): - fixupWeb3Urls url - - debug "Initializing Eth1Monitor", + debug "Initializing ELManager", depositContractBlockNumber, depositContractBlockHash - let eth1Chain = Eth1Chain.init( - cfg, db, depositContractBlockNumber, depositContractBlockHash) - - T(state: Initialized, - depositsChain: eth1Chain, + T(eth1Chain: eth1Chain, depositContractAddress: cfg.DEPOSIT_CONTRACT_ADDRESS, - depositContractDeployedAt: BlockHashOrNumber( - isHash: true, - hash: depositContractBlockHash), - getBeaconTime: getBeaconTime, - web3Urls: web3Urls, + depositContractBlockNumber: depositContractBlockNumber, + depositContractBlockHash: depositContractBlockHash.asBlockHash, + elConnections: mapIt(engineApiUrls, ELConnection.new(it)), eth1Network: eth1Network, - eth1Progress: newAsyncEvent(), - forcePolling: forcePolling, - jwtSecret: jwtSecret, blocksPerLogsRequest: targetBlocksPerLogsRequest) proc safeCancel(fut: var Future[void]) = @@ -1166,92 +1760,34 @@ func clear(chain: var Eth1Chain) = chain.headMerkleizer = copy chain.finalizedDepositsMerkleizer chain.hasConsensusViolation = false -proc detectPrimaryProviderComingOnline(m: Eth1Monitor) {.async.} = - const checkInterval = 30.seconds - - let - web3Url = m.web3Urls[0] - initialRunFut = m.runFut - - # This is a way to detect that the monitor was restarted. When this - # happens, this function will just return terminating the "async thread" - while m.runFut == initialRunFut: - let tempProviderRes = await Web3DataProvider.new( - m.depositContractAddress, - web3Url, - m.jwtSecret) - - if tempProviderRes.isErr: - await sleepAsync(checkInterval) - continue - - var tempProvider = tempProviderRes.get +proc doStop(m: ELManager) {.async.} = + safeCancel m.chainSyncingLoopFut + safeCancel m.exchangeTransitionConfigurationLoopFut - # Use one of the get/request-type methods from - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/common.md#underlying-protocol - # which doesn't take parameters and returns a small structure, to ensure - # this works with engine API endpoints. 
- let testRequest = tempProvider.web3.provider.eth_syncing() + if m.elConnections.len > 0: + let closeConnectionFutures = mapIt(m.elConnections, close(it)) + await allFutures(closeConnectionFutures) - yield testRequest or sleepAsync(web3Timeouts) - - traceAsyncErrors tempProvider.close() - - if testRequest.completed and m.state == Started: - m.state = ReadyToRestartToPrimary - return - else: - await sleepAsync(checkInterval) - -proc doStop(m: Eth1Monitor) {.async.} = - safeCancel m.runFut - - if m.dataProvider != nil: - awaitWithTimeout(m.dataProvider.close(), 30.seconds): - debug "Failed to close data provider in time" - m.dataProvider = nil - -proc ensureDataProvider*(m: Eth1Monitor) {.async.} = - if m.isNil or not m.dataProvider.isNil: - return - - let web3Url = m.web3Urls[m.startIdx mod m.web3Urls.len] - inc m.startIdx - - m.dataProvider = block: - let v = await Web3DataProvider.new( - m.depositContractAddress, web3Url, m.jwtSecret) - if v.isErr(): - raise (ref CatchableError)(msg: v.error()) - info "Established connection to execution layer", url = web3Url - v.get() - -proc stop(m: Eth1Monitor) {.async.} = - if m.state in {Started, ReadyToRestartToPrimary}: - m.state = Stopping - m.stopFut = m.doStop() +proc stop(m: ELManager) {.async.} = + if not m.stopFut.isNil: await m.stopFut - m.state = Stopped - elif m.state == Stopping: + else: + m.stopFut = m.doStop() await m.stopFut + m.stopFut = nil const votedBlocksSafetyMargin = 50 -func latestEth1BlockNumber(m: Eth1Monitor): Eth1BlockNumber = - if m.latestEth1Block.isSome: - Eth1BlockNumber m.latestEth1Block.get.number - else: - Eth1BlockNumber 0 - -func earliestBlockOfInterest(m: Eth1Monitor): Eth1BlockNumber = - m.latestEth1BlockNumber - (2 * m.cfg.ETH1_FOLLOW_DISTANCE) - votedBlocksSafetyMargin +func earliestBlockOfInterest(m: ELManager, latestEth1BlockNumber: Eth1BlockNumber): Eth1BlockNumber = + latestEth1BlockNumber - (2 * m.cfg.ETH1_FOLLOW_DISTANCE) - votedBlocksSafetyMargin -proc syncBlockRange(m: Eth1Monitor, +proc syncBlockRange(m: ELManager, + rpcClient: RpcClient, + depositContract: Sender[DepositContract], fromBlock, toBlock, fullSyncFromBlock: Eth1BlockNumber) {.gcsafe, async.} = - doAssert m.dataProvider != nil, "close not called concurrently" - doAssert m.depositsChain.blocks.len > 0 + doAssert m.eth1Chain.blocks.len > 0 var currentBlock = fromBlock while currentBlock <= toBlock: @@ -1273,14 +1809,14 @@ proc syncBlockRange(m: Eth1Monitor, # Reduce all request rate until we have a more general solution # for dealing with Infura's rate limits await sleepAsync(milliseconds(backoff)) - let jsonLogsFut = m.dataProvider.ns.getJsonLogs( + let jsonLogsFut = depositContract.getJsonLogs( DepositEvent, fromBlock = some blockId(currentBlock), toBlock = some blockId(maxBlockNumberRequested)) depositLogs = try: # Downloading large amounts of deposits may take several minutes - awaitWithTimeout(jsonLogsFut, web3Timeouts): + awaitWithTimeout(jsonLogsFut, 60.seconds): raise newException(DataProviderTimeout, "Request time out while obtaining json logs") except CatchableError as err: @@ -1303,20 +1839,22 @@ proc syncBlockRange(m: Eth1Monitor, for i in 0 ..< blocksWithDeposits.len: let blk = blocksWithDeposits[i] - await blk.fetchTimestampWithRetries(m.dataProvider) + debug "Fetching block timestamp", blockNum = blk.number + await rpcClient.fetchTimestampWithRetries(blk) if blk.number > fullSyncFromBlock: - let lastBlock = m.depositsChain.blocks.peekLast + let lastBlock = m.eth1Chain.blocks.peekLast for n in max(lastBlock.number + 1, 
fullSyncFromBlock) ..< blk.number: debug "Obtaining block without deposits", blockNum = n - let blockWithoutDeposits = awaitWithRetries( - m.dataProvider.getBlockByNumber(n)) + let blockWithoutDeposits = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(n), + web3RequestsTimeout) - m.depositsChain.addBlock( + m.eth1Chain.addBlock( lastBlock.makeSuccessorWithoutDeposits(blockWithoutDeposits)) eth1_synced_head.set blockWithoutDeposits.number.toGaugeValue - m.depositsChain.addBlock blk + m.eth1Chain.addBlock blk eth1_synced_head.set blk.number.toGaugeValue if blocksWithDeposits.len > 0: @@ -1324,7 +1862,9 @@ proc syncBlockRange(m: Eth1Monitor, template lastBlock: auto = blocksWithDeposits[lastIdx] let status = when hasDepositRootChecks: - awaitWithRetries m.dataProvider.fetchDepositContractData(lastBlock) + awaitOrRaiseOnTimeout( + rpcClient.fetchDepositContractData(depositContract, lastBlock), + web3RequestsTimeout) else: DepositRootUnavailable @@ -1348,33 +1888,44 @@ proc syncBlockRange(m: Eth1Monitor, func init(T: type FullBlockId, blk: Eth1BlockHeader|BlockObject): T = FullBlockId(number: Eth1BlockNumber blk.number, hash: blk.hash) -func isNewLastBlock(m: Eth1Monitor, blk: Eth1BlockHeader|BlockObject): bool = +func isNewLastBlock(m: ELManager, blk: Eth1BlockHeader|BlockObject): bool = m.latestEth1Block.isNone or blk.number.uint64 > m.latestEth1BlockNumber -proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = - if m.state == Started: - return +func hasProperlyConfiguredConnection*(m: ELManager): bool = + for connection in m.elConnections: + if connection.etcStatus == EtcStatus.match: + return true - let isFirstRun = m.state == Initialized - let needsReset = m.state in {Failed, ReadyToRestartToPrimary} + return false - m.state = Started +proc startExchangeTransitionConfigurationLoop(m: ELManager) {.async.} = + debug "Starting exchange transition configuration loop" - if delayBeforeStart != ZeroDuration: - await sleepAsync(delayBeforeStart) + if not m.hasProperlyConfiguredConnection: + await m.exchangeTransitionConfiguration() + if not m.hasProperlyConfiguredConnection: + fatal "The Bellatrix hard fork requires the beacon node to be connected to a properly configured Engine API end-point. " & + "See https://nimbus.guide/merge.html for more details." + quit 1 - # If the monitor died with an exception, the web3 provider may be in - # an arbitary state, so we better reset it (not doing this has resulted - # in resource leaks historically). - if not m.dataProvider.isNil and needsReset: - # We introduce a local var to eliminate the risk of scheduling two - # competing calls to `close` below. 
- let provider = m.dataProvider - m.dataProvider = nil - await provider.close() + while true: + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/specification.md#engine_exchangetransitionconfigurationv1 + await sleepAsync(60.seconds) + debug "Exchange transition configuration tick" + traceAsyncErrors m.exchangeTransitionConfiguration() + +proc syncEth1Chain(m: ELManager, connection: ELConnection) {.async.} = + let rpcClient = await connection.connectedRpcClient() + + let + shouldProcessDeposits = not ( + m.depositContractAddress.isZeroMemory or + m.eth1Chain.finalizedBlockHash.data.isZeroMemory) + + trace "Starting syncEth1Chain", shouldProcessDeposits - await m.ensureDataProvider() - doAssert m.dataProvider != nil, "close not called concurrently" + logScope: + url = connection.engineUrl.url # We might need to reset the chain if the new provider disagrees # with the previous one regarding the history of the chain or if @@ -1386,179 +1937,132 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = # when they don't indicate any errors in the response. When this # happens, we are usually able to download the data successfully # on the second attempt. - if m.latestEth1Block.isSome and m.depositsChain.blocks.len > 0: - let needsReset = m.depositsChain.hasConsensusViolation or (block: + # + # TODO + # Perhaps the above problem was manifesting only with the obsolete + # JSON-RPC data providers, which can no longer be used with Nimbus. + if m.eth1Chain.blocks.len > 0: + let needsReset = m.eth1Chain.hasConsensusViolation or (block: let - lastKnownBlock = m.depositsChain.blocks.peekLast - matchingBlockAtNewProvider = awaitWithRetries( - m.dataProvider.getBlockByNumber lastKnownBlock.number) + lastKnownBlock = m.eth1Chain.blocks.peekLast + matchingBlockAtNewProvider = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(lastKnownBlock.number), + web3RequestsTimeout) lastKnownBlock.hash.asBlockHash != matchingBlockAtNewProvider.hash) if needsReset: - m.depositsChain.clear() - m.latestEth1Block = none(FullBlockId) - - template web3Url: string = m.dataProvider.url - - if web3Url != m.web3Urls[0]: - asyncSpawn m.detectPrimaryProviderComingOnline() - - info "Starting Eth1 deposit contract monitoring", - contract = $m.depositContractAddress - - if isFirstRun and m.eth1Network.isSome: - try: - let - providerChain = - awaitWithRetries m.dataProvider.web3.provider.eth_chainId() - - # https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids - expectedChain = case m.eth1Network.get - of mainnet: 1.Quantity - of ropsten: 3.Quantity - of rinkeby: 4.Quantity - of goerli: 5.Quantity - of sepolia: 11155111.Quantity # https://chainid.network/ - if expectedChain != providerChain: - fatal "The specified Web3 provider serves data for a different chain", - expectedChain = distinctBase(expectedChain), - providerChain = distinctBase(providerChain) - quit 1 - except CatchableError as exc: - # Typically because it's not synced through EIP-155, assuming this Web3 - # endpoint has been otherwise working. 
- debug "startEth1Syncing: eth_chainId failed: ", - error = exc.msg - - var mustUsePolling = m.forcePolling or - web3Url.startsWith("http://") or - web3Url.startsWith("https://") - - if not mustUsePolling: - proc newBlockHeadersHandler(blk: Eth1BlockHeader) - {.raises: [Defect], gcsafe.} = - try: - if m.isNewLastBlock(blk): - eth1_latest_head.set blk.number.toGaugeValue - m.latestEth1Block = some FullBlockId.init(blk) - m.eth1Progress.fire() - except Exception: - # TODO Investigate why this exception is being raised - raiseAssert "AsyncEvent.fire should not raise exceptions" - - proc subscriptionErrorHandler(err: CatchableError) - {.raises: [Defect], gcsafe.} = - warn "Failed to subscribe for block headers. Switching to polling", - err = err.msg - mustUsePolling = true - - await m.dataProvider.onBlockHeaders(newBlockHeadersHandler, - subscriptionErrorHandler) - - let shouldProcessDeposits = not ( - m.depositContractAddress.isZeroMemory or - m.depositsChain.finalizedBlockHash.data.isZeroMemory) + trace "Resetting the Eth1 chain", + hasConsensusViolation = m.eth1Chain.hasConsensusViolation + m.eth1Chain.clear() var eth1SyncedTo: Eth1BlockNumber if shouldProcessDeposits: - if m.depositsChain.blocks.len == 0: - let startBlock = awaitWithRetries( - m.dataProvider.getBlockByHash( - m.depositsChain.finalizedBlockHash.asBlockHash)) - - m.depositsChain.addBlock Eth1Block( - hash: m.depositsChain.finalizedBlockHash, + if m.eth1Chain.blocks.len == 0: + let finalizedBlockHash = m.eth1Chain.finalizedBlockHash.asBlockHash + let startBlock = + awaitOrRaiseOnTimeout(rpcClient.getBlockByHash(finalizedBlockHash), + web3RequestsTimeout) + + m.eth1Chain.addBlock Eth1Block( + hash: m.eth1Chain.finalizedBlockHash, number: Eth1BlockNumber startBlock.number, timestamp: Eth1BlockTimestamp startBlock.timestamp) - eth1SyncedTo = Eth1BlockNumber m.depositsChain.blocks[^1].number + eth1SyncedTo = m.eth1Chain.blocks[^1].number eth1_synced_head.set eth1SyncedTo.toGaugeValue eth1_finalized_head.set eth1SyncedTo.toGaugeValue eth1_finalized_deposits.set( - m.depositsChain.finalizedDepositsMerkleizer.getChunkCount.toGaugeValue) + m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount.toGaugeValue) - debug "Starting Eth1 syncing", `from` = shortLog(m.depositsChain.blocks[^1]) + debug "Starting Eth1 syncing", `from` = shortLog(m.eth1Chain.blocks[^1]) var didPollOnce = false while true: + debug "syncEth1Chain tick" + if bnStatus == BeaconNodeStatus.Stopping: await m.stop() return - if m.depositsChain.hasConsensusViolation: + if m.eth1Chain.hasConsensusViolation: raise newException(CorruptDataProvider, "Eth1 chain contradicts Eth2 consensus") - if m.state == ReadyToRestartToPrimary: - info "Primary web3 provider is back online. 
Restarting the Eth1 monitor" - m.startIdx = 0 - return - - let nextBlock = if mustUsePolling or not didPollOnce: - let blk = awaitWithRetries( - m.dataProvider.web3.provider.eth_getBlockByNumber(blockId("latest"), false)) + let latestBlock = try: + awaitOrRaiseOnTimeout( + rpcClient.eth_getBlockByNumber(blockId("latest"), false), + web3RequestsTimeout) + except CatchableError as err: + error "Failed to obtain the latest block from the EL", err = err.msg + raise err - # Same as when handling events, minus `m.eth1Progress` round trip - if m.isNewLastBlock(blk): - eth1_latest_head.set blk.number.toGaugeValue - m.latestEth1Block = some FullBlockId.init(blk) - elif mustUsePolling: - await sleepAsync(m.cfg.SECONDS_PER_ETH1_BLOCK.int.seconds) - continue + m.syncTargetBlock = some( + if Eth1BlockNumber(latestBlock.number) > m.cfg.ETH1_FOLLOW_DISTANCE: + Eth1BlockNumber(latestBlock.number) - m.cfg.ETH1_FOLLOW_DISTANCE else: - doAssert not didPollOnce + Eth1BlockNumber(0)) + if m.syncTargetBlock.get <= eth1SyncedTo: + # The chain reorged to a lower height. + # It's relatively safe to ignore that. + await sleepAsync(m.cfg.SECONDS_PER_ETH1_BLOCK.int.seconds) + continue - didPollOnce = true - blk - else: - awaitWithTimeout(m.eth1Progress.wait(), 5.minutes): - raise newException(CorruptDataProvider, "No eth1 chain progress for too long") + eth1_latest_head.set latestBlock.number.toGaugeValue - m.eth1Progress.clear() + if shouldProcessDeposits and + latestBlock.number.uint64 > m.cfg.ETH1_FOLLOW_DISTANCE: + let depositContract = connection.web3.get.contractSender( + DepositContract, m.depositContractAddress) + await m.syncBlockRange(rpcClient, + depositContract, + eth1SyncedTo + 1, + m.syncTargetBlock.get, + m.earliestBlockOfInterest(Eth1BlockNumber latestBlock.number)) - doAssert m.latestEth1Block.isSome - awaitWithRetries m.dataProvider.getBlockByHash(m.latestEth1Block.get.hash) + eth1SyncedTo = m.syncTargetBlock.get + eth1_synced_head.set eth1SyncedTo.toGaugeValue - if shouldProcessDeposits: - if m.latestEth1BlockNumber <= m.cfg.ETH1_FOLLOW_DISTANCE: - continue +proc startChainSyncingLoop(m: ELManager) {.async.} = + info "Starting execution layer deposits syncing", + contract = $m.depositContractAddress - let targetBlock = m.latestEth1BlockNumber - m.cfg.ETH1_FOLLOW_DISTANCE - if targetBlock <= eth1SyncedTo: + while true: + let connection = awaitWithTimeout( + m.selectConnectionForChainSyncing(), + chronos.seconds(60)): + error "No suitable EL connection for deposit syncing" + await sleepAsync(chronos.seconds(30)) continue - let earliestBlockOfInterest = m.earliestBlockOfInterest() - await m.syncBlockRange(eth1SyncedTo + 1, - targetBlock, - earliestBlockOfInterest) - eth1SyncedTo = targetBlock - eth1_synced_head.set eth1SyncedTo.toGaugeValue - -proc start(m: Eth1Monitor, delayBeforeStart: Duration) {.gcsafe.} = - if m.runFut.isNil: - let runFut = m.startEth1Syncing(delayBeforeStart) - m.runFut = runFut - runFut.addCallback do (p: pointer) {.gcsafe.}: - if runFut.failed: - if runFut == m.runFut: - warn "Eth1 chain monitoring failure, restarting", err = runFut.error.msg - m.state = Failed - - safeCancel m.runFut - m.start(5.seconds) - -proc start*(m: Eth1Monitor) = - m.start(0.seconds) + try: + await syncEth1Chain(m, connection) + except CatchableError as err: + error "EL connection failure while syncing deposits", + url = connection.engineUrl.url, err = err.msg + await sleepAsync(5.seconds) + +proc start*(m: ELManager) {.gcsafe.} = + if m.elConnections.len == 0: + return + + ## Calling 
`ELManager.start()` on an already started ELManager is a noop + if m.chainSyncingLoopFut.isNil: + m.chainSyncingLoopFut = + m.startChainSyncingLoop() + + if m.hasJwtSecret and m.exchangeTransitionConfigurationLoopFut.isNil: + m.exchangeTransitionConfigurationLoopFut = + m.startExchangeTransitionConfigurationLoop() proc getEth1BlockHash*( - url: string, blockId: RtBlockIdentifier, jwtSecret: Option[seq[byte]]): + url: EngineApiUrl, blockId: RtBlockIdentifier, jwtSecret: Option[seq[byte]]): Future[BlockHash] {.async.} = - let web3 = awaitOrRaiseOnTimeout(newWeb3(url, getJsonRpcRequestHeaders(jwtSecret)), - 10.seconds) + let web3 = awaitOrRaiseOnTimeout(url.newWeb3(), 10.seconds) try: - let blk = awaitWithRetries( - web3.provider.eth_getBlockByNumber(blockId, false)) + let blk = awaitOrRaiseOnTimeout( + web3.provider.eth_getBlockByNumber(blockId, false), + web3RequestsTimeout) return blk.hash finally: await web3.close() @@ -1588,7 +2092,7 @@ proc testWeb3Provider*(web3Url: Uri, stdout.flushFile() var res: typeof(read action) try: - res = awaitWithRetries action + res = awaitOrRaiseOnTimeout(action, web3RequestsTimeout) stdout.write "\r" & actionDesc & ": " & $res except CatchableError as err: stdout.write "\r" & actionDesc & ": Error(" & err.msg & ")" diff --git a/beacon_chain/future_combinators.nim b/beacon_chain/future_combinators.nim new file mode 100644 index 0000000000..e109134db7 --- /dev/null +++ b/beacon_chain/future_combinators.nim @@ -0,0 +1,98 @@ +# TODO: These should be added to the Chronos's asyncfutures2 module +# See https://github.com/status-im/nim-chronos/pull/339 + +import + chronos + +proc firstCompletedFuture*(futs: varargs[FutureBase]): Future[FutureBase] = + ## Returns a future which will complete and return completed FutureBase, + ## when one of the futures in ``futs`` is completed. + ## + ## If the argument is empty, the returned future FAILS immediately. + ## + ## On success, the returned Future will hold the completed FutureBase. + ## + ## If all futures fail naturally or due to cancellation, the returned + ## future will be failed as well. + ## + ## On cancellation, futures in ``futs`` WILL NOT BE cancelled. + + var retFuture = newFuture[FutureBase]("chronos.firstCompletedFuture()") + + # Because we can't capture varargs[T] in closures we need to create copy. 
+  var nfuts = @futs
+
+  # If one of the Future[T] already finished we return it as result
+  for fut in nfuts:
+    if fut.completed():
+      retFuture.complete(fut)
+      return retFuture
+
+  if len(nfuts) == 0:
+    retFuture.fail(newException(ValueError, "Empty Future[T] list"))
+    return retFuture
+
+  var failedFutures = 0
+
+  var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  cb = proc(udata: pointer) {.gcsafe, raises: [Defect].} =
+    if not(retFuture.finished()):
+      var res: FutureBase
+      var rfut = cast[FutureBase](udata)
+      if rfut.completed:
+        for i in 0..= ConsensusFork.Bellatrix:
@@ -264,35 +265,27 @@ from ../spec/datatypes/bellatrix import ExecutionPayload, SignedBeaconBlock
 from ../spec/datatypes/capella import
   ExecutionPayload, SignedBeaconBlock, asTrusted, shortLog

+# TODO investigate why this seems to allow compilation even though it doesn't
+# directly address deneb.ExecutionPayload when complaint was that it didn't
+# know about "deneb"
+from ../spec/datatypes/deneb import SignedBeaconBlock, asTrusted, shortLog
+from ../eth1/eth1_monitor import hasProperlyConfiguredConnection
+
 proc newExecutionPayload*(
-    eth1Monitor: Eth1Monitor,
-    executionPayload: bellatrix.ExecutionPayload | capella.ExecutionPayload |
-                      eip4844.ExecutionPayload):
+    elManager: ELManager,
+    executionPayload: ForkyExecutionPayload):
     Future[Opt[PayloadExecutionStatus]] {.async.} =
-  if eth1Monitor.isNil:
+
+  if not elManager.hasProperlyConfiguredConnection:
+    debug "No EL connection for newPayload"
     return Opt.none PayloadExecutionStatus

   debug "newPayload: inserting block into execution engine",
     executionPayload = shortLog(executionPayload)

-  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#request
-  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/shanghai.md#request
-  const NEWPAYLOAD_TIMEOUT = 8.seconds
-
-  try:
-    let
-      payloadResponse =
-        awaitWithTimeout(
-            eth1Monitor.newPayload(
-              executionPayload.asEngineExecutionPayload),
-            NEWPAYLOAD_TIMEOUT):
-          info "newPayload: newPayload timed out"
-          return Opt.none PayloadExecutionStatus
-
-          # Placeholder for type system
-          PayloadStatusV1(status: PayloadExecutionStatus.syncing)
-
-      payloadStatus = payloadResponse.status
+  let payloadStatus = await elManager.sendNewPayload(
+    executionPayload.asEngineExecutionPayload)

     debug "newPayload: succeeded",
       parentHash = executionPayload.parent_hash,
@@ -309,28 +302,17 @@ proc newExecutionPayload*(
       blockNumber = executionPayload.block_number
     return Opt.none PayloadExecutionStatus

-# TODO investigate why this seems to allow compilation even though it doesn't
-# directly address eip4844.ExecutionPayload when complaint was that it didn't
-# know about "eip4844"
-from ../spec/datatypes/eip4844 import SignedBeaconBlock, asTrusted, shortLog
-
 proc getExecutionValidity(
-    eth1Monitor: Eth1Monitor,
+    elManager: ELManager,
     blck: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
           eip4844.SignedBeaconBlock):
     Future[NewPayloadStatus] {.async.} =
   if not blck.message.is_execution_block:
     return NewPayloadStatus.valid # vacuously

-  if eth1Monitor.isNil:
-    return NewPayloadStatus.noResponse
-
   try:
-    # Minimize window for Eth1 monitor to shut down connection
-    await eth1Monitor.ensureDataProvider()
-
-    let executionPayloadStatus = await newExecutionPayload(
-      eth1Monitor, blck.message.body.execution_payload)
+    let executionPayloadStatus = await elManager.newExecutionPayload(
+      blck.message.body.execution_payload)

     if executionPayloadStatus.isNone:
       return NewPayloadStatus.noResponse
@@ -378,7
+360,7 @@ proc storeBlock*( NewPayloadStatus.noResponse else: when typeof(signedBlock).toFork() >= ConsensusFork.Bellatrix: - await self.consensusManager.eth1Monitor.getExecutionValidity(signedBlock) + await self.consensusManager.elManager.getExecutionValidity(signedBlock) else: NewPayloadStatus.valid # vacuously payloadValid = payloadStatus == NewPayloadStatus.valid @@ -508,7 +490,7 @@ proc storeBlock*( wallSlot.start_beacon_time) if newHead.isOk: - template eth1Monitor(): auto = self.consensusManager.eth1Monitor + template elManager(): auto = self.consensusManager.elManager if self.consensusManager[].shouldSyncOptimistically(wallSlot): # Optimistic head is far in the future; report it as head block to EL. @@ -526,7 +508,7 @@ proc storeBlock*( # - "Beacon chain gapped" from DAG head to optimistic head, # - followed by "Beacon chain reorged" from optimistic head back to DAG. self.consensusManager[].updateHead(newHead.get.blck) - discard await eth1Monitor.runForkchoiceUpdated( + discard await elManager.forkchoiceUpdated( headBlockHash = self.consensusManager[].optimisticExecutionPayloadHash, safeBlockHash = newHead.get.safeExecutionPayloadHash, finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash) @@ -548,7 +530,7 @@ proc storeBlock*( if self.consensusManager.checkNextProposer(wallSlot).isNone: # No attached validator is next proposer, so use non-proposal fcU - await eth1Monitor.expectValidForkchoiceUpdated( + await elManager.expectValidForkchoiceUpdated( headBlockHash = headExecutionPayloadHash, safeBlockHash = newHead.get.safeExecutionPayloadHash, finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash, diff --git a/beacon_chain/networking/network_metadata.nim b/beacon_chain/networking/network_metadata.nim index aa6b2d9963..d7a624a3e4 100644 --- a/beacon_chain/networking/network_metadata.nim +++ b/beacon_chain/networking/network_metadata.nim @@ -45,7 +45,7 @@ type # branch is not active and thus it will override the first variable # in this branch. dummy: string - # If the eth1Network is specified, the Eth1Monitor will perform some + # If the eth1Network is specified, the ELManager will perform some # additional checks to ensure we are connecting to a web3 provider # serving data for the same network. The value can be set to `None` # for custom networks and testing purposes. diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 3471038e48..039e8eb102 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -9,7 +9,7 @@ import std/[os, random, sequtils, terminal, times], - chronos, chronicles, chronicles/chronos_tools, + chronos, chronicles, metrics, metrics/chronos_httpserver, stew/[byteutils, io2], eth/p2p/discoveryv5/[enr, random2], @@ -271,12 +271,12 @@ proc initFullNode( # This `nimcall` functions helps for keeping track of what # needs to be captured by the onFinalization closure. 
eventBus: EventBus, - eth1Monitor: Eth1Monitor): OnFinalizedCallback {.nimcall.} = - static: doAssert (eth1Monitor is ref) + elManager: ELManager): OnFinalizedCallback {.nimcall.} = + static: doAssert (elManager is ref) return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - if eth1Monitor != nil: + if elManager != nil: let finalizedEpochRef = dag.getFinalizedEpochRef() - discard trackFinalizedState(eth1Monitor, + discard trackFinalizedState(elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index) node.updateLightClientFromDag() @@ -316,7 +316,7 @@ proc initFullNode( validatorChangePool = newClone( ValidatorChangePool.init(dag, attestationPool, onVoluntaryExitAdded)) consensusManager = ConsensusManager.new( - dag, attestationPool, quarantine, node.eth1Monitor, + dag, attestationPool, quarantine, node.elManager, ActionTracker.init(rng, config.subscribeAllSubnets), node.dynamicFeeRecipientsStore, config.validatorsDir, config.defaultFeeRecipient, config.suggestedGasLimit) @@ -381,7 +381,7 @@ proc initFullNode( router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates - dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.eth1Monitor) + dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) dag.setBlockCb(onBlockAdded) dag.setHeadCb(onHeadChanged) dag.setReorgCb(onChainReorg) @@ -505,13 +505,11 @@ proc init*(T: type BeaconNode, quit 1 db.putDepositTreeSnapshot(depositTreeSnapshot) - let optJwtSecret = rng[].loadJwtSecret(config, allowCreate = false) + let engineApiUrls = config.engineApiUrls - if config.web3Urls.len() == 0: + if engineApiUrls.len == 0: notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" - var eth1Monitor: Eth1Monitor - var genesisState = if metadata.genesisData.len > 0: try: @@ -593,17 +591,13 @@ proc init*(T: type BeaconNode, dag.checkWeakSubjectivityCheckpoint( config.weakSubjectivityCheckpoint.get, beaconClock) - if eth1Monitor.isNil and config.web3Urls.len > 0: - eth1Monitor = Eth1Monitor.init( - cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db, - getBeaconTime, - config.web3Urls, - eth1Network, - config.web3ForcePolling, - optJwtSecret) + let elManager = ELManager.new( + cfg, + metadata.depositContractBlock, + metadata.depositContractBlockHash, + db, + engineApiUrls, + eth1Network) if config.rpcEnabled.isSome: warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." @@ -692,7 +686,7 @@ proc init*(T: type BeaconNode, db: db, config: config, attachedValidators: validatorPool, - eth1Monitor: eth1Monitor, + elManager: elManager, payloadBuilderRestClient: payloadBuilderRestClient, restServer: restServer, keymanagerHost: keymanagerHost, @@ -704,13 +698,6 @@ proc init*(T: type BeaconNode, beaconClock: beaconClock, validatorMonitor: validatorMonitor, stateTtlCache: stateTtlCache, - nextExchangeTransitionConfTime: - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#specification-3 - # Consensus Layer client software **SHOULD** poll this endpoint every - # 60 seconds. - # Delay first call by that time to allow for EL syncing to begin; it can - # otherwise generate an EL warning by claiming a zero merge block. 
- Moment.now + chronos.seconds(60), dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) node.initLightClient( @@ -1347,17 +1334,6 @@ proc onSecond(node: BeaconNode, time: Moment) = # Nim GC metrics (for the main thread) updateThreadMetrics() - if time >= node.nextExchangeTransitionConfTime and not node.eth1Monitor.isNil: - # Execution Layer client software **SHOULD** surface an error to the user - # if it does not receive a request on this endpoint at least once every 120 - # seconds. - # To accommodate for that, exchange slightly more frequently. - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#specification-3 - node.nextExchangeTransitionConfTime = time + chronos.seconds(45) - - if node.currentSlot.epoch >= node.dag.cfg.BELLATRIX_FORK_EPOCH: - traceAsyncErrors node.eth1Monitor.exchangeTransitionConfiguration() - if node.config.stopAtSyncedEpoch != 0 and node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: notice "Shutting down after having reached the target synced epoch" @@ -1676,9 +1652,7 @@ proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} = waitFor node.initializeNetworking() - if node.eth1Monitor != nil: - node.eth1Monitor.start() - + node.elManager.start() node.run() func formatGwei(amount: uint64): string = diff --git a/beacon_chain/nimbus_light_client.nim b/beacon_chain/nimbus_light_client.nim index 1b9be41079..d16fb62b0b 100644 --- a/beacon_chain/nimbus_light_client.nim +++ b/beacon_chain/nimbus_light_client.nim @@ -16,7 +16,6 @@ import ./spec/datatypes/[phase0, altair, bellatrix, capella, eip4844], "."/[filepath, light_client, light_client_db, nimbus_binary_common, version] -from ./consensus_object_pools/consensus_manager import runForkchoiceUpdated from ./gossip_processing/block_processor import newExecutionPayload from ./gossip_processing/eth2_processor import toValidationResult @@ -87,21 +86,16 @@ programMain: network = createEth2Node( rng, config, netKeys, cfg, forkDigests, getBeaconTime, genesis_validators_root) - - eth1Monitor = - if config.web3Urls.len > 0: - let res = Eth1Monitor.init( + engineApiUrls = config.engineApiUrls + elManager = + if engineApiUrls.len > 0: + ELManager.new( cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db = nil, - getBeaconTime, - config.web3Urls, - metadata.eth1Network, - forcePolling = false, - rng[].loadJwtSecret(config, allowCreate = false)) - waitFor res.ensureDataProvider() - res + engineApiUrls, + metadata.eth1Network) else: nil @@ -115,14 +109,9 @@ programMain: if blck.message.is_execution_block: template payload(): auto = blck.message.body.execution_payload - if eth1Monitor != nil and not payload.block_hash.isZero: - await eth1Monitor.ensureDataProvider() - - # engine_newPayloadV1 - discard await eth1Monitor.newExecutionPayload(payload) - - # engine_forkchoiceUpdatedV1 - discard await eth1Monitor.runForkchoiceUpdated( + if elManager != nil and not payload.block_hash.isZero: + discard await elManager.newExecutionPayload(payload) + discard await elManager.forkchoiceUpdated( headBlockHash = payload.block_hash, safeBlockHash = payload.block_hash, # stub value finalizedBlockHash = ZERO_HASH) @@ -224,7 +213,7 @@ programMain: func shouldSyncOptimistically(wallSlot: Slot): bool = # Check whether an EL is connected - if eth1Monitor == nil: + if elManager == nil: return false isSynced(wallSlot) @@ -323,16 +312,8 @@ programMain: nextSlot = wallSlot + 1 timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime() - var 
nextExchangeTransitionConfTime = Moment.now + chronos.seconds(60)
   proc onSecond(time: Moment) =
     let wallSlot = getBeaconTime().slotOrZero()
-
-    # engine_exchangeTransitionConfigurationV1
-    if time > nextExchangeTransitionConfTime and eth1Monitor != nil:
-      nextExchangeTransitionConfTime = time + chronos.seconds(45)
-      if wallSlot.epoch >= cfg.BELLATRIX_FORK_EPOCH:
-        traceAsyncErrors eth1Monitor.exchangeTransitionConfiguration()
-
     if checkIfShouldStopAtEpoch(wallSlot, config.stopAtEpoch):
       quit(0)
diff --git a/beacon_chain/rpc/rest_nimbus_api.nim b/beacon_chain/rpc/rest_nimbus_api.nim
index fbba496896..37fd2c06a0 100644
--- a/beacon_chain/rpc/rest_nimbus_api.nim
+++ b/beacon_chain/rpc/rest_nimbus_api.nim
@@ -227,11 +227,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
   router.api(MethodGet, "/nimbus/v1/eth1/chain") do (
     ) -> RestApiResponse:
-    let res =
-      if not(isNil(node.eth1Monitor)):
-        mapIt(node.eth1Monitor.depositChainBlocks, it)
-      else:
-        @[]
+    let res = mapIt(node.elManager.eth1ChainBlocks, it)
     return RestApiResponse.jsonResponse(res)

   router.api(MethodGet, "/nimbus/v1/eth1/proposal_data") do (
diff --git a/beacon_chain/rpc/rest_validator_api.nim b/beacon_chain/rpc/rest_validator_api.nim
index 6da6f77a94..153e3932e1 100644
--- a/beacon_chain/rpc/rest_validator_api.nim
+++ b/beacon_chain/rpc/rest_validator_api.nim
@@ -375,15 +375,32 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
       return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue)

     let res =
-      if qslot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH:
-        await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload](
+      case node.dag.cfg.consensusForkAtEpoch(qslot.epoch)
+      of ConsensusFork.Deneb:
+        # TODO
+        # We should return a block with sidecars here
+        # https://github.com/ethereum/beacon-APIs/pull/302/files
+        # The code paths leading to makeBeaconBlockForHeadAndSlot are already
+        # partially refactored to make it possible to return the blobs from
+        # the call, but the signature of the call needs to be changed further
+        # to access the blobs here.
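+        # The `case` over `consensusForkAtEpoch` is exhaustive, so a newly
+        # added fork will fail compilation here until handled explicitly.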
+ discard $denebImplementationMissing + await makeBeaconBlockForHeadAndSlot( + deneb.ExecutionPayloadForSigning, node, qrandao, proposer, qgraffiti, qhead, qslot) - else: - await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( + of ConsensusFork.Capella: + await makeBeaconBlockForHeadAndSlot( + capella.ExecutionPayloadForSigning, + node, qrandao, proposer, qgraffiti, qhead, qslot) + of ConsensusFork.Bellatrix: + await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayloadForSigning, node, qrandao, proposer, qgraffiti, qhead, qslot) + of ConsensusFork.Altair, ConsensusFork.Phase0: + return RestApiResponse.jsonError(Http400, InvalidSlotValueError) if res.isErr(): return RestApiResponse.jsonError(Http400, res.error()) - res.get() + res.get return RestApiResponse.jsonResponsePlain(message) # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlindedBlock @@ -475,10 +492,13 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonError(Http500, InvalidAcceptError) static: doAssert high(ConsensusFork) == ConsensusFork.Deneb - let currentEpoch = node.currentSlot().epoch() - if currentEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: + case node.dag.cfg.consensusForkAtEpoch(node.currentSlot.epoch) + of ConsensusFork.Deneb: + # TODO + # We should return a block with sidecars here + # https://github.com/ethereum/beacon-APIs/pull/302/files debugRaiseAssert $denebImplementationMissing & ": GET /eth/v1/validator/blinded_blocks/{slot}" - elif currentEpoch >= node.dag.cfg.CAPELLA_FORK_EPOCH: + of ConsensusFork.Capella: let res = await makeBlindedBeaconBlockForHeadAndSlot[ capella_mev.BlindedBeaconBlock]( node, qrandao, proposer, qgraffiti, qhead, qslot) @@ -487,7 +507,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = return responsePlain(ForkedBlindedBeaconBlock( kind: ConsensusFork.Capella, capellaData: res.get())) - elif currentEpoch >= node.dag.cfg.BELLATRIX_FORK_EPOCH: + of ConsensusFork.Bellatrix: let res = await makeBlindedBeaconBlockForHeadAndSlot[ bellatrix_mev.BlindedBeaconBlock]( node, qrandao, proposer, qgraffiti, qhead, qslot) @@ -496,13 +516,14 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = return responsePlain(ForkedBlindedBeaconBlock( kind: ConsensusFork.Bellatrix, bellatrixData: res.get())) - else: + of ConsensusFork.Altair, ConsensusFork.Phase0: # Pre-Bellatrix, this endpoint will return a BeaconBlock - let res = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( - node, qrandao, proposer, qgraffiti, qhead, qslot) + let res = await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayloadForSigning, node, qrandao, + proposer, qgraffiti, qhead, qslot) if res.isErr(): return RestApiResponse.jsonError(Http400, res.error()) - return responsePlain(res.get()) + return responsePlain(res.get) # https://ethereum.github.io/beacon-APIs/#/Validator/produceAttestationData router.api(MethodGet, "/eth/v1/validator/attestation_data") do ( diff --git a/beacon_chain/spec/datatypes/base.nim b/beacon_chain/spec/datatypes/base.nim index 6590327efc..800b377f76 100644 --- a/beacon_chain/spec/datatypes/base.nim +++ b/beacon_chain/spec/datatypes/base.nim @@ -167,6 +167,7 @@ type ## The `SubnetId` type is constrained to values in the range ## `[0, ATTESTATION_SUBNET_COUNT)` during initialization. 
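+  # Raw wei amounts (such as the `blockValue` reported by the EL) need the
+  # full 256-bit range; Gwei values stay within uint64 per the consensus spec.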
+ Wei* = UInt256 Gwei* = uint64 # BitVector[4] in the spec, ie 4 bits which end up encoded as a byte for diff --git a/beacon_chain/spec/datatypes/bellatrix.nim b/beacon_chain/spec/datatypes/bellatrix.nim index 7fb436fd0c..1fb7964b51 100644 --- a/beacon_chain/spec/datatypes/bellatrix.nim +++ b/beacon_chain/spec/datatypes/bellatrix.nim @@ -29,6 +29,10 @@ const # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/shanghai.md#request-1 FORKCHOICEUPDATED_TIMEOUT* = 8.seconds + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#request + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/shanghai.md#request + NEWPAYLOAD_TIMEOUT* = 8.seconds + type # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/bellatrix/beacon-chain.md#custom-types Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] @@ -60,6 +64,10 @@ type block_hash*: Eth2Digest # Hash of execution block transactions*: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + ExecutionPayloadForSigning* = object + executionPayload*: ExecutionPayload + blockValue*: Wei + # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/bellatrix/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object parent_hash*: Eth2Digest diff --git a/beacon_chain/spec/datatypes/capella.nim b/beacon_chain/spec/datatypes/capella.nim index 9462ebd4cb..cbed0568f7 100644 --- a/beacon_chain/spec/datatypes/capella.nim +++ b/beacon_chain/spec/datatypes/capella.nim @@ -83,6 +83,10 @@ type transactions*: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] # [New in Capella] + ExecutionPayloadForSigning* = object + executionPayload*: ExecutionPayload + blockValue*: Wei + # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/capella/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object parent_hash*: Eth2Digest diff --git a/beacon_chain/spec/datatypes/eip4844.nim b/beacon_chain/spec/datatypes/eip4844.nim index bd84448325..6b355ef509 100644 --- a/beacon_chain/spec/datatypes/eip4844.nim +++ b/beacon_chain/spec/datatypes/eip4844.nim @@ -44,7 +44,8 @@ type KZGProof* = array[48, byte] BLSFieldElement* = array[32, byte] - KZGCommitmentList* = List[KZGCommitment, Limit MAX_BLOBS_PER_BLOCK] + KZGCommitments* = List[KZGCommitment, Limit MAX_BLOBS_PER_BLOCK] + Blobs* = List[Blob, Limit MAX_BLOBS_PER_BLOCK] # TODO this apparently is suppposed to be SSZ-equivalent to Bytes32, but # current spec doesn't ever SSZ-serialize it or hash_tree_root it @@ -59,7 +60,7 @@ type BlobsSidecar* = object beacon_block_root*: Eth2Digest beacon_block_slot*: Slot - blobs*: List[Blob, Limit MAX_BLOBS_PER_BLOCK] + blobs*: Blobs kzg_aggregated_proof*: KZGProof # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/deneb/p2p-interface.md#blobsidecar @@ -104,6 +105,12 @@ type withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] excess_data_gas*: UInt256 # [New in Deneb] + ExecutionPayloadForSigning* = object + executionPayload*: ExecutionPayload + blockValue*: Wei + kzgs*: KZGCommitments + blobs*: Blobs + # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/eip4844/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/deneb/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object @@ -381,7 +388,7 @@ type # Execution execution_payload*: ExecutionPayload bls_to_execution_changes*: 
SignedBLSToExecutionChangeList - blob_kzg_commitments*: KZGCommitmentList # [New in EIP-4844] + blob_kzg_commitments*: KZGCommitments # [New in EIP-4844] SigVerifiedBeaconBlockBody* = object ## A BeaconBlock body with signatures verified diff --git a/beacon_chain/spec/engine_authentication.nim b/beacon_chain/spec/engine_authentication.nim index a182d8ef4c..a07c16e0ce 100644 --- a/beacon_chain/spec/engine_authentication.nim +++ b/beacon_chain/spec/engine_authentication.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - chronicles, + chronicles, confutils/defs, bearssl/rand, nimcrypto/[hmac, utils], stew/[byteutils, results] @@ -21,6 +21,9 @@ export rand, results {.push raises: [].} +const + JWT_SECRET_LEN = 32 + proc base64urlEncode(x: auto): string = # The only strings this gets are internally generated, and don't have # encoding quirks. @@ -58,15 +61,34 @@ proc getSignedToken*(key: openArray[byte], payload: string): string = proc getSignedIatToken*(key: openArray[byte], time: int64): string = getSignedToken(key, $getIatToken(time)) +proc parseJwtTokenValue*(input: string): Result[seq[byte], cstring] = + # Secret JWT key is parsed in constant time using nimcrypto: + # https://github.com/cheatfate/nimcrypto/pull/44 + let secret = utils.fromHex(input) + if secret.len == JWT_SECRET_LEN: + ok(secret) + else: + err("The JWT secret should be 256 bits and hex-encoded") + +proc loadJwtSecretFile*(jwtSecretFile: InputFile): Result[seq[byte], cstring] = + try: + let lines = readLines(string jwtSecretFile, 1) + if lines.len > 0: + parseJwtTokenValue(lines[0]) + else: + err("The JWT token file should not be empty") + except IOError: + err("couldn't open specified JWT secret file") + except ValueError: + err("invalid JWT hex string") + proc checkJwtSecret*( - rng: var HmacDrbgContext, dataDir: string, jwtSecret: Option[string]): + rng: var HmacDrbgContext, dataDir: string, jwtSecret: Option[InputFile]): Result[seq[byte], cstring] = # If such a parameter is given, but the file cannot be read, or does not # contain a hex-encoded key of 256 bits, the client should treat this as an # error: either abort the startup, or show error and continue without # exposing the authenticated port. 
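`checkJwtSecret` now delegates parsing to the extracted `parseJwtTokenValue`/`loadJwtSecretFile` helpers above, which accept only a hex-encoded 256-bit (32-byte) secret. A self-contained sketch of that length check, assuming a bare hex string without a `0x` prefix (not a call into the module):

```
import std/strutils

const JWT_SECRET_LEN = 32  # 256 bits

# Illustrative restatement of the validity rule, not the module's code:
# 32 bytes encode to exactly 64 hex characters.
proc looksLikeJwtSecret(input: string): bool =
  input.len == JWT_SECRET_LEN * 2 and input.allCharsInSet(HexDigits)

assert looksLikeJwtSecret("aa".repeat(JWT_SECRET_LEN))
assert not looksLikeJwtSecret("aa")                         # too short
assert not looksLikeJwtSecret("zz".repeat(JWT_SECRET_LEN))  # not hex
```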
- const SECRET_LEN = 32 - if jwtSecret.isNone: # If such a parameter is not given, the client SHOULD generate such a # token, valid for the duration of the execution, and store the @@ -77,7 +99,7 @@ proc checkJwtSecret*( const jwtSecretFilename = "jwt.hex" let jwtSecretPath = dataDir / jwtSecretFilename - let newSecret = rng.generateBytes(SECRET_LEN) + let newSecret = rng.generateBytes(JWT_SECRET_LEN) try: writeFile(jwtSecretPath, newSecret.to0xHex()) except IOError as exc: @@ -88,20 +110,4 @@ proc checkJwtSecret*( err = exc.msg return ok(newSecret) - try: - # TODO replace with separate function - let lines = readLines(jwtSecret.get, 1) - if lines.len > 0: - # Secret JWT key is parsed in constant time using nimcrypto: - # https://github.com/cheatfate/nimcrypto/pull/44 - let secret = utils.fromHex(lines[0]) - if secret.len == SECRET_LEN: - ok(secret) - else: - err("JWT secret not 256 bits") - else: - err("no hex string found") - except IOError: - err("couldn't open specified JWT secret file") - except ValueError: - err("invalid JWT hex string") + loadJwtSecretFile(jwtSecret.get) diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim index 163ee3f84f..f2c2fdb76a 100644 --- a/beacon_chain/spec/forks.nim +++ b/beacon_chain/spec/forks.nim @@ -132,6 +132,11 @@ type ForkySigVerifiedBeaconBlock | ForkyTrustedBeaconBlock + ForkyExecutionPayloadForSigning* = + bellatrix.ExecutionPayloadForSigning | + capella.ExecutionPayloadForSigning | + deneb.ExecutionPayloadForSigning + ForkedBeaconBlock* = object case kind*: ConsensusFork of ConsensusFork.Phase0: phase0Data*: phase0.BeaconBlock @@ -479,6 +484,7 @@ template toFork*[T: bellatrix.BeaconState | bellatrix.HashedBeaconState | bellatrix.ExecutionPayload | + bellatrix.ExecutionPayloadForSigning | bellatrix.ExecutionPayloadHeader | bellatrix.BeaconBlock | bellatrix.SignedBeaconBlock | @@ -493,6 +499,7 @@ template toFork*[T: capella.BeaconState | capella.HashedBeaconState | capella.ExecutionPayload | + capella.ExecutionPayloadForSigning | capella.ExecutionPayloadHeader | capella.BeaconBlock | capella.SignedBeaconBlock | @@ -507,6 +514,7 @@ template toFork*[T: deneb.BeaconState | deneb.HashedBeaconState | deneb.ExecutionPayload | + deneb.ExecutionPayloadForSigning | deneb.ExecutionPayloadHeader | deneb.BeaconBlock | deneb.SignedBeaconBlock | diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 97df19b908..952e324cb0 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -457,7 +457,7 @@ proc compute_execution_block_hash*( proc build_empty_execution_payload*( state: bellatrix.BeaconState, - feeRecipient: Eth1Address): bellatrix.ExecutionPayload = + feeRecipient: Eth1Address): bellatrix.ExecutionPayloadForSigning = ## Assuming a pre-state of the same slot, build a valid ExecutionPayload ## without any transactions. 
let @@ -468,18 +468,21 @@ proc build_empty_execution_payload*( GasInt.saturate latest.gas_used, latest.base_fee_per_gas) - var payload = bellatrix.ExecutionPayload( - parent_hash: latest.block_hash, - fee_recipient: bellatrix.ExecutionAddress(data: distinctBase(feeRecipient)), - state_root: latest.state_root, # no changes to the state - receipts_root: EMPTY_ROOT_HASH, - block_number: latest.block_number + 1, - prev_randao: randao_mix, - gas_limit: latest.gas_limit, # retain same limit - gas_used: 0, # empty block, 0 gas - timestamp: timestamp, - base_fee_per_gas: base_fee) - - payload.block_hash = payload.compute_execution_block_hash() + var payload = bellatrix.ExecutionPayloadForSigning( + executionPayload: bellatrix.ExecutionPayload( + parent_hash: latest.block_hash, + fee_recipient: bellatrix.ExecutionAddress(data: distinctBase(feeRecipient)), + state_root: latest.state_root, # no changes to the state + receipts_root: EMPTY_ROOT_HASH, + block_number: latest.block_number + 1, + prev_randao: randao_mix, + gas_limit: latest.gas_limit, # retain same limit + gas_used: 0, # empty block, 0 gas + timestamp: timestamp, + base_fee_per_gas: base_fee), + blockValue: Wei.zero) + + payload.executionPayload.block_hash = + payload.executionPayload.compute_execution_block_hash() payload diff --git a/beacon_chain/spec/state_transition.nim b/beacon_chain/spec/state_transition.nim index 2e9ac9337b..b06477f060 100644 --- a/beacon_chain/spec/state_transition.nim +++ b/beacon_chain/spec/state_transition.nim @@ -343,8 +343,7 @@ template partialBeaconBlock*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - blob_kzg_commitments: KZGCommitmentList, - execution_payload: bellatrix.ExecutionPayload): + execution_payload: bellatrix.ExecutionPayloadForSigning): phase0.BeaconBlock = phase0.BeaconBlock( slot: state.data.slot, @@ -372,8 +371,7 @@ template partialBeaconBlock*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - blob_kzg_commitments: KZGCommitmentList, - execution_payload: bellatrix.ExecutionPayload): + execution_payload: bellatrix.ExecutionPayloadForSigning): altair.BeaconBlock = altair.BeaconBlock( slot: state.data.slot, @@ -402,8 +400,7 @@ template partialBeaconBlock*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - blob_kzg_commitments: KZGCommitmentList, - execution_payload: bellatrix.ExecutionPayload): + execution_payload: bellatrix.ExecutionPayloadForSigning): bellatrix.BeaconBlock = bellatrix.BeaconBlock( slot: state.data.slot, @@ -419,7 +416,7 @@ template partialBeaconBlock*( deposits: List[Deposit, Limit MAX_DEPOSITS](deposits), voluntary_exits: validator_changes.voluntary_exits, sync_aggregate: sync_aggregate, - execution_payload: execution_payload)) + execution_payload: execution_payload.executionPayload)) # https://github.com/ethereum/consensus-specs/blob/v1.1.3/specs/merge/validator.md#block-proposal template partialBeaconBlock*( @@ -433,9 +430,7 @@ template partialBeaconBlock*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - blob_kzg_commitments: KZGCommitmentList, - execution_payload: capella.ExecutionPayload, - ): + execution_payload: capella.ExecutionPayloadForSigning): capella.BeaconBlock = capella.BeaconBlock( slot: state.data.slot, @@ -451,7 +446,7 @@ template partialBeaconBlock*( deposits: List[Deposit, Limit MAX_DEPOSITS](deposits), voluntary_exits: 
validator_changes.voluntary_exits, sync_aggregate: sync_aggregate, - execution_payload: execution_payload, + execution_payload: execution_payload.executionPayload, bls_to_execution_changes: validator_changes.bls_to_execution_changes )) @@ -467,9 +462,7 @@ template partialBeaconBlock*( deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - kzg_commitments: deneb.KZGCommitmentList, - execution_payload: deneb.ExecutionPayload, - ): + execution_payload: deneb.ExecutionPayloadForSigning): deneb.BeaconBlock = eip4844.BeaconBlock( slot: state.data.slot, @@ -485,13 +478,12 @@ template partialBeaconBlock*( deposits: List[Deposit, Limit MAX_DEPOSITS](deposits), voluntary_exits: validator_changes.voluntary_exits, sync_aggregate: sync_aggregate, - execution_payload: execution_payload, + execution_payload: execution_payload.executionPayload, bls_to_execution_changes: validator_changes.bls_to_execution_changes, - blob_kzg_commitments: kzg_commitments + blob_kzg_commitments: execution_payload.kzgs )) -proc makeBeaconBlock*[T: bellatrix.ExecutionPayload | capella.ExecutionPayload | - deneb.ExecutionPayload]( +proc makeBeaconBlock*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, proposer_index: ValidatorIndex, @@ -502,8 +494,7 @@ proc makeBeaconBlock*[T: bellatrix.ExecutionPayload | capella.ExecutionPayload | deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - executionPayload: T, - blob_kzg_commitments: KZGCommitmentList, + executionPayload: ForkyExecutionPayloadForSigning, rollback: RollbackForkedHashedProc, cache: var StateCache, # TODO: @@ -527,7 +518,7 @@ proc makeBeaconBlock*[T: bellatrix.ExecutionPayload | capella.ExecutionPayload | partialBeaconBlock( cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data, graffiti, attestations, deposits, validator_changes, sync_aggregate, - blob_kzg_commitments, executionPayload)) + executionPayload)) let res = process_block( cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(), @@ -589,60 +580,63 @@ proc makeBeaconBlock*[T: bellatrix.ExecutionPayload | capella.ExecutionPayload | ok(blck) - when T is bellatrix.ExecutionPayload: + const payloadFork = typeof(executionPayload).toFork + when payloadFork == ConsensusFork.Bellatrix: case state.kind of ConsensusFork.Phase0: makeBeaconBlock(phase0) of ConsensusFork.Altair: makeBeaconBlock(altair) of ConsensusFork.Bellatrix: makeBeaconBlock(bellatrix) of ConsensusFork.Capella, ConsensusFork.Deneb: raiseAssert "Attempt to use Bellatrix payload with post-Bellatrix state" - elif T is capella.ExecutionPayload: + elif payloadFork == ConsensusFork.Capella: case state.kind of ConsensusFork.Phase0, ConsensusFork.Altair, ConsensusFork.Bellatrix, ConsensusFork.Deneb: raiseAssert "Attempt to use Capella payload with non-Capella state" of ConsensusFork.Capella: makeBeaconBlock(capella) - elif T is deneb.ExecutionPayload: + elif payloadFork == ConsensusFork.Deneb: case state.kind of ConsensusFork.Phase0, ConsensusFork.Altair, ConsensusFork.Bellatrix, ConsensusFork.Capella: raiseAssert "Attempt to use EIP4844 payload with non-EIP4844 state" of ConsensusFork.Deneb: makeBeaconBlock(deneb) + else: + {.error: "You need to add support for the next fork".} # workaround for https://github.com/nim-lang/Nim/issues/20900 rather than have # these be default arguments -proc makeBeaconBlock*[T]( +proc makeBeaconBlock*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, proposer_index: ValidatorIndex, randao_reveal: 
ValidatorSig, eth1_data: Eth1Data, graffiti: GraffitiBytes, attestations: seq[Attestation], deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, executionPayload: T, - blob_kzg_commitments: KZGCommitmentList, + sync_aggregate: SyncAggregate, + executionPayload: ForkyExecutionPayloadForSigning, rollback: RollbackForkedHashedProc, cache: var StateCache): Result[ForkedBeaconBlock, cstring] = makeBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, attestations, deposits, validator_changes, sync_aggregate, - executionPayload, blob_kzg_commitments, rollback, cache, + executionPayload, rollback, cache, verificationFlags = {}, transactions_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest) -proc makeBeaconBlock*[T]( +proc makeBeaconBlock*( cfg: RuntimeConfig, state: var ForkedHashedBeaconState, proposer_index: ValidatorIndex, randao_reveal: ValidatorSig, eth1_data: Eth1Data, graffiti: GraffitiBytes, attestations: seq[Attestation], deposits: seq[Deposit], validator_changes: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, executionPayload: T, - blob_kzg_commitments: KZGCommitmentList, + sync_aggregate: SyncAggregate, + executionPayload: ForkyExecutionPayloadForSigning, rollback: RollbackForkedHashedProc, cache: var StateCache, verificationFlags: UpdateFlags): Result[ForkedBeaconBlock, cstring] = makeBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, attestations, deposits, validator_changes, sync_aggregate, - executionPayload, blob_kzg_commitments, rollback, cache, + executionPayload, rollback, cache, verificationFlags = verificationFlags, transactions_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest) diff --git a/beacon_chain/validators/keystore_management.nim b/beacon_chain/validators/keystore_management.nim index 7acdc66960..3d55b0f8aa 100644 --- a/beacon_chain/validators/keystore_management.nim +++ b/beacon_chain/validators/keystore_management.nim @@ -1300,9 +1300,11 @@ proc generateDistributedStore*(rng: var HmacDrbgContext, ? 
saveKeystore(rng, shareValidatorDir / $share.id, shareSecretsDir / $share.id, - share.key, share.key.toPubKey, + share.key, + share.key.toPubKey, makeKeyPath(validatorIdx, signingKeyKind), - password.str, @[], + password.str, + @[], mode) signers.add RemoteSignerInfo( diff --git a/beacon_chain/validators/validator_duties.nim b/beacon_chain/validators/validator_duties.nim index 5c1bd35347..3d1de7cf5c 100644 --- a/beacon_chain/validators/validator_duties.nim +++ b/beacon_chain/validators/validator_duties.nim @@ -259,71 +259,10 @@ proc createAndSendAttestation(node: BeaconNode, proc getBlockProposalEth1Data*(node: BeaconNode, state: ForkedHashedBeaconState): BlockProposalEth1Data = - if node.eth1Monitor.isNil: - let pendingDepositsCount = - getStateField(state, eth1_data).deposit_count - - getStateField(state, eth1_deposit_index) - if pendingDepositsCount > 0: - result.hasMissingDeposits = true - else: - result.vote = getStateField(state, eth1_data) - else: - let finalizedEpochRef = node.dag.getFinalizedEpochRef() - result = node.eth1Monitor.getBlockProposalData( - state, finalizedEpochRef.eth1_data, - finalizedEpochRef.eth1_deposit_index) - -from web3/engine_api import ForkchoiceUpdatedResponse - -proc forkchoice_updated( - head_block_hash: Eth2Digest, safe_block_hash: Eth2Digest, - finalized_block_hash: Eth2Digest, timestamp: uint64, random: Eth2Digest, - fee_recipient: ethtypes.Address, withdrawals: Opt[seq[Withdrawal]], - execution_engine: Eth1Monitor): - Future[Option[bellatrix.PayloadID]] {.async.} = - logScope: - head_block_hash - finalized_block_hash - - let - forkchoiceResponse = - try: - awaitWithTimeout( - execution_engine.forkchoiceUpdated( - head_block_hash, safe_block_hash, finalized_block_hash, - timestamp, random.data, fee_recipient, withdrawals), - FORKCHOICEUPDATED_TIMEOUT): - error "Engine API fork-choice update timed out" - default(ForkchoiceUpdatedResponse) - except CatchableError as err: - error "Engine API fork-choice update failed", err = err.msg - default(ForkchoiceUpdatedResponse) - - payloadId = forkchoiceResponse.payloadId - - return if payloadId.isSome: - some(bellatrix.PayloadID(payloadId.get)) - else: - none(bellatrix.PayloadID) - -proc get_execution_payload[EP]( - payload_id: Option[bellatrix.PayloadID], execution_engine: Eth1Monitor): - Future[Opt[EP]] {.async.} = - return if payload_id.isNone(): - # Pre-merge, empty payload - Opt.some default(EP) - else: - when EP is bellatrix.ExecutionPayload: - Opt.some asConsensusExecutionPayload( - await execution_engine.getPayloadV1(payload_id.get)) - elif EP is capella.ExecutionPayload: - Opt.some asConsensusExecutionPayload( - await execution_engine.getPayloadV2(payload_id.get)) - elif EP is eip4844.ExecutionPayload: - Opt.some asConsensusExecutionPayload( - await execution_engine.getPayloadV3(payload_id.get)) - else: - static: doAssert "unknown execution payload type" + let finalizedEpochRef = node.dag.getFinalizedEpochRef() + result = node.elManager.getBlockProposalData( + state, finalizedEpochRef.eth1_data, + finalizedEpochRef.eth1_deposit_index) proc getFeeRecipient(node: BeaconNode, pubkey: ValidatorPubKey, @@ -337,12 +276,13 @@ proc getGasLimit(node: BeaconNode, from web3/engine_api_types import PayloadExecutionStatus from ../spec/datatypes/capella import BeaconBlock, ExecutionPayload -from ../spec/datatypes/eip4844 import - BeaconBlock, ExecutionPayload, shortLog +from ../spec/datatypes/deneb import BeaconBlock, ExecutionPayload, shortLog +from ../spec/beaconstate import get_expected_withdrawals -proc 
getExecutionPayload[T]( +proc getExecutionPayload( + PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode, proposalState: ref ForkedHashedBeaconState, - epoch: Epoch, validator_index: ValidatorIndex): Future[Opt[T]] {.async.} = + epoch: Epoch, validator_index: ValidatorIndex): Future[Opt[PayloadType]] {.async.} = # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload let feeRecipient = block: @@ -363,29 +303,17 @@ proc getExecutionPayload[T]( # compute way from CL due to incorporation of withdrawals into EL state # cannot use fake-EL fallback. Unlike transactions, withdrawals are not # optional, so one cannot avoid this by not including any withdrawals. - Opt.none T + Opt.none PayloadType elif (stateFork == ConsensusFork.Bellatrix and - T is bellatrix.ExecutionPayload): + PayloadType is bellatrix.ExecutionPayloadForSigning): Opt.some build_empty_execution_payload(forkyState.data, feeRecipient) elif stateFork == ConsensusFork.Bellatrix: raiseAssert "getExecutionPayload: mismatched proposalState and ExecutionPayload fork" else: # Vacuously -- these are pre-Bellatrix and not used. - Opt.some default(T) - - if node.eth1Monitor.isNil: - beacon_block_payload_errors.inc() - warn "getExecutionPayload: eth1Monitor not initialized; using empty execution payload" - return empty_execution_payload + Opt.some default(PayloadType) try: - # Minimize window for Eth1 monitor to shut down connection - await node.consensusManager.eth1Monitor.ensureDataProvider() - - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#request-2 - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/shanghai.md#request-2 - const GETPAYLOAD_TIMEOUT = 1.seconds - let beaconHead = node.attestationPool[].getBeaconHead(node.dag.head) executionHead = withState(proposalState[]): @@ -395,101 +323,39 @@ proc getExecutionPayload[T]( (static(default(Eth2Digest))) latestSafe = beaconHead.safeExecutionPayloadHash latestFinalized = beaconHead.finalizedExecutionPayloadHash - lastFcU = node.consensusManager.forkchoiceUpdatedInfo timestamp = withState(proposalState[]): compute_timestamp_at_slot(forkyState.data, forkyState.data.slot) + random = withState(proposalState[]): + get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) withdrawals = withState(proposalState[]): when stateFork >= ConsensusFork.Capella: - Opt.some get_expected_withdrawals(forkyState.data) + get_expected_withdrawals(forkyState.data) else: - Opt.none(seq[Withdrawal]) - payload_id = - if lastFcU.isSome and - lastFcU.get.headBlockRoot == executionHead and - lastFcU.get.safeBlockRoot == latestSafe and - lastFcU.get.finalizedBlockRoot == latestFinalized and - lastFcU.get.timestamp == timestamp and - lastFcU.get.feeRecipient == feeRecipient and - lastFcU.get.withdrawals == withdrawals: - some bellatrix.PayloadID(lastFcU.get.payloadId) - else: - debug "getExecutionPayload: didn't find payloadId, re-querying", - executionHead, latestSafe, latestFinalized, - timestamp, - feeRecipient, - cachedForkchoiceUpdateInformation = lastFcU - - let random = withState(proposalState[]): get_randao_mix( - forkyState.data, get_current_epoch(forkyState.data)) - let fcu_payload_id = (await forkchoice_updated( - executionHead, latestSafe, latestFinalized, timestamp, random, - feeRecipient, withdrawals, node.consensusManager.eth1Monitor)) - await sleepAsync(500.milliseconds) + @[] + payload = await node.elManager.getPayload( + PayloadType, executionHead, latestSafe, 
latestFinalized, + timestamp, random, feeRecipient, withdrawals) - fcu_payload_id + if payload.isNone: + error "Failed to obtain execution payload from EL", + executionHeadBlock = executionHead + return Opt.none(PayloadType) - let - payload = try: - awaitWithTimeout( - get_execution_payload[T](payload_id, node.consensusManager.eth1Monitor), - GETPAYLOAD_TIMEOUT): - beacon_block_payload_errors.inc() - warn "Getting execution payload from Engine API timed out", payload_id - empty_execution_payload - except CatchableError as err: - beacon_block_payload_errors.inc() - warn "Getting execution payload from Engine API failed", - payload_id, err = err.msg - empty_execution_payload - - when T is capella.ExecutionPayload: - if payload.isSome and withdrawals.isSome and - withdrawals.get() != payload.get.withdrawals.asSeq: - warn "Execution client did not return correct withdrawals", - payload = shortLog(payload.get()), - withdrawals_from_cl = withdrawals.get(), - withdrawals_from_el = payload.get.withdrawals - - return payload + return Opt.some payload.get except CatchableError as err: beacon_block_payload_errors.inc() error "Error creating non-empty execution payload; using empty execution payload", msg = err.msg return empty_execution_payload -proc getBlobsBundle( - node: BeaconNode, epoch: Epoch, validator_index: ValidatorIndex, - payload_id: PayloadID): Future[BlobsBundleV1] {.async.} = - # https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/validator.md#get_blobs_and_kzg_commitments - - # Minimize window for Eth1 monitor to shut down connection - await node.consensusManager.eth1Monitor.ensureDataProvider() - - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/experimental/blob-extension.md#engine_getblobsbundlev1 - const GETBLOBS_TIMEOUT = 1.seconds - - let payload = try: - awaitWithTimeout( - node.consensusManager.eth1Monitor.getBlobsBundleV1(payload_id), - GETBLOBS_TIMEOUT): - beacon_block_payload_errors.inc() - warn "Getting blobs sidecar from Engine API timed out", payload_id - default(BlobsBundleV1) - except CatchableError as err: - beacon_block_payload_errors.inc() - warn "Getting blobs sidecar from Engine API failed", - payload_id, err = err.msg - default(BlobsBundleV1) - - return payload - -proc makeBeaconBlockForHeadAndSlot*[EP]( +proc makeBeaconBlockForHeadAndSlot*( + PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode, randao_reveal: ValidatorSig, validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, slot: Slot, - # Thse parameters are for the builder API - execution_payload: Opt[EP], + # These parameters are for the builder API + execution_payload: Opt[PayloadType], transactions_root: Opt[Eth2Digest], execution_payload_root: Opt[Eth2Digest], withdrawals_root: Opt[Eth2Digest]): @@ -521,28 +387,29 @@ proc makeBeaconBlockForHeadAndSlot*[EP]( var modified_execution_payload = execution_payload withState(state[]): when stateFork >= ConsensusFork.Capella and - EP isnot bellatrix.ExecutionPayload: + PayloadType.toFork >= ConsensusFork.Capella: let withdrawals = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]( get_expected_withdrawals(forkyState.data)) if withdrawals_root.isNone or hash_tree_root(withdrawals) != withdrawals_root.get: + # TODO: Why don't we fall back to the EL payload here? return err("Builder relay provided incorrect withdrawals root") # Otherwise, the state transition function notices that there are # too few withdrawals.
- assign(modified_execution_payload.get.withdrawals, withdrawals) + assign(modified_execution_payload.get.executionPayload.withdrawals, + withdrawals) - let fut = newFuture[Opt[EP]]("given-payload") + let fut = newFuture[Opt[PayloadType]]("given-payload") fut.complete(modified_execution_payload) fut elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or not state[].is_merge_transition_complete: - let fut = newFuture[Opt[EP]]("empty-payload") - fut.complete(Opt.some(default(EP))) + let fut = newFuture[Opt[PayloadType]]("empty-payload") + fut.complete(Opt.some(default(PayloadType))) fut else: # Create execution payload while packing attestations - getExecutionPayload[EP]( - node, state, slot.epoch, validator_index) + getExecutionPayload(PayloadType, node, state, slot.epoch, validator_index) eth1Proposal = node.getBlockProposalEth1Data(state[]) @@ -580,7 +447,6 @@ proc makeBeaconBlockForHeadAndSlot*[EP]( exits, syncAggregate, payload, - (static(default(KZGCommitmentList))), noRollback, # Temporary state - no need for rollback cache, verificationFlags = {}, @@ -596,14 +462,14 @@ proc makeBeaconBlockForHeadAndSlot*[EP]( # workaround for https://github.com/nim-lang/Nim/issues/20900 to avoid default # parameters -proc makeBeaconBlockForHeadAndSlot*[EP]( - node: BeaconNode, randao_reveal: ValidatorSig, +proc makeBeaconBlockForHeadAndSlot*( + PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode, randao_reveal: ValidatorSig, validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, slot: Slot): - Future[ForkedBlockResult] = - return makeBeaconBlockForHeadAndSlot[EP]( - node, randao_reveal, validator_index, graffiti, head, slot, - execution_payload = Opt.none(EP), + Future[ForkedBlockResult] {.async.} = + return await makeBeaconBlockForHeadAndSlot( + PayloadType, node, randao_reveal, validator_index, graffiti, head, slot, + execution_payload = Opt.none(PayloadType), transactions_root = Opt.none(Eth2Digest), execution_payload_root = Opt.none(Eth2Digest), withdrawals_root = Opt.none(Eth2Digest)) @@ -741,8 +607,7 @@ proc getBlindedBeaconBlock[ else: return err("getBlindedBeaconBlock: attempt to construct pre-Bellatrix blinded block") -proc getBlindedBlockParts[ - EPH: bellatrix.ExecutionPayloadHeader | capella.ExecutionPayloadHeader]( +proc getBlindedBlockParts[EPH: ForkyExecutionPayloadHeader]( node: BeaconNode, head: BlockRef, pubkey: ValidatorPubKey, slot: Slot, randao: ValidatorSig, validator_index: ValidatorIndex, graffiti: GraffitiBytes): Future[Result[(EPH, ForkedBeaconBlock), string]] @@ -776,23 +641,26 @@ proc getBlindedBlockParts[ # root after running the state transition function on an otherwise equivalent # non-blinded block without transactions. 
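The shim construction that follows relies on the blinded header and the full payload sharing field names, so `copyFields` can populate a payload-shaped value and the normal block-production path can run over it. A toy sketch of that field-copy idea with illustrative stand-in types (the real code uses `copyFields`/`getFieldNames`):

```
type
  Header = object                  # header shape: roots instead of contents
    parentHash: array[32, byte]
    feeRecipient: array[20, byte]
    transactionsRoot: array[32, byte]
  Payload = object                 # payload shape: the full contents
    parentHash: array[32, byte]
    feeRecipient: array[20, byte]
    transactions: seq[seq[byte]]

proc shimFromHeader(h: Header): Payload =
  # Copy only the fields both shapes share; payload-only fields stay
  # empty and are filled in later (e.g. withdrawals by the block builder).
  result.parentHash = h.parentHash
  result.feeRecipient = h.feeRecipient

let shim = shimFromHeader(Header())
assert shim.transactions.len == 0
```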
when EPH is bellatrix.ExecutionPayloadHeader: - type EP = bellatrix.ExecutionPayload + type PayloadType = bellatrix.ExecutionPayloadForSigning let withdrawals_root = Opt.none Eth2Digest elif EPH is capella.ExecutionPayloadHeader: - type EP = capella.ExecutionPayload + type PayloadType = capella.ExecutionPayloadForSigning + let withdrawals_root = Opt.some executionPayloadHeader.get.withdrawals_root + elif EPH is deneb.ExecutionPayloadHeader: + type PayloadType = deneb.ExecutionPayloadForSigning let withdrawals_root = Opt.some executionPayloadHeader.get.withdrawals_root else: static: doAssert false - var shimExecutionPayload: EP + var shimExecutionPayload: PayloadType copyFields( - shimExecutionPayload, executionPayloadHeader.get, getFieldNames(EPH)) + shimExecutionPayload.executionPayload, executionPayloadHeader.get, getFieldNames(EPH)) # In Capella and later, this doesn't have withdrawals, which each node knows # regardless of EL or builder API. makeBeaconBlockForHeadAndSlot fills it in # when it detects builder API usage. - let newBlock = await makeBeaconBlockForHeadAndSlot[EP]( - node, randao, validator_index, graffiti, head, slot, + let newBlock = await makeBeaconBlockForHeadAndSlot( + PayloadType, node, randao, validator_index, graffiti, head, slot, execution_payload = Opt.some shimExecutionPayload, transactions_root = Opt.some executionPayloadHeader.get.transactions_root, execution_payload_root = @@ -975,15 +843,21 @@ proc proposeBlock(node: BeaconNode, beacon_block_builder_missed_without_fallback.inc() return newBlockMEV.get + # TODO Compare the value of the MEV block and the execution block + # obtained from the EL below: + let newBlock = if slot.epoch >= node.dag.cfg.DENEB_FORK_EPOCH: - await makeBeaconBlockForHeadAndSlot[eip4844.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + deneb.ExecutionPayloadForSigning, node, randao, validator_index, node.graffitiBytes, head, slot) elif slot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH: - await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + capella.ExecutionPayloadForSigning, node, randao, validator_index, node.graffitiBytes, head, slot) else: - await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayloadForSigning, node, randao, validator_index, node.graffitiBytes, head, slot) if newBlock.isErr(): @@ -995,28 +869,13 @@ proc proposeBlock(node: BeaconNode, var blobs_sidecar = eip4844.BlobsSidecar( beacon_block_slot: slot, ) - when blck is eip4844.BeaconBlock and const_preset != "minimal": - # TODO when lastfcu is none, getExecutionPayload re-queries the EE. - # We don't do that here, which could lead us to propose invalid blocks - # (with a payload but no blobs). 
- if not (node.eth1Monitor.isNil) and - node.consensusManager.forkchoiceUpdatedInfo.isSome(): - - let - lastFcU = node.consensusManager.forkchoiceUpdatedInfo - payload_id = bellatrix.PayloadID(lastFcU.get.payloadId) - bundle = await getBlobsBundle(node, slot.epoch, validator_index, default(PayloadID)) - - # todo: actually compute proof over blobs using nim-kzg-4844 - kzg_aggregated_proof = default(KZGProof) - - blck.body.blob_kzg_commitments = - List[eip4844.KZGCommitment, Limit MAX_BLOBS_PER_BLOCK].init( - mapIt(bundle.kzgs, eip4844.KzgCommitment(it))) - - blobs_sidecar.blobs = List[eip4844.Blob, Limit MAX_BLOBS_PER_BLOCK].init( - mapIt(bundle.blobs, eip4844.Blob(it))) - blobs_sidecar.kzg_aggregated_proof = kzg_aggregated_proof + when blck is eip4844.BeaconBlock: + # TODO: The blobs_sidecar variable is not currently used. + # It could be initialized in makeBeaconBlockForHeadAndSlot + # where the required information is available. + # blobs_sidecar.blobs = forkedBlck.blobs + # blobs_sidecar.kzg_aggregated_proof = kzg_aggregated_proof + discard let blockRoot = hash_tree_root(blck) @@ -1062,7 +921,7 @@ proc proposeBlock(node: BeaconNode, message: blck, signature: signature, root: blockRoot) elif blck is eip4844.BeaconBlock: # TODO: also route blobs - eip4844.SignedBeaconBlock(message: blck, signature: signature, root: blockRoot) + deneb.SignedBeaconBlock(message: blck, signature: signature, root: blockRoot) else: static: doAssert "Unknown SignedBeaconBlock type" newBlockRef = diff --git a/docs/the_nimbus_book/src/pi-guide.md b/docs/the_nimbus_book/src/pi-guide.md index 6877ac2665..8b6cc248ff 100644 --- a/docs/the_nimbus_book/src/pi-guide.md +++ b/docs/the_nimbus_book/src/pi-guide.md @@ -320,8 +320,8 @@ INF 2020-12-01 11:25:37.073+01:00 Generating new networking key ... NOT 2020-12-01 11:25:45.267+00:00 Local validator attached tid=22009 file=validator_pool.nim:33 pubkey=95e3cbe88c71ab2d0e3053b7b12ead329a37e9fb8358bdb4e56251993ab68e46b9f9fa61035fe4cf2abf4c07dfad6c45 validator=95e3cbe8 ... -NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3836397 depositsProcessed=106147 -NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3841412 depositsProcessed=106391 +NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3836397 depositsProcessed=106147 +NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3841412 depositsProcessed=106391 ... INF 2020-12-01 11:26:31.000+00:00 Slot start topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:505 lastSlot=96566 scheduledSlot=96567 beaconTime=1w6d9h53m24s944us774ns peers=7 head=b54486c4:96563 headEpoch=3017 finalized=2f5d12e4:96479 finalizedEpoch=3014 INF 2020-12-01 11:26:36.285+00:00 Slot end topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:593 slot=96567 nextSlot=96568 head=b54486c4:96563 headEpoch=3017 finalizedHead=2f5d12e4:96479 finalizedEpoch=3014 diff --git a/docs/the_nimbus_book/src/start-syncing.md b/docs/the_nimbus_book/src/start-syncing.md index 0b40bef346..4aacc06004 100644 --- a/docs/the_nimbus_book/src/start-syncing.md +++ b/docs/the_nimbus_book/src/start-syncing.md @@ -41,12 +41,12 @@ You should see the following output: ``` INF 2020-12-01 11:25:33.487+01:00 Launching beacon node ... 
-INF 2020-12-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 file=nimbus_beacon_node.nim:198 path=build/data/shared_prater_0/db +INF 2020-12-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 path=build/data/shared_prater_0/db INF 2020-12-01 11:25:35.921+01:00 Block dag initialized INF 2020-12-01 11:25:37.073+01:00 Generating new networking key ... -NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3836397 depositsProcessed=106147 -NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3841412 depositsProcessed=106391 +NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3836397 depositsProcessed=106147 +NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3841412 depositsProcessed=106391 ... INF 2020-12-01 11:26:31.000+00:00 Slot start topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:505 lastSlot=96566 scheduledSlot=96567 beaconTime=1w6d9h53m24s944us774ns peers=7 head=b54486c4:96563 headEpoch=3017 finalized=2f5d12e4:96479 finalizedEpoch=3014 INF 2020-12-01 11:26:36.285+00:00 Slot end topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:593 slot=96567 nextSlot=96568 head=b54486c4:96563 headEpoch=3017 finalizedHead=2f5d12e4:96479 finalizedEpoch=3014 diff --git a/docs/the_nimbus_book/src/troubleshooting.md b/docs/the_nimbus_book/src/troubleshooting.md index 2cf262fc5e..a002b79e88 100644 --- a/docs/the_nimbus_book/src/troubleshooting.md +++ b/docs/the_nimbus_book/src/troubleshooting.md @@ -112,7 +112,7 @@ If you're being flooded with `Catching up on validator duties` messages, your CP If you see an error that looks like the following: ``` -{"lvl":"ERR","ts":"2021-05-11 09:05:53.547+00:00","msg":"Eth1 chain monitoring failure, restarting","topics":"eth1","tid":1,"file":"eth1_monitor.nim:1158","err":"Trying to access value with err: Failed to setup web3 connection"} +{"lvl":"ERR","ts":"2021-05-11 09:05:53.547+00:00","msg":"Eth1 chain monitoring failure, restarting","topics":"eth1","tid":1,"err":"Trying to access value with err: Failed to setup web3 connection"} ``` It's because your node can't connect to the web3 provider you have specified. Please double check that you've correctly specified your provider. If you haven't done so already, we recommend [adding a backup](web3-backup.md). diff --git a/ncli/deposit_downloader.nim b/ncli/deposit_downloader.nim index c61fadb0fa..47e590c0f1 100644 --- a/ncli/deposit_downloader.nim +++ b/ncli/deposit_downloader.nim @@ -1,137 +1,73 @@ import - json, strutils, + std/[json, strutils, times, sequtils], chronos, confutils, chronicles, web3, web3/ethtypes as web3Types, eth/async_utils, + ../beacon_chain/beacon_chain_db, ../beacon_chain/networking/network_metadata, ../beacon_chain/eth1/eth1_monitor, - ../beacon_chain/spec/helpers + ../beacon_chain/spec/[presets, helpers] type CliFlags = object - web3Url {. - name: "web3-url".}: string - depositContractAddress {. - name: "deposit-contract".}: string - startBlock {. - name: "start-block".}: uint64 - endBlock {. - name: "start-block".}: Option[uint64] + network {. + defaultValue: "mainnet" + name: "network".}: string + elUrls {. + name: "el".}: seq[EngineApiUrlConfigValue] + jwtSecret {. + name: "jwt-secret".}: Option[InputFile] outDepositsFile {. 
- defaultValue: "deposits.csv" - name: "out-deposits-file".}: OutFile - -contract(DepositContract): - proc deposit(pubkey: Bytes48, - withdrawalCredentials: Bytes32, - signature: Bytes96, - deposit_data_root: FixedBytes[32]) - - proc get_deposit_root(): FixedBytes[32] - proc get_deposit_count(): Bytes8 - - proc DepositEvent(pubkey: Bytes48, - withdrawalCredentials: Bytes32, - amount: Bytes8, - signature: Bytes96, - index: Bytes8) {.event.} - -const - web3Timeouts = 60.seconds + name: "out-deposits-file".}: Option[OutFile] proc main(flags: CliFlags) {.async.} = - let web3 = waitFor newWeb3(flags.web3Url) - - let endBlock = if flags.endBlock.isSome: - flags.endBlock.get - else: - awaitWithRetries(web3.provider.eth_getBlockByNumber(blockId"latest", false)).number.uint64 - - let depositContract = web3.contractSender( - DepositContract, - Eth1Address.fromHex flags.depositContractAddress) - - var depositsFile = open(string flags.outDepositsFile, fmWrite) - depositsFile.write( - "block", ",", - "transaction", ",", - "depositor", ",", - "amount", ",", - "validatorKey", ",", - "withdrawalCredentials", "\n") - - var currentBlock = flags.startBlock - while currentBlock < endBlock: - var - blocksPerRequest = 5000'u64 # This is roughly a day of Eth1 blocks - backoff = 100 - - while true: - let maxBlockNumberRequested = min(endBlock, currentBlock + blocksPerRequest - 1) - - template retryOrRaise(err: ref CatchableError) = - blocksPerRequest = blocksPerRequest div 2 - if blocksPerRequest == 0: - raise err - continue - - debug "Obtaining deposit log events", - fromBlock = currentBlock, - toBlock = maxBlockNumberRequested, - backoff - - # Reduce all request rate until we have a more general solution - # for dealing with Infura's rate limits - await sleepAsync(milliseconds(backoff)) - - let jsonLogsFut = depositContract.getJsonLogs( - DepositEvent, - fromBlock = some blockId(currentBlock), - toBlock = some blockId(maxBlockNumberRequested)) - - let depositLogs = try: - # Downloading large amounts of deposits can be quite slow - awaitWithTimeout(jsonLogsFut, web3Timeouts): - retryOrRaise newException(DataProviderTimeout, - "Request time out while obtaining json logs") - except CatchableError as err: - debug "Request for deposit logs failed", err = err.msg - backoff = (backoff * 3) div 2 - retryOrRaise err - - currentBlock = maxBlockNumberRequested + 1 - for deposit in depositLogs: - let txNode = deposit{"transactionHash"} - if txNode != nil and txNode.kind == JString: - var - pubkey: Bytes48 - withdrawalCredentials: Bytes32 - amount: Bytes8 - signature: Bytes96 - index: Bytes8 - - let blockNum = parseHexInt deposit["blockNumber"].str - let depositData = strip0xPrefix(deposit["data"].getStr) - var offset = 0 - offset += decode(depositData, offset, pubkey) - offset += decode(depositData, offset, withdrawalCredentials) - offset += decode(depositData, offset, amount) - offset += decode(depositData, offset, signature) - offset += decode(depositData, offset, index) - - let txHash = TxHash.fromHex txNode.str - let tx = awaitWithRetries web3.provider.eth_getTransactionByHash(txHash) - + let + db = BeaconChainDB.new("", inMemory = true) + metadata = getMetadataForNetwork(flags.network) + beaconTimeFn = proc(): BeaconTime = + # BEWARE of this hack + # The EL manager consults the current time in order to determine when the + # transition configuration exchange should start. 
We assume Bellatrix has + # just arrived which should trigger the configuration exchange and allow + # the downloader to connect to ELs serving the Engine API. + start_beacon_time(Slot(metadata.cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH)) + + let + elManager = ELManager.new( + metadata.cfg, + metadata.depositContractBlock, + metadata.depositContractBlockHash, + db, + toFinalEngineApiUrls(flags.elUrls, flags.jwtSecret), + eth1Network = metadata.eth1Network) + + elManager.start() + + var depositsFile: File + if flags.outDepositsFile.isSome: + depositsFile = open(string flags.outDepositsFile.get, fmWrite) + depositsFile.write( + "block", ",", + "validatorKey", ",", + "withdrawalCredentials", "\n") + depositsFile.flushFile() + + var blockIdx = 0 + while not elManager.isSynced(): + await sleepAsync chronos.seconds(1) + + if flags.outDepositsFile.isSome and + elManager.eth1ChainBlocks.len > blockIdx: + for i in blockIdx ..< elManager.eth1ChainBlocks.len: + for deposit in elManager.eth1ChainBlocks[i].deposits: depositsFile.write( - $blockNum, ",", - $txHash, ",", - $tx.source, ",", - $bytes_to_uint64(array[8, byte](amount)), ",", - $pubkey, ",", - $withdrawalCredentials, "\n") + $elManager.eth1ChainBlocks[i].number, ",", + $deposit.pubkey, ",", + $deposit.withdrawal_credentials, "\n") depositsFile.flushFile() - info "Done" + blockIdx = elManager.eth1ChainBlocks.len -waitFor main(load CliFlags) + info "All deposits downloaded" +waitFor main(load CliFlags) diff --git a/research/block_sim.nim b/research/block_sim.nim index 3cd9aae18b..6a8c3a69d5 100644 --- a/research/block_sim.nim +++ b/research/block_sim.nim @@ -70,7 +70,7 @@ from ../beacon_chain/spec/state_transition_block import process_block # when possible, to also use the forked version. It'll be worth keeping some # example of the non-forked version because it enables fork bootstrapping. 
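The simulator keeps one non-forked block builder per beacon-state flavour (renamed to `makeSimulationBlock` below, freeing the `makeBeaconBlock` name for the forked spec version). Selection happens purely through overload resolution on the state parameter; a toy sketch of that pattern with stand-in types:

```
type
  StateA = object   # stand-in for one fork's HashedBeaconState
    slot: uint64
  StateB = object   # stand-in for another fork's HashedBeaconState
    slot: uint64
    extra: int

# Same proc name, different state type: the compiler picks the overload.
proc makeSimBlock(state: var StateA): uint64 =
  state.slot + 1    # fork-A-specific block building would go here

proc makeSimBlock(state: var StateB): uint64 =
  state.slot + 1    # fork-B-specific block building would go here

var a = StateA(slot: 10)
var b = StateB(slot: 20, extra: 1)
assert makeSimBlock(a) == 11
assert makeSimBlock(b) == 21
```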
-proc makeBeaconBlock( +proc makeSimulationBlock( cfg: RuntimeConfig, state: var phase0.HashedBeaconState, proposer_index: ValidatorIndex, @@ -81,7 +81,7 @@ proc makeBeaconBlock( deposits: seq[Deposit], exits: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: bellatrix.ExecutionPayload, + execution_payload: bellatrix.ExecutionPayloadForSigning, bls_to_execution_changes: SignedBLSToExecutionChangeList, rollback: RollbackHashedProc[phase0.HashedBeaconState], cache: var StateCache, @@ -99,8 +99,7 @@ proc makeBeaconBlock( var blck = partialBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, - static(default(eip4844.KZGCommitmentList)), execution_payload) + attestations, deposits, exits, sync_aggregate, execution_payload) let res = process_block( cfg, state.data, blck.asSigVerified(), verificationFlags, cache) @@ -114,7 +113,7 @@ proc makeBeaconBlock( ok(blck) -proc makeBeaconBlock( +proc makeSimulationBlock( cfg: RuntimeConfig, state: var altair.HashedBeaconState, proposer_index: ValidatorIndex, @@ -125,7 +124,7 @@ proc makeBeaconBlock( deposits: seq[Deposit], exits: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: bellatrix.ExecutionPayload, + execution_payload: bellatrix.ExecutionPayloadForSigning, bls_to_execution_changes: SignedBLSToExecutionChangeList, rollback: RollbackHashedProc[altair.HashedBeaconState], cache: var StateCache, @@ -143,8 +142,7 @@ proc makeBeaconBlock( var blck = partialBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, - static(default(eip4844.KZGCommitmentList)), execution_payload) + attestations, deposits, exits, sync_aggregate, execution_payload) # Signatures are verified elsewhere, so don't duplicate inefficiently here let res = process_block( @@ -159,7 +157,7 @@ proc makeBeaconBlock( ok(blck) -proc makeBeaconBlock( +proc makeSimulationBlock( cfg: RuntimeConfig, state: var bellatrix.HashedBeaconState, proposer_index: ValidatorIndex, @@ -170,7 +168,7 @@ proc makeBeaconBlock( deposits: seq[Deposit], exits: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: bellatrix.ExecutionPayload, + execution_payload: bellatrix.ExecutionPayloadForSigning, bls_to_execution_changes: SignedBLSToExecutionChangeList, rollback: RollbackHashedProc[bellatrix.HashedBeaconState], cache: var StateCache, @@ -188,8 +186,7 @@ proc makeBeaconBlock( var blck = partialBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, - static(default(eip4844.KZGCommitmentList)), execution_payload) + attestations, deposits, exits, sync_aggregate, execution_payload) let res = process_block( cfg, state.data, blck.asSigVerified(), verificationFlags, cache) @@ -203,7 +200,7 @@ proc makeBeaconBlock( ok(blck) -proc makeBeaconBlock( +proc makeSimulationBlock( cfg: RuntimeConfig, state: var capella.HashedBeaconState, proposer_index: ValidatorIndex, @@ -214,7 +211,7 @@ proc makeBeaconBlock( deposits: seq[Deposit], exits: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: capella.ExecutionPayload, + execution_payload: capella.ExecutionPayloadForSigning, bls_to_execution_changes: SignedBLSToExecutionChangeList, rollback: RollbackHashedProc[capella.HashedBeaconState], cache: var StateCache, @@ -232,8 +229,7 @@ proc makeBeaconBlock( var blck = partialBeaconBlock( cfg, state, 
proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, - static(default(eip4844.KZGCommitmentList)), execution_payload) + attestations, deposits, exits, sync_aggregate, execution_payload) let res = process_block( cfg, state.data, blck.asSigVerified(), verificationFlags, cache) @@ -247,7 +243,7 @@ proc makeBeaconBlock( ok(blck) -proc makeBeaconBlock( +proc makeSimulationBlock( cfg: RuntimeConfig, state: var eip4844.HashedBeaconState, proposer_index: ValidatorIndex, @@ -258,7 +254,7 @@ proc makeBeaconBlock( deposits: seq[Deposit], exits: BeaconBlockValidatorChanges, sync_aggregate: SyncAggregate, - execution_payload: eip4844.ExecutionPayload, + execution_payload: eip4844.ExecutionPayloadForSigning, bls_to_execution_changes: SignedBLSToExecutionChangeList, rollback: RollbackHashedProc[eip4844.HashedBeaconState], cache: var StateCache, @@ -276,8 +272,7 @@ proc makeBeaconBlock( var blck = partialBeaconBlock( cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, - default(eip4844.KZGCommitmentList), execution_payload) + attestations, deposits, exits, sync_aggregate, execution_payload) let res = process_block( cfg, state.data, blck.asSigVerified(), verificationFlags, cache) @@ -504,7 +499,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6, addr state.denebData else: static: doAssert false - message = makeBeaconBlock( + message = makeSimulationBlock( cfg, hashedState[], proposerIdx, @@ -519,11 +514,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6, BeaconBlockValidatorChanges(), sync_aggregate, when T is eip4844.SignedBeaconBlock: - default(eip4844.ExecutionPayload) + default(eip4844.ExecutionPayloadForSigning) elif T is capella.SignedBeaconBlock: - default(capella.ExecutionPayload) + default(capella.ExecutionPayloadForSigning) else: - default(bellatrix.ExecutionPayload), + default(bellatrix.ExecutionPayloadForSigning), static(default(SignedBLSToExecutionChangeList)), noRollback, cache) diff --git a/research/wss_sim.nim b/research/wss_sim.nim index c0705ffcb8..1446963e05 100644 --- a/research/wss_sim.nim +++ b/research/wss_sim.nim @@ -162,8 +162,7 @@ cli do(validatorsDir: string, secretsDir: string, @[], BeaconBlockValidatorChanges(), syncAggregate, - default(bellatrix.ExecutionPayload), - default(eip4844.KZGCommitmentList), + default(bellatrix.ExecutionPayloadForSigning), noRollback, cache).get() diff --git a/scripts/geth_genesis.json b/scripts/geth_genesis.json deleted file mode 100644 index 9960dc6ab5..0000000000 --- a/scripts/geth_genesis.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "config": { - "chainId":9999, - "homesteadBlock":0, - "eip150Block":0, - "eip155Block":0, - "eip158Block":0, - "byzantiumBlock":0, - "constantinopleBlock":0, - "petersburgBlock":0, - "istanbulBlock":0, - "muirGlacierBlock":0, - "berlinBlock":0, - "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 - }, - "nonce":"0x42", - "timestamp":"0x0", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "difficulty":"0x400000000", - "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase":"0x0000000000000000000000000000000000000000", - "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"}, - 
"0x7e5f4552091a69125d5dfcb7b8c2659029395bdf":{"balance":"0x6d6172697573766477000000"}, - "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf":{"balance":"0x6d6172697573766477000000"}, - "0x6813eb9362372eef6200f3b1dbc3f819671cba69":{"balance":"0x6d6172697573766477000000"}, - "0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718":{"balance":"0x6d6172697573766477000000"}, - "0xe1ab8145f7e55dc933d51a18c793f901a3a0b276":{"balance":"0x6d6172697573766477000000"}, - "0xe57bfe9f44b819898f47bf37e5af72a0783e1141":{"balance":"0x6d6172697573766477000000"}, - "0xd41c057fd1c78805aac12b0a94a405c0461a6fbb":{"balance":"0x6d6172697573766477000000"}, - "0xf1f6619b38a98d6de0800f1defc0a6399eb6d30c":{"balance":"0x6d6172697573766477000000"}, - "0xf7edc8fa1ecc32967f827c9043fcae6ba73afa5c":{"balance":"0x6d6172697573766477000000"} - }, - "number":"0x0", - "gasUsed":"0x0", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" -} diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index 68de5d9f8a..5fbc20de91 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -719,7 +719,7 @@ done if [[ "${REUSE_BINARIES}" == "0" || "${BINARIES_MISSING}" == "1" ]]; then if [[ "${DL_NIMBUS_ETH2}" == "0" ]]; then log "Rebuilding binaries ${BINARIES}" - ${MAKE} -j ${NPROC} LOG_LEVEL=TRACE NIMFLAGS="${NIMFLAGS} -d:local_testnet -d:const_preset=${CONST_PRESET}" ${BINARIES} + ${MAKE} -j ${NPROC} LOG_LEVEL=TRACE NIMFLAGS="${NIMFLAGS} -d:local_testnet -d:const_preset=${CONST_PRESET} -d:web3_consensus_const_preset=${CONST_PRESET}" ${BINARIES} fi fi diff --git a/scripts/nimbusel_genesis.json b/scripts/nimbusel_genesis.json deleted file mode 100644 index 9960dc6ab5..0000000000 --- a/scripts/nimbusel_genesis.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "config": { - "chainId":9999, - "homesteadBlock":0, - "eip150Block":0, - "eip155Block":0, - "eip158Block":0, - "byzantiumBlock":0, - "constantinopleBlock":0, - "petersburgBlock":0, - "istanbulBlock":0, - "muirGlacierBlock":0, - "berlinBlock":0, - "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 - }, - "nonce":"0x42", - "timestamp":"0x0", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "difficulty":"0x400000000", - "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase":"0x0000000000000000000000000000000000000000", - "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"}, - "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf":{"balance":"0x6d6172697573766477000000"}, - "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf":{"balance":"0x6d6172697573766477000000"}, - "0x6813eb9362372eef6200f3b1dbc3f819671cba69":{"balance":"0x6d6172697573766477000000"}, - "0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718":{"balance":"0x6d6172697573766477000000"}, - "0xe1ab8145f7e55dc933d51a18c793f901a3a0b276":{"balance":"0x6d6172697573766477000000"}, - "0xe57bfe9f44b819898f47bf37e5af72a0783e1141":{"balance":"0x6d6172697573766477000000"}, - "0xd41c057fd1c78805aac12b0a94a405c0461a6fbb":{"balance":"0x6d6172697573766477000000"}, - "0xf1f6619b38a98d6de0800f1defc0a6399eb6d30c":{"balance":"0x6d6172697573766477000000"}, - 
"0xf7edc8fa1ecc32967f827c9043fcae6ba73afa5c":{"balance":"0x6d6172697573766477000000"} - }, - "number":"0x0", - "gasUsed":"0x0", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" -} diff --git a/tests/simulation/restapi.sh b/tests/simulation/restapi.sh index 129bda1093..e2fbcbc3a1 100755 --- a/tests/simulation/restapi.sh +++ b/tests/simulation/restapi.sh @@ -232,6 +232,7 @@ ${NIMBUS_BEACON_NODE_BIN} \ --secrets-dir="${SECRETS_DIR}" \ --doppelganger-detection=off \ --nat=none \ + --no-el \ --metrics \ --metrics-address=${METRICS_ADDRESS} \ --metrics-port=${BASE_METRICS_PORT} \ diff --git a/tests/test_block_processor.nim b/tests/test_block_processor.nim index ddd6a0fc9d..d8a0ed3de3 100644 --- a/tests/test_block_processor.nim +++ b/tests/test_block_processor.nim @@ -42,11 +42,11 @@ suite "Block processor" & preset(): verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) quarantine = newClone(Quarantine.init()) attestationPool = newClone(AttestationPool.init(dag, quarantine)) - eth1Monitor = new Eth1Monitor + elManager = new ELManager # TODO: initialise this properly actionTracker: ActionTracker keymanagerHost: ref KeymanagerHost consensusManager = ConsensusManager.new( - dag, attestationPool, quarantine, eth1Monitor, actionTracker, + dag, attestationPool, quarantine, elManager, actionTracker, newClone(DynamicFeeRecipientsStore.init()), "", default(Eth1Address), defaultGasLimit) state = newClone(dag.headState) diff --git a/tests/test_eth1_monitor.nim b/tests/test_eth1_monitor.nim index 987cf9955a..5ca914b4e9 100644 --- a/tests/test_eth1_monitor.nim +++ b/tests/test_eth1_monitor.nim @@ -9,7 +9,7 @@ import unittest2, - ../beacon_chain/eth1/eth1_monitor, + ../beacon_chain/eth1/[el_conf, eth1_monitor], ./testutil from ssz_serialization/types import Limit, List, init @@ -500,7 +500,7 @@ suite "Eth1 monitor": for executionPayload in executionPayloads: check: - executionPayload == asConsensusExecutionPayload( + executionPayload == asConsensusType( asEngineExecutionPayload(executionPayload)) test "Roundtrip engine RPC V2 and capella ExecutionPayload representations": @@ -1020,7 +1020,7 @@ suite "Eth1 monitor": for executionPayload in executionPayloads: check: - executionPayload == asConsensusExecutionPayload( + executionPayload == asConsensusType( asEngineExecutionPayload(executionPayload)) test "Roundtrip engine RPC V3 and eip4844 ExecutionPayload representations": @@ -1564,5 +1564,5 @@ suite "Eth1 monitor": for executionPayload in executionPayloads: check: - executionPayload == asConsensusExecutionPayload( + executionPayload == asConsensusType( asEngineExecutionPayload(executionPayload)) diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim index d653362a9b..7472b4e684 100644 --- a/tests/test_helpers.nim +++ b/tests/test_helpers.nim @@ -70,7 +70,7 @@ suite "Spec helpers": let state = newClone(initGenesisState(cfg = cfg).bellatrixData) proc testCase(recipient: Eth1Address) = - let payload = build_empty_execution_payload(state[].data, recipient) + let payload = build_empty_execution_payload(state[].data, recipient).executionPayload check payload.fee_recipient == bellatrix.ExecutionAddress(data: distinctBase(recipient)) diff --git a/tests/test_keymanager_api.nim b/tests/test_keymanager_api.nim index 2c4940e6b1..a2f8ba65e4 100644 --- a/tests/test_keymanager_api.nim +++ b/tests/test_keymanager_api.nim @@ -289,6 +289,7 @@ proc startBeaconNode(basePort: int) {.raises: [Defect, CatchableError].} = "--rest=true", 
"--rest-address=127.0.0.1", "--rest-port=" & $(basePort + PortKind.KeymanagerBN.ord), + "--no-el", "--keymanager=true", "--keymanager-address=127.0.0.1", "--keymanager-port=" & $(basePort + PortKind.KeymanagerBN.ord), diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index c6263ab067..a600c2b3de 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -79,7 +79,7 @@ func signBlock( ForkedSignedBeaconBlock.init(forked, root, signature) proc build_empty_merge_execution_payload(state: bellatrix.BeaconState): - bellatrix.ExecutionPayload = + bellatrix.ExecutionPayloadForSigning = ## Assuming a pre-state of the same slot, build a valid ExecutionPayload ## without any transactions from a non-merged block. @@ -103,7 +103,8 @@ proc build_empty_merge_execution_payload(state: bellatrix.BeaconState): payload.block_hash = rlpHash payloadToBlockHeader(payload) - payload + bellatrix.ExecutionPayloadForSigning(executionPayload: payload, + blockValue: Wei.zero) proc addTestBlock*( state: var ForkedHashedBeaconState, @@ -140,7 +141,7 @@ proc addTestBlock*( if cfg.CAPELLA_FORK_EPOCH != FAR_FUTURE_EPOCH: # Can't keep correctly doing this once Capella happens, but LVH search # test relies on merging. So, merge only if no Capella transition. - default(bellatrix.ExecutionPayload) + default(bellatrix.ExecutionPayloadForSigning) else: withState(state): when stateFork == ConsensusFork.Bellatrix: @@ -153,9 +154,9 @@ proc addTestBlock*( else: build_empty_merge_execution_payload(forkyState.data) else: - default(bellatrix.ExecutionPayload) + default(bellatrix.ExecutionPayloadForSigning) else: - default(bellatrix.ExecutionPayload) + default(bellatrix.ExecutionPayloadForSigning) let message = makeBeaconBlock( @@ -174,7 +175,6 @@ proc addTestBlock*( BeaconBlockValidatorChanges(), sync_aggregate, execution_payload, - (static(default(deneb.KZGCommitmentList))), noRollback, cache, verificationFlags = {skipBlsValidation}) diff --git a/vendor/capella-testnets b/vendor/capella-testnets deleted file mode 160000 index c22f012e4a..0000000000 --- a/vendor/capella-testnets +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c22f012e4af09bc8e14d1540a956c47491329150 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc index c0ecb42613..07e4705b1f 160000 --- a/vendor/nim-json-rpc +++ b/vendor/nim-json-rpc @@ -1 +1 @@ -Subproject commit c0ecb426131ebc2c3d11c085d749f55884f6fea6 +Subproject commit 07e4705b1fcff917b1eec37c11b3f8001017a76d diff --git a/vendor/nim-web3 b/vendor/nim-web3 index 4726fdc223..610dda642c 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit 4726fdc223d7cc8c3fe490e9ab58a7b43eae742a +Subproject commit 610dda642c3d7e5b0f50bba5457f0da490219001 diff --git a/vendor/withdrawals-testnets b/vendor/withdrawals-testnets new file mode 160000 index 0000000000..38a7b24069 --- /dev/null +++ b/vendor/withdrawals-testnets @@ -0,0 +1 @@ +Subproject commit 38a7b240699d75758ec33f35c766d0ca71bb7759