@@ -5,7 +5,7 @@ import type { L1BlockId } from '@aztec/ethereum/l1-types';
 import type { ViemPublicClient, ViemPublicDebugClient } from '@aztec/ethereum/types';
 import { maxBigint } from '@aztec/foundation/bigint';
 import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types';
-import { Buffer32 } from '@aztec/foundation/buffer';
+import { Buffer16, Buffer32 } from '@aztec/foundation/buffer';
 import { pick } from '@aztec/foundation/collection';
 import { Fr } from '@aztec/foundation/curves/bn254';
 import { type Logger, createLogger } from '@aztec/foundation/log';
@@ -384,30 +384,11 @@ export class ArchiverL1Synchronizer implements Traceable {
 
     // Compare local message store state with the remote. If they match, we just advance the match pointer.
     const remoteMessagesState = await this.inbox.getState({ blockNumber: currentL1BlockNumber });
-<<<<<<< HEAD
-
-    this.log.trace(`Retrieved remote inbox state at L1 block ${currentL1BlockNumber}.`, {
-      localMessagesInserted,
-      localLastMessage,
-      remoteMessagesState,
-    });
-
-    // Compare message count and rolling hash. If they match, no need to retrieve anything.
-    if (
-      remoteMessagesState.totalMessagesInserted === localMessagesInserted &&
-      remoteMessagesState.messagesRollingHash.equals(localLastMessage?.rollingHash ?? Buffer32.ZERO)
-    ) {
-      this.log.trace(
-        `No L1 to L2 messages to query between L1 blocks ${messagesSyncPoint.l1BlockNumber} and ${currentL1BlockNumber}.`,
-      );
-      return;
-=======
     const localLastMessage = await this.store.getLastL1ToL2Message();
     if (await this.localStateMatches(localLastMessage, remoteMessagesState)) {
       this.log.trace(`Local L1 to L2 messages are already in sync with remote at L1 block ${currentL1BlockNumber}`);
-      await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+      await this.store.setMessageSyncState(currentL1Block);
       return true;
->>>>>>> 77c78761552 (fix(archiver): always advance L1-to-L2 messages syncpoint to current L1 block)
     }
 
     // If not, then we are out of sync. Most likely there are new messages on the inbox, so we try retrieving them.
@@ -422,7 +403,7 @@ export class ArchiverL1Synchronizer implements Traceable {
         `Failed to store L1 to L2 messages retrieved from L1: ${error.message}. Rolling back syncpoint to retry.`,
         { inboxMessage: error.inboxMessage },
       );
-      await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+      await this.rollbackL1ToL2Messages();
       return false;
     }
     throw error;
@@ -437,12 +418,12 @@ export class ArchiverL1Synchronizer implements Traceable {
         `Local L1 to L2 messages state does not match remote after sync attempt. Rolling back syncpoint to retry.`,
         { localLastMessageAfterSync, remoteMessagesState },
       );
-      await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+      await this.rollbackL1ToL2Messages();
       return false;
     }
 
     // Advance the syncpoint after a successful sync
-    await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+    await this.store.setMessageSyncState(currentL1Block);
     return true;
   }
 
448429
@@ -492,7 +473,7 @@ export class ArchiverL1Synchronizer implements Traceable {
    * Rolls back local L1 to L2 messages to the last common message with L1, and updates the syncpoint to the L1 block of that message.
    * If no common message is found, rolls back all messages and sets the syncpoint to the start block.
    */
-  private async rollbackL1ToL2Messages(remoteTreeInProgress: bigint): Promise<L1BlockId> {
+  private async rollbackL1ToL2Messages(): Promise<L1BlockId> {
     // Slowly go back through our messages until we find the last common message.
     // We could query the logs in batch as an optimization, but the depth of the reorg should not be deep, and this
     // is a very rare case, so it's fine to query one log at a time.
@@ -532,11 +513,8 @@ export class ArchiverL1Synchronizer implements Traceable {
     const syncPointL1BlockNumber = commonMsg ? commonMsg.l1BlockNumber - 1n : this.l1Constants.l1StartBlock;
     const syncPointL1BlockHash = await this.getL1BlockHash(syncPointL1BlockNumber);
     const messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
-    await this.store.setMessageSyncState(messagesSyncPoint, remoteTreeInProgress);
-    this.log.verbose(`Updated messages syncpoint to L1 block ${syncPointL1BlockNumber}`, {
-      ...messagesSyncPoint,
-      remoteTreeInProgress,
-    });
+    await this.store.setMessageSyncState(messagesSyncPoint);
+    this.log.verbose(`Updated messages syncpoint to L1 block ${syncPointL1BlockNumber}`, messagesSyncPoint);
     return messagesSyncPoint;
   }
 
542520
0 commit comments