@@ -440,8 +440,9 @@ func (s *Syncer) runFollowMode() {
 	err := s.subscribeAndFollow()
 	if err != nil && !errors.Is(err, context.Canceled) {
 		s.metrics.SubscribeErrors.Add(1)
-		s.logger.Warn().Err(err).Msg("subscribe failed, falling back to catchup")
-		// Don't sleep - go straight to catchup mode to recover
+		s.logger.Warn().Err(err).Msg("subscribe failed, will retry via mode check")
+		// No explicit catchup call needed - daWorkerLoop will call determineSyncMode()
+		// which defaults to catchup on error or when behind
 	}
 }

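For context on the new comment, the removed explicit fallback is replaced by the worker loop re-checking the sync mode on its next pass. Below is a minimal sketch of that interplay; apart from runFollowMode, daWorkerLoop, and determineSyncMode, which the diff itself mentions, every name, field, and signature here is an assumption for illustration, not the actual implementation.

```go
package syncer

import "context"

type syncMode int

const (
	modeCatchup syncMode = iota
	modeFollow
)

// Syncer here is a stub; the real type lives in the package this diff touches.
type Syncer struct {
	lastSubscribeErr error // assumed field: recorded by runFollowMode on failure
	behindDAHead     bool  // assumed field: set when local height lags the DA head
}

// determineSyncMode defaults to catchup on a prior subscribe error or when the
// node is behind; otherwise it stays in follow mode (assumed behavior).
func (s *Syncer) determineSyncMode() syncMode {
	if s.lastSubscribeErr != nil || s.behindDAHead {
		return modeCatchup
	}
	return modeFollow
}

// daWorkerLoop re-evaluates the mode on every iteration, so a failed subscribe
// in follow mode naturally falls back to catchup on the next pass.
func (s *Syncer) daWorkerLoop(ctx context.Context) {
	for ctx.Err() == nil {
		switch s.determineSyncMode() {
		case modeFollow:
			s.runFollowMode()
		default:
			s.runCatchup(ctx)
		}
	}
}

func (s *Syncer) runFollowMode()                 { /* as in the hunk above */ }
func (s *Syncer) runCatchup(ctx context.Context) { /* hypothetical catchup entry point */ }
```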
@@ -977,25 +978,15 @@ func hashTx(tx []byte) string {
 }
 
 // calculateBlockFullness returns a value between 0.0 and 1.0 indicating how full the block is.
-// It estimates fullness based on total data size.
+// It estimates fullness based on total data size relative to max blob size.
 // This is a heuristic - actual limits may vary by execution layer.
 func (s *Syncer) calculateBlockFullness(data *types.Data) float64 {
-	const maxDataSize = common.DefaultMaxBlobSize
-
-	var fullness float64
-	count := 0
-
-	// Check data size fullness
-	dataSize := uint64(0)
+	var dataSize uint64
 	for _, tx := range data.Txs {
 		dataSize += uint64(len(tx))
 	}
-	sizeFullness := float64(dataSize) / float64(maxDataSize)
-	fullness += min(sizeFullness, 1.0)
-	count++
-
-	// Return average fullness
-	return fullness / float64(count)
+	fullness := float64(dataSize) / float64(common.DefaultMaxBlobSize)
+	return min(fullness, 1.0)
 }
 
 // updateDynamicGracePeriod updates the grace period multiplier based on block fullness.
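To make the simplified heuristic concrete, here is a standalone sketch that mirrors the new calculateBlockFullness body; the blob-size constant and helper name below are placeholders for illustration, not the real value of common.DefaultMaxBlobSize or an actual function in the codebase.

```go
package main

import "fmt"

// assumedMaxBlobSize is a placeholder; the real heuristic divides by
// common.DefaultMaxBlobSize, whose actual value is not shown in this diff.
const assumedMaxBlobSize = 2 * 1024 * 1024

// blockFullness mirrors the new calculateBlockFullness body on a plain tx slice.
func blockFullness(txs [][]byte) float64 {
	var dataSize uint64
	for _, tx := range txs {
		dataSize += uint64(len(tx))
	}
	fullness := float64(dataSize) / float64(assumedMaxBlobSize)
	return min(fullness, 1.0) // clamp, since a block's data can exceed the max blob size
}

func main() {
	half := make([]byte, assumedMaxBlobSize/2)
	fmt.Println(blockFullness([][]byte{half}))             // 0.5: half full
	fmt.Println(blockFullness([][]byte{half, half}))       // 1: exactly full
	fmt.Println(blockFullness([][]byte{half, half, half})) // 1: clamped, never above 1.0
}
```

Because the result is clamped to the 0.0 to 1.0 range, a caller such as updateDynamicGracePeriod (per the comment above) can presumably treat it directly as a ratio without guarding against oversized blocks.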