Commit 22d34d1

onionmessage: add LRU cache to SCID resolver

Add an LRU cache to GraphNodeResolver to avoid repeated database lookups when resolving SCIDs to node public keys. The cache stores up to 1000 compressed pubkey entries, which is sufficient for typical onion message forwarding scenarios. This change also introduces a NewGraphNodeResolver constructor to properly initialize the cache, replacing direct struct literal usage.

1 parent 3a4d672 commit 22d34d1
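For illustration, a minimal standalone sketch of the caching pattern this commit adopts: a small wrapper type that satisfies the neutrino cache.Value interface (via Size) stored in a generic LRU cache keyed by the SCID's uint64 form. The capacity of 2 and the dummy key bytes are made up for the example so eviction is visible; the commit itself uses a capacity of 1000 and stores real compressed pubkeys.

package main

import (
	"fmt"

	"github.com/lightninglabs/neutrino/cache/lru"
)

// cachedPubKey mirrors the wrapper type added in this commit: the stored value
// must satisfy the cache.Value interface, i.e. provide Size() (uint64, error).
type cachedPubKey struct {
	pubKeyBytes [33]byte
}

// Size returns 1 so the cache limits the number of entries rather than doing
// byte-accurate accounting, matching the commit's approach.
func (c *cachedPubKey) Size() (uint64, error) {
	return 1, nil
}

func main() {
	// A capacity of 2 keeps the eviction behaviour visible; the commit
	// uses defaultSCIDCacheSize = 1000.
	scidCache := lru.NewCache[uint64, *cachedPubKey](2)

	// Insert three entries keyed by SCID; the least recently used entry
	// (key 1) is evicted once the capacity is exceeded.
	for scid := uint64(1); scid <= 3; scid++ {
		_, _ = scidCache.Put(scid, &cachedPubKey{
			pubKeyBytes: [33]byte{byte(scid)},
		})
	}

	for scid := uint64(1); scid <= 3; scid++ {
		if entry, err := scidCache.Get(scid); err == nil {
			fmt.Printf("SCID %d cached, first byte %x\n",
				scid, entry.pubKeyBytes[0])
		} else {
			fmt.Printf("SCID %d not cached: %v\n", scid, err)
		}
	}
}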

File tree

2 files changed: +78 -9 lines changed


onionmessage/resolver.go

Lines changed: 75 additions & 5 deletions
@@ -4,22 +4,85 @@ import (
 	"encoding/hex"
 
 	"github.com/btcsuite/btcd/btcec/v2"
+	"github.com/lightninglabs/neutrino/cache/lru"
 	graphdb "github.com/lightningnetwork/lnd/graph/db"
 	"github.com/lightningnetwork/lnd/lnwire"
 )
 
+const (
+	// defaultSCIDCacheSize is the default number of SCID to pubkey mappings
+	// to cache. This is relatively small since onion message forwarding via
+	// SCID is expected to be infrequent compared to forwarding via explicit
+	// node ID.
+	defaultSCIDCacheSize = 1000
+)
+
+// cachedPubKey is a wrapper around a compressed public key that implements the
+// cache.Value interface required by the LRU cache.
+type cachedPubKey struct {
+	pubKeyBytes [33]byte
+}
+
+// Size returns the "size" of an entry. We return 1 as we just want to limit
+// the total number of entries rather than do accurate size accounting.
+func (c *cachedPubKey) Size() (uint64, error) {
+	return 1, nil
+}
+
+// GraphNodeResolver resolves node public keys from short channel IDs using the
+// channel graph. It maintains an LRU cache to avoid repeated database lookups
+// for frequently used SCIDs.
 type GraphNodeResolver struct {
-	Graph  *graphdb.ChannelGraph
-	OurPub *btcec.PublicKey
+	graph  *graphdb.ChannelGraph
+	ourPub *btcec.PublicKey
+
+	// scidCache is an LRU cache mapping SCID (as uint64) to the remote
+	// node's compressed public key bytes.
+	scidCache *lru.Cache[uint64, *cachedPubKey]
+}
+
+// NewGraphNodeResolver creates a new GraphNodeResolver with the given channel
+// graph and our node's public key. It initializes an LRU cache for SCID
+// lookups.
+func NewGraphNodeResolver(graph *graphdb.ChannelGraph,
+	ourPub *btcec.PublicKey) *GraphNodeResolver {
+
+	return &GraphNodeResolver{
+		graph:  graph,
+		ourPub: ourPub,
+		scidCache: lru.NewCache[uint64, *cachedPubKey](
+			defaultSCIDCacheSize,
+		),
+	}
 }
 
 // RemotePubFromSCID resolves a node public key from a short channel ID.
+// It first checks the LRU cache and falls back to a database lookup on cache
+// miss.
 func (r *GraphNodeResolver) RemotePubFromSCID(
 	scid lnwire.ShortChannelID) (*btcec.PublicKey, error) {
 
-	log.Tracef("Resolving node public key for SCID %v", scid)
+	scidInt := scid.ToUint64()
+
+	// Check the cache first.
+	if cached, err := r.scidCache.Get(scidInt); err == nil {
+		pubKey, parseErr := btcec.ParsePubKey(cached.pubKeyBytes[:])
+		if parseErr == nil {
+			log.Tracef("Resolved SCID %v from cache to node %s",
+				scid,
+				hex.EncodeToString(cached.pubKeyBytes[:]))
+
+			return pubKey, nil
+		}
 
-	edge, _, _, err := r.Graph.FetchChannelEdgesByID(scid.ToUint64())
+		// Cache contained invalid data, fall through to DB lookup.
+		log.Debugf("Invalid cached pubkey for SCID %v: %v",
+			scid, parseErr)
+	}
+
+	log.Tracef("Resolving node public key for SCID %v from graph", scid)
+
+	edge, _, _, err := r.graph.FetchChannelEdgesByID(scidInt)
 	if err != nil {
 		log.Debugf("Failed to fetch channel edges for SCID %v: %v",
 			scid, err)
@@ -28,7 +91,7 @@ func (r *GraphNodeResolver) RemotePubFromSCID(
 	}
 
 	otherNodeKeyBytes, err := edge.OtherNodeKeyBytes(
-		r.OurPub.SerializeCompressed(),
+		r.ourPub.SerializeCompressed(),
	)
 	if err != nil {
 		log.Debugf("Failed to get other node key for SCID %v: %v",
@@ -45,6 +108,13 @@ func (r *GraphNodeResolver) RemotePubFromSCID(
 		return nil, err
 	}
 
+	// Cache the result for future lookups. We ignore the return values as
+	// caching is best-effort and a failure just means the next lookup will
+	// hit the database again.
+	_, _ = r.scidCache.Put(scidInt, &cachedPubKey{
+		pubKeyBytes: otherNodeKeyBytes,
+	})
+
 	log.Tracef("Resolved SCID %v to node %s", scid,
 		hex.EncodeToString(pubKey.SerializeCompressed()))

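As the diff above shows, the cache is keyed on the SCID's integer form via scid.ToUint64(). For context, a small standalone sketch of that conversion using lnd's lnwire package; the block height and indexes below are made-up values for illustration.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// A short channel ID packs block height, transaction index and output
	// index; its uint64 form is what the resolver uses as the cache key.
	scid := lnwire.ShortChannelID{
		BlockHeight: 800000,
		TxIndex:     42,
		TxPosition:  1,
	}

	key := scid.ToUint64()
	fmt.Printf("cache key for %v: %d\n", scid, key)

	// The conversion is reversible, so the key alone identifies the SCID.
	roundTrip := lnwire.NewShortChanIDFromInt(key)
	fmt.Printf("round trip: %v\n", roundTrip)
}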
server.go

Lines changed: 3 additions & 4 deletions
@@ -2380,10 +2380,9 @@ func (s *server) Start(ctx context.Context) error {
 	// connections and registered with each peer's message router.
 	// Skip if onion messaging is disabled via config.
 	if !s.cfg.ProtocolOptions.NoOnionMessages() {
-		resolver := &onionmessage.GraphNodeResolver{
-			Graph:  s.graphDB,
-			OurPub: s.identityECDH.PubKey(),
-		}
+		resolver := onionmessage.NewGraphNodeResolver(
+			s.graphDB, s.identityECDH.PubKey(),
+		)
 		s.onionEndpoint, err = onionmessage.NewOnionEndpoint(
 			s.actorSystem.Receptionist(),
 			s.sphinxOnionMsg,

0 commit comments