Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions fuzz/src/chanmon_consistency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -282,6 +282,7 @@ impl TestChainMonitor {
Arc::clone(&persister),
Arc::clone(&keys),
keys.get_peer_storage_key(),
false,
)),
logger,
keys,
Expand Down
1 change: 1 addition & 0 deletions fuzz/src/full_stack.rs
Original file line number Diff line number Diff line change
Expand Up @@ -603,6 +603,7 @@ pub fn do_test(mut data: &[u8], logger: &Arc<dyn Logger + MaybeSend + MaybeSync>
Arc::new(TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) }),
Arc::clone(&keys_manager),
keys_manager.get_peer_storage_key(),
false,
));

let network = Network::Bitcoin;
Expand Down
1 change: 1 addition & 0 deletions fuzz/src/lsps_message.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ pub fn do_test(data: &[u8]) {
Arc::clone(&kv_store),
Arc::clone(&keys_manager),
keys_manager.get_peer_storage_key(),
false,
));
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
Expand Down
1 change: 1 addition & 0 deletions lightning-background-processor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2444,6 +2444,7 @@ mod tests {
Arc::clone(&kv_store),
Arc::clone(&keys_manager),
keys_manager.get_peer_storage_key(),
false,
));
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
Expand Down
1 change: 1 addition & 0 deletions lightning-persister/src/fs_store/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,7 @@ mod tests {
&chanmon_cfgs[0].fee_estimator,
&store,
node_cfgs[0].keys_manager,
false,
);
node_cfgs[0].chain_monitor = chain_mon_0;
let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
Expand Down
2 changes: 2 additions & 0 deletions lightning-persister/src/test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ pub(crate) fn do_test_store<K: KVStoreSync + Sync>(store_0: &K, store_1: &K) {
&chanmon_cfgs[0].fee_estimator,
store_0,
node_cfgs[0].keys_manager,
false,
);
let chain_mon_1 = test_utils::TestChainMonitor::new(
Some(&chanmon_cfgs[1].chain_source),
Expand All @@ -128,6 +129,7 @@ pub(crate) fn do_test_store<K: KVStoreSync + Sync>(store_0: &K, store_1: &K) {
&chanmon_cfgs[1].fee_estimator,
store_1,
node_cfgs[1].keys_manager,
false,
);
node_cfgs[0].chain_monitor = chain_mon_0;
node_cfgs[1].chain_monitor = chain_mon_1;
Expand Down
109 changes: 103 additions & 6 deletions lightning/src/chain/chainmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,12 @@ use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::secp256k1::PublicKey;

use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
#[cfg(peer_storage)]
use crate::chain::channelmonitor::write_chanmon_internal;
use crate::chain::channelmonitor::{
Balance, ChannelMonitor, ChannelMonitorUpdate, MonitorEvent, TransactionOutputs,
WithChannelMonitor,
Balance, ChannelMonitor, ChannelMonitorUpdate, ClaimInfo, ClaimKey, ClaimMetadata,
MonitorEvent, TransactionOutputs, WithChannelMonitor,
};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, WatchedOutput};
Expand Down Expand Up @@ -371,6 +371,9 @@ pub struct ChainMonitor<

#[cfg(peer_storage)]
our_peerstorage_encryption_key: PeerStorageKey,

/// If false, claim info persistence events are swallowed.
offload_claim_info: bool,
}

impl<
Expand All @@ -397,7 +400,7 @@ where
pub fn new_async_beta(
chain_source: Option<C>, broadcaster: T, logger: L, feeest: F,
persister: MonitorUpdatingPersisterAsync<K, S, L, ES, SP, T, F>, _entropy_source: ES,
_our_peerstorage_encryption_key: PeerStorageKey,
_our_peerstorage_encryption_key: PeerStorageKey, offload_claim_info: bool,
) -> Self {
let event_notifier = Arc::new(Notifier::new());
Self {
Expand All @@ -414,6 +417,7 @@ where
pending_send_only_events: Mutex::new(Vec::new()),
#[cfg(peer_storage)]
our_peerstorage_encryption_key: _our_peerstorage_encryption_key,
offload_claim_info,
}
}
}
Expand Down Expand Up @@ -590,6 +594,15 @@ where
/// always need to fetch full blocks absent another means for determining which blocks contain
/// transactions relevant to the watched channels.
///
/// If `offload_claim_info` is set to `true`, [`Event::PersistClaimInfo`] events will be
/// surfaced, allowing callers to offload claim information from [`ChannelMonitor`]s to reduce
/// their size. If set to `false`, these events will be silently ignored and the claim
/// information will remain in-memory and in each [`ChannelMonitor`] on disk.
///
/// Note that no matter the value of `offload_claim_info`, [`Event::ClaimInfoRequest`]s will be
/// surfaced if needed. If [`Event::PersistClaimInfo`]s have never been surfaced/handled for a
/// node, no [`Event::ClaimInfoRequest`] will be generated.
///
/// # Note
/// `our_peerstorage_encryption_key` must be obtained from [`NodeSigner::get_peer_storage_key`].
/// This key is used to encrypt peer storage backups.
Expand All @@ -601,9 +614,12 @@ where
/// [`NodeSigner`]: crate::sign::NodeSigner
/// [`NodeSigner::get_peer_storage_key`]: crate::sign::NodeSigner::get_peer_storage_key
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Event::PersistClaimInfo`]: crate::events::Event::PersistClaimInfo
/// [`Event::ClaimInfoRequest`]: crate::events::Event::ClaimInfoRequest
pub fn new(
chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P,
_entropy_source: ES, _our_peerstorage_encryption_key: PeerStorageKey,
offload_claim_info: bool,
) -> Self {
Self {
monitors: RwLock::new(new_hash_map()),
Expand All @@ -619,6 +635,7 @@ where
pending_send_only_events: Mutex::new(Vec::new()),
#[cfg(peer_storage)]
our_peerstorage_encryption_key: _our_peerstorage_encryption_key,
offload_claim_info,
}
}

Expand Down Expand Up @@ -770,6 +787,59 @@ where
Ok(())
}

/// Hands a previously-offloaded [`ClaimInfo`] (plus its [`ClaimMetadata`]) back to the
/// [`ChannelMonitor`] tracking `channel_id`.
///
/// Call this in response to an [`Event::ClaimInfoRequest`], supplying the claim data that was
/// earlier persisted externally via an [`Event::PersistClaimInfo`] event.
///
/// Returns `Err(())` only when no [`ChannelMonitor`] exists for `channel_id`. Because a
/// [`Event::ClaimInfoRequest`] is only generated for monitors we track, an `Err` here almost
/// certainly indicates the caller passed the wrong `channel_id` and should be treated as a
/// failure/panic condition.
///
/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
pub fn provide_claim_info(
	&self, channel_id: ChannelId, claim_key: ClaimKey, claim_metadata: ClaimMetadata,
	claim_info: ClaimInfo,
) -> Result<(), ()> {
	let monitor_map = self.monitors.read().unwrap();
	match monitor_map.get(&channel_id) {
		None => Err(()),
		Some(entry) => {
			// provide_claim_info may need to (re)build claims, so hand it a fee estimator
			// bounded below just like the rest of the monitor plumbing.
			let fee_est = LowerBoundedFeeEstimator::new(&self.fee_estimator);
			entry.monitor.provide_claim_info(
				claim_key,
				claim_metadata,
				claim_info,
				&self.broadcaster,
				&fee_est,
				&self.logger,
			);
			Ok(())
		},
	}
}

/// Informs the [`ChannelMonitor`] for `channel_id` that the [`ClaimInfo`] identified by
/// `claim_key` has been durably persisted externally.
///
/// Invoke this after completing the persistence requested by an [`Event::PersistClaimInfo`];
/// the monitor will then drop that [`ClaimInfo`] from both its in-memory state and its on-disk
/// serialization.
///
/// Returns `Err(())` only when no [`ChannelMonitor`] exists for `channel_id`. Since a
/// [`Event::PersistClaimInfo`] is only generated for monitors we track, an `Err` here almost
/// certainly means a wrong `channel_id` was supplied and should be treated as a failure/panic
/// condition by the caller.
///
/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
pub fn claim_info_persisted(
	&self, channel_id: ChannelId, claim_key: ClaimKey,
) -> Result<(), ()> {
	let monitor_map = self.monitors.read().unwrap();
	monitor_map
		.get(&channel_id)
		.map(|entry| entry.monitor.claim_info_persisted(&claim_key))
		.ok_or(())
}

/// This wrapper avoids having to update some of our tests for now as they assume the direct
/// chain::Watch API wherein we mark a monitor fully-updated by just calling
/// channel_monitor_updated once with the highest ID.
Expand All @@ -788,6 +858,15 @@ where
self.event_notifier.notify();
}

#[cfg(any(test, feature = "_test_utils"))]
// Test-only helper: drains the pending claim-info events from every tracked monitor and
// returns them as a single flat list.
pub fn get_and_clear_claim_info_events(&self) -> Vec<events::Event> {
	self.monitors
		.read()
		.unwrap()
		.values()
		.flat_map(|state| state.monitor.get_and_clear_claim_info_events())
		.collect()
}

#[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
use crate::events::EventsProvider;
Expand Down Expand Up @@ -818,7 +897,17 @@ where
self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor),
self.logger,
ev,
handler(ev).await
{
if !self.offload_claim_info {
if let Event::PersistClaimInfo { .. } = &ev {
[Inline review comment — Collaborator (PR author)]: This should actually not be generated instead of simply filtered.

Ok(())
} else {
handler(ev).await
}
} else {
handler(ev).await
}
}
) {
Ok(()) => {},
Err(ReplayEvent()) => {
Expand Down Expand Up @@ -1477,8 +1566,16 @@ where
where
H::Target: EventHandler,
{
let filtering_handler = |event: events::Event| {
if !self.offload_claim_info {
if let events::Event::PersistClaimInfo { .. } = &event {
return Ok(());
}
}
handler.handle_event(event)
};
for monitor_state in self.monitors.read().unwrap().values() {
match monitor_state.monitor.process_pending_events(&handler, &self.logger) {
match monitor_state.monitor.process_pending_events(&&filtering_handler, &self.logger) {
Ok(()) => {},
Err(ReplayEvent()) => {
self.event_notifier.notify();
Expand Down
Loading
Loading