// forked from lightningdevkit/rust-lightning
// test_utils.rs — 217 lines (188 loc) · 7.62 KB
use lightning::events::ClosureReason;
use lightning::ln::functional_test_utils::{
connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
create_network, create_node_cfgs, create_node_chanmgrs, send_payment,
};
use lightning::util::persist::{
migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore,
KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN,
};
use lightning::util::test_utils;
use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
use std::panic::RefUnwindSafe;
/// Exercises the basic `KVStoreSync` contract against the given store:
/// write/read/list/remove round-trips, panic-based rejection of invalid
/// namespace/key combinations, and handling of maximum-length identifiers.
pub(crate) fn do_read_write_remove_list_persist<K: KVStoreSync + RefUnwindSafe>(kv_store: &K) {
	let payload = [42u8; 32];
	let ns = "testspace";
	let sub_ns = "testsubspace";
	let entry = "testkey";

	// A plain write into a fully-qualified namespace must succeed.
	kv_store.write(ns, sub_ns, entry, &payload).unwrap();

	// Empty primary *and* secondary namespace is allowed...
	kv_store.write("", "", entry, &payload).unwrap();
	// ...but an empty primary namespace combined with a non-empty secondary
	// namespace must panic,
	let panicked = std::panic::catch_unwind(|| kv_store.write("", sub_ns, entry, &payload));
	assert!(panicked.is_err());
	// ...as must an empty key.
	let panicked = std::panic::catch_unwind(|| kv_store.write(ns, sub_ns, "", &payload));
	assert!(panicked.is_err());

	// The key we wrote is the only one listed, and it reads back verbatim.
	let keys = kv_store.list(ns, sub_ns).unwrap();
	assert_eq!(keys.len(), 1);
	assert_eq!(keys[0], entry);
	let read_back = kv_store.read(ns, sub_ns, entry).unwrap();
	assert_eq!(payload, &*read_back);

	// After removal the namespace lists as empty again.
	kv_store.remove(ns, sub_ns, entry, false).unwrap();
	assert_eq!(kv_store.list(ns, sub_ns).unwrap().len(), 0);

	// Identifiers of exactly KVSTORE_NAMESPACE_KEY_MAX_LEN characters must work
	// for primary namespace, secondary namespace, and key alike.
	let max_chars = "A".repeat(KVSTORE_NAMESPACE_KEY_MAX_LEN);
	kv_store.write(&max_chars, &max_chars, &max_chars, &payload).unwrap();
	let keys = kv_store.list(&max_chars, &max_chars).unwrap();
	assert_eq!(keys.len(), 1);
	assert_eq!(keys[0], max_chars);
	let read_back = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap();
	assert_eq!(payload, &*read_back);
	kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap();
	assert_eq!(kv_store.list(&max_chars, &max_chars).unwrap().len(), 0);
}
/// Populates `source_store` with a 3×3×3 grid of (primary namespace,
/// secondary namespace, key) entries, migrates everything into
/// `target_store` via `migrate_kv_store_data`, and verifies that both stores
/// agree on the full key set and that every migrated value is intact.
pub(crate) fn do_test_data_migration<S: MigratableKVStore, T: MigratableKVStore>(
	source_store: &mut S, target_store: &mut T,
) {
	let payload = [42u8; 32];
	// Three primary namespaces × three secondary namespaces × three keys.
	const DIMS: usize = 3;
	// Helper: pick the n-th character of the allowed namespace/key alphabet.
	let nth_char = |n: usize| KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(n).unwrap();

	// Fill the source with bogus entries, recording what we expect to see back.
	// Index 0 maps to the empty namespace (and an empty primary namespace
	// forces an empty secondary namespace), so some tuples repeat.
	let mut expected_keys = Vec::new();
	for i in 0..DIMS {
		let primary = if i == 0 { String::new() } else { format!("testspace{}", nth_char(i)) };
		for j in 0..DIMS {
			let secondary = if i == 0 || j == 0 {
				String::new()
			} else {
				format!("testsubspace{}", nth_char(j))
			};
			for k in 0..DIMS {
				let key = format!("testkey{}", nth_char(k));
				source_store.write(&primary, &secondary, &key, &payload).unwrap();
				expected_keys.push((primary.clone(), secondary.clone(), key));
			}
		}
	}
	// Duplicate tuples arise from the empty-namespace collapsing above.
	expected_keys.sort();
	expected_keys.dedup();

	let mut source_list = source_store.list_all_keys().unwrap();
	source_list.sort();
	assert_eq!(source_list, expected_keys);

	migrate_kv_store_data(source_store, target_store).unwrap();

	// The target must now list exactly the same keys...
	let mut target_list = target_store.list_all_keys().unwrap();
	target_list.sort();
	assert_eq!(target_list, expected_keys);
	// ...and every value must have survived the migration byte-for-byte.
	for (p, s, k) in expected_keys.iter() {
		assert_eq!(target_store.read(p, s, k).unwrap(), payload);
	}
}
// Integration-test the given KVStore implementation. Test relaying a few payments and check that
// the persisted data is updated the appropriate number of times.
pub(crate) fn do_test_store<K: KVStoreSync + Sync>(store_0: &K, store_1: &K) {
	// Build a two-node test network whose chain monitors persist their channel
	// monitors through the stores under test.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let chain_mon_0 = test_utils::TestChainMonitor::new(
		Some(&chanmon_cfgs[0].chain_source),
		&chanmon_cfgs[0].tx_broadcaster,
		&chanmon_cfgs[0].logger,
		&chanmon_cfgs[0].fee_estimator,
		store_0,
		node_cfgs[0].keys_manager,
	);
	let chain_mon_1 = test_utils::TestChainMonitor::new(
		Some(&chanmon_cfgs[1].chain_source),
		&chanmon_cfgs[1].tx_broadcaster,
		&chanmon_cfgs[1].logger,
		&chanmon_cfgs[1].fee_estimator,
		store_1,
		node_cfgs[1].keys_manager,
	);
	node_cfgs[0].chain_monitor = chain_mon_0;
	node_cfgs[1].chain_monitor = chain_mon_1;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let node_b_id = nodes[1].node.get_our_node_id();
	// Check that the persisted channel data is empty before any channels are
	// open.
	let mut persisted_chan_data_0 =
		read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
	assert_eq!(persisted_chan_data_0.len(), 0);
	let mut persisted_chan_data_1 =
		read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
	assert_eq!(persisted_chan_data_1.len(), 0);
	// Helper to make sure the channel is on the expected update ID.
	// Re-reads both stores from scratch so we verify what was actually
	// persisted, not just the in-memory monitor state.
	macro_rules! check_persisted_data {
		($expected_update_id: expr) => {
			persisted_chan_data_0 =
				read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager)
					.unwrap();
			assert_eq!(persisted_chan_data_0.len(), 1);
			for (_, mon) in persisted_chan_data_0.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
			}
			persisted_chan_data_1 =
				read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager)
					.unwrap();
			assert_eq!(persisted_chan_data_1.len(), 1);
			for (_, mon) in persisted_chan_data_1.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
			}
		};
	}
	// Create some initial channel and check that a channel was persisted.
	let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
	check_persisted_data!(0);
	// Send a few payments and make sure the monitors are updated to the latest.
	// Each full payment flow advances the monitor update ID by 5 on both nodes
	// (0 -> 5 -> 10), regardless of payment direction.
	send_payment(&nodes[0], &vec![&nodes[1]][..], 8000000);
	check_persisted_data!(5);
	send_payment(&nodes[1], &vec![&nodes[0]][..], 4000000);
	check_persisted_data!(10);
	// Force close because cooperative close doesn't result in any persisted
	// updates.
	let message = "Channel force-closed".to_owned();
	let chan_id = nodes[0].node.list_channels()[0].channel_id;
	nodes[0]
		.node
		.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone())
		.unwrap();
	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
	check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	// Node 0 should have broadcast exactly its commitment transaction.
	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert_eq!(node_txn.len(), 1);
	// Confirm the commitment transaction on node 1's chain view so it also
	// closes the channel. NOTE(review): the transaction appears twice in the
	// block, presumably to exercise duplicate-confirmation handling — confirm.
	connect_block(
		&nodes[1],
		&create_dummy_block(
			nodes[0].best_block_hash(),
			42,
			vec![node_txn[0].clone(), node_txn[0].clone()],
		),
	);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(
		nodes[1],
		1,
		ClosureReason::CommitmentTxConfirmed,
		[nodes[0].node.get_our_node_id()],
		100000
	);
	check_added_monitors!(nodes[1], 1);
	// Make sure everything is persisted as expected after close: one more
	// monitor update on each side (10 -> 11).
	check_persisted_data!(11);
}