diff --git a/doc/schema.md b/doc/schema.md index 4875cb4df..d9dabf089 100644 --- a/doc/schema.md +++ b/doc/schema.md @@ -25,17 +25,16 @@ Each block results in the following new rows: * `"M{blockhash}" → "{metadata}"` (block weight, size and number of txs) - * `"D{blockhash}" → ""` (signifies the block is done processing) + * `"D{blockhash}" → ""` (signifies the block was added) -Each transaction results in the following new rows: +Each transaction results in the following new row: * `"T{txid}" → "{serialized-transaction}"` - * `"C{txid}{confirmed-blockhash}" → ""` (a list of blockhashes where `txid` was seen to be confirmed) - -Each output results in the following new row: +Each output results in the following new rows: * `"O{txid}{vout}" → "{scriptpubkey}{value}"` + * `"a{funding-address-str}" → ""` (for prefix address search, only saved when `--address-search` is enabled) When the indexer is synced up to the tip of the chain, the hash of the tip is saved as following: @@ -43,16 +42,23 @@ When the indexer is synced up to the tip of the chain, the hash of the tip is sa ### `history` -Each funding output (except for provably unspendable ones when `--index-unspendables` is not enabled) results in the following new rows (`H` is for history, `F` is for funding): +Each transaction results in the following new row: + + * `"C{txid}" → "{confirmed-height}"` + +Each funding output (except for provably unspendable ones when `--index-unspendables` is not enabled) results in the following new row (`H` is for history, `F` is for funding): * `"H{funding-scripthash}{funding-height}F{funding-txid:vout}{value}" → ""` - * `"a{funding-address-str}" → ""` (for prefix address search, only saved when `--address-search` is enabled) Each spending input (except the coinbase) results in the following new rows (`S` is for spending): * `"H{funding-scripthash}{spending-height}S{spending-txid:vin}{funding-txid:vout}{value}" → ""` - * `"S{funding-txid:vout}{spending-txid:vin}" → ""` + * `"S{funding-txid:vout}" → "{spending-txid:vin}{spending-height}"` + +Each block results in the following new row: + + * `"D{blockhash}" → ""` (signifies the block was indexed) #### Elements only diff --git a/flake.nix b/flake.nix index ece2db67d..5f9ae1136 100644 --- a/flake.nix +++ b/flake.nix @@ -33,7 +33,8 @@ src = craneLib.cleanCargoSource ./.; - nativeBuildInputs = with pkgs; [ rustToolchain clang ]; # required only at build time + # Build-time deps; include libclang so rocksdb-sys/bindgen can find a shared libclang. 
+ nativeBuildInputs = with pkgs; [ rustToolchain clang libclang ]; buildInputs = with pkgs; [ ]; # also required at runtime envVars = diff --git a/src/bin/db-migrate-v1-to-v2.rs b/src/bin/db-migrate-v1-to-v2.rs new file mode 100644 index 000000000..9289f1d77 --- /dev/null +++ b/src/bin/db-migrate-v1-to-v2.rs @@ -0,0 +1,287 @@ +use std::collections::BTreeSet; +use std::convert::TryInto; +use std::str; + +use itertools::Itertools; +use log::{debug, info, trace}; +use rocksdb::WriteBatch; + +use bitcoin::hashes::Hash; + +use electrs::chain::{BlockHash, Txid}; +use electrs::new_index::db::DBFlush; +use electrs::new_index::schema::{ + lookup_confirmations, FullHash, Store, TxConfRow as V2TxConfRow, TxEdgeRow as V2TxEdgeRow, + TxHistoryKey, +}; +use electrs::util::bincode::{deserialize_big, deserialize_little, serialize_little}; +use electrs::{config::Config, metrics::Metrics}; + +const FROM_DB_VERSION: u32 = 1; +const TO_DB_VERSION: u32 = 2; + +const BATCH_SIZE: usize = 15000; +const PROGRESS_EVERY: usize = BATCH_SIZE * 50; + +// For Elements-based chains the 'I' asset history index is migrated too +#[cfg(not(feature = "liquid"))] +const HISTORY_PREFIXES: [u8; 1] = [b'H']; +#[cfg(feature = "liquid")] +const HISTORY_PREFIXES: [u8; 2] = [b'H', b'I']; + +fn main() { + let config = Config::from_args(); + let metrics = Metrics::new(config.monitoring_addr); + let store = Store::open(&config, &metrics, false); + + let txstore_db = store.txstore_db(); + let history_db = store.history_db(); + let cache_db = store.cache_db(); + let headers = store.headers(); + let tip_height = headers.best_height() as u32; + + // Check the DB version under `V` matches the expected version + for db in [txstore_db, history_db, cache_db] { + let ver_bytes = db.get(b"V").expect("missing DB version"); + let ver: u32 = deserialize_little(&ver_bytes[0..4]).unwrap(); + assert_eq!(ver, FROM_DB_VERSION, "unexpected DB version {}", ver); + } + + // Utility to log progress once every PROGRESS_EVERY ticks + let mut tick = 0usize; + macro_rules! progress { + ($($arg:tt)+) => {{ + tick = tick.wrapping_add(1); + if tick % PROGRESS_EVERY == 0 { + debug!($($arg)+); + } + }}; + } + + // 1. Migrate the address prefix search index + // Moved as-is from the history db to the txstore db + info!("[1/4] migrating address prefix search index..."); + let address_iter = history_db.iter_scan(b"a"); + for chunk in &address_iter.chunks(BATCH_SIZE) { + let mut batch = WriteBatch::default(); + for row in chunk { + progress!("[1/4] at {}", str::from_utf8(&row.key[1..]).unwrap()); + batch.put(row.key, row.value); + } + // Write batches without flushing (sync and WAL disabled) + trace!("[1/4] writing batch of {} ops", batch.len()); + txstore_db.write_batch(batch, DBFlush::Disable); + } + // Flush the txstore db, only then delete the original rows from the history db + info!("[1/4] flushing V2 address index to txstore db"); + txstore_db.flush(); + info!("[1/4] deleting V1 address index from history db"); + history_db.delete_range(b"a", b"b", DBFlush::Enable); + + // 2. 
Migrate the TxConf transaction confirmation index + // - Moved from the txstore db to the history db + // - Changed from a set of blocks seen to include the tx to a single block (that is part of the best chain) + // - Changed from the block hash to the block height + // - Entries originating from stale blocks are removed + // Steps 3/4 depend on this index getting migrated first + info!("[2/4] migrating TxConf index..."); + let txconf_iter = txstore_db.iter_scan(b"C"); + for chunk in &txconf_iter.chunks(BATCH_SIZE) { + let mut batch = WriteBatch::default(); + for v1_row in chunk { + let v1_txconf: V1TxConfKey = + deserialize_little(&v1_row.key).expect("invalid TxConfKey"); + let blockhash = BlockHash::from_byte_array(v1_txconf.blockhash); + if let Some(header) = headers.header_by_blockhash(&blockhash) { + // The blockhash is still part of the best chain, use its height to construct the V2 row + let v2_row = V2TxConfRow::new(v1_txconf.txid, header.height() as u32).into_row(); + batch.put(v2_row.key, v2_row.value); + } else { + // The transaction was reorged, don't write the V2 entry + // trace!("[2/4] skipping reorged TxConf for {}", Txid::from_byte_array(txconf.txid)); + } + progress!( + "[2/4] migrating TxConf index ~{:.2}%", + est_hash_progress(&v1_txconf.txid) + ); + } + // Write batches without flushing (sync and WAL disabled) + trace!("[2/4] writing batch of {} ops", batch.len()); + history_db.write_batch(batch, DBFlush::Disable); + } + // Flush the history db, only then delete the original rows from the txstore db + info!("[2/4] flushing V2 TxConf to history db"); + history_db.flush(); + info!("[2/4] deleting V1 TxConf from txstore db"); + txstore_db.delete_range(b"C", b"D", DBFlush::Enable); + + // 3. Migrate the TxEdge spending index + // - Changed from a set of inputs seen to spend the outpoint to a single spending input (that is part of the best chain) + // - Keep the height of the spending tx + // - Entries originating from stale blocks are removed + info!("[3/4] migrating TxEdge index..."); + let txedge_iter = history_db.iter_scan(b"S"); + for chunk in &txedge_iter.chunks(BATCH_SIZE) { + let mut v1_edges = Vec::with_capacity(BATCH_SIZE); + let mut spending_txids = BTreeSet::new(); + for v1_row in chunk { + if let Ok(v1_edge) = deserialize_little::(&v1_row.key) { + spending_txids.insert(Txid::from_byte_array(v1_edge.spending_txid)); + v1_edges.push((v1_edge, v1_row.key)); + } + // Rows with keys that cannot be deserialized into V1TxEdgeKey are assumed to already be upgraded, and skipped + // This is necessary to properly recover if the migration stops halfway through. + } + + // Lookup the confirmation status for the entire chunk using a MultiGet operation + let confirmations = lookup_confirmations(history_db, tip_height, spending_txids); + + let mut batch = WriteBatch::default(); + for (v1_edge, v1_db_key) in v1_edges { + let spending_txid = Txid::from_byte_array(v1_edge.spending_txid); + + // Remove the old V1 entry. V2 entries use a different key. 
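            // Per the schema.md change in this same patch, the row layouts are roughly
            // (an illustrative sketch; exact field widths are whatever the key structs define):
            //
            //     V1 key:   b"S" ++ funding_txid ++ funding_vout ++ spending_txid ++ spending_vin    value: ""
            //     V2 key:   b"S" ++ funding_txid ++ funding_vout
            //     V2 value: spending_txid ++ spending_vin ++ spending_height
            //
            // so "is this TXO spent, and by what?" becomes a single point lookup on the V2 key
            // instead of a prefix scan over V1 rows.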
+ batch.delete(v1_db_key); + + if let Some(spending_height) = confirmations.get(&spending_txid) { + // Re-add the V2 entry if it is still part of the best chain + let v2_row = V2TxEdgeRow::new( + v1_edge.funding_txid, + v1_edge.funding_vout, + v1_edge.spending_txid, + v1_edge.spending_vin, + *spending_height, // now with the height included + ) + .into_row(); + batch.put(v2_row.key, v2_row.value); + } else { + // The spending transaction was reorged, don't write the V2 entry + //trace!("[3/4] skipping reorged TxEdge for {}", spending_txid); + } + + progress!( + "[3/4] migrating TxEdge index ~{:.2}%", + est_hash_progress(&v1_edge.funding_txid) + ); + } + // Write batches without flushing (sync and WAL disabled) + trace!("[3/4] writing batch of {} ops", batch.len()); + history_db.write_batch(batch, DBFlush::Disable); + } + info!("[3/4] flushing V2 TxEdge index to history db"); + history_db.flush(); + + // 4. Migrate the TxHistory index + // Entries originating from stale blocks are removed, with no other changes + info!("[4/4] migrating TxHistory index..."); + for prefix in HISTORY_PREFIXES { + let txhistory_iter = history_db.iter_scan(&[prefix]); + info!("[4/4] migrating TxHistory index {}", prefix as char); + for chunk in &txhistory_iter.chunks(BATCH_SIZE) { + let mut history_entries = Vec::with_capacity(BATCH_SIZE); + let mut history_txids = BTreeSet::new(); + for row in chunk { + let hist: TxHistoryKey = deserialize_big(&row.key).expect("invalid TxHistoryKey"); + history_txids.insert(hist.txinfo.get_txid()); + history_entries.push((hist, row.key)); + } + + // Lookup the confirmation status for the entire chunk using a MultiGet operation + let confirmations = lookup_confirmations(history_db, tip_height, history_txids); + + let mut batch = WriteBatch::default(); + for (hist, db_key) in history_entries { + let hist_txid = hist.txinfo.get_txid(); + if confirmations.get(&hist_txid) != Some(&hist.confirmed_height) { + // The history entry originated from a stale block, remove it + batch.delete(db_key); + // trace!("[4/4] removing reorged TxHistory for {}", hist.txinfo.get_txid()); + } + progress!( + "[4/4] migrating TxHistory index {} ~{:.2}%", + prefix as char, + est_hash_progress(&hist.hash) + ); + } + // Write batches without flushing (sync and WAL disabled) + trace!("[4/4] writing batch of {} deletions", batch.len()); + if !batch.is_empty() { + history_db.write_batch(batch, DBFlush::Disable); + } + } + } + info!("[4/4] flushing TxHistory deletions to history db"); + history_db.flush(); + + // Update the DB version under `V` + let ver_bytes = serialize_little(&(TO_DB_VERSION, config.light_mode)).unwrap(); + for db in [txstore_db, history_db, cache_db] { + db.put_sync(b"V", &ver_bytes); + } + + // Compact everything once at the end + txstore_db.full_compaction(); + history_db.full_compaction(); +} + +// Estimates progress using the first 4 bytes, relying on RocksDB's lexicographic key ordering and uniform hash distribution +fn est_hash_progress(hash: &FullHash) -> f32 { + u32::from_be_bytes(hash[0..4].try_into().unwrap()) as f32 / u32::MAX as f32 * 100f32 +} + +#[derive(Debug, serde::Deserialize)] +struct V1TxConfKey { + #[allow(dead_code)] + code: u8, + txid: FullHash, + blockhash: FullHash, +} + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +struct V1TxEdgeKey { + code: u8, + funding_txid: FullHash, + funding_vout: u16, + spending_txid: FullHash, + spending_vin: u16, +} + +/* +use bitcoin::hex::DisplayHex; + +fn dump_db(db: &DB, label: &str, prefix: &[u8]) { + debug!("dumping 
{}", label); + for item in db.iter_scan(prefix) { + trace!( + "[{}] {} => {}", + label, + fmt_key(&item.key), + &item.value.to_lower_hex_string() + ); + } +} + +fn debug_batch(batch: &WriteBatch, label: &'static str) { + debug!("batch {} with {} ops", label, batch.len()); + batch.iterate(&mut WriteBatchLogIterator(label)); +} + +struct WriteBatchLogIterator(&'static str); +impl rocksdb::WriteBatchIterator for WriteBatchLogIterator { + fn put(&mut self, key: Box<[u8]>, value: Box<[u8]>) { + trace!( + "[batch {}] PUT {} => {}", + self.0, + fmt_key(&key), + value.to_lower_hex_string() + ); + } + fn delete(&mut self, key: Box<[u8]>) { + trace!("[batch {}] DELETE {}", self.0, fmt_key(&key)); + } +} + +fn fmt_key(key: &[u8]) -> String { + format!("{}-{}", key[0] as char, &key[1..].to_lower_hex_string()) +} +*/ diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs index 59f957ae5..059adcb6c 100644 --- a/src/bin/electrs.rs +++ b/src/bin/electrs.rs @@ -4,13 +4,8 @@ extern crate log; extern crate electrs; -use crossbeam_channel::{self as channel}; -use error_chain::ChainedError; -use std::{env, process, thread}; -use std::sync::{Arc, RwLock}; -use std::time::Duration; use bitcoin::hex::DisplayHex; -use rand::{rng, RngCore}; +use crossbeam_channel::{self as channel}; use electrs::{ config::Config, daemon::Daemon, @@ -21,6 +16,11 @@ use electrs::{ rest, signal::Waiter, }; +use error_chain::ChainedError; +use rand::{rng, RngCore}; +use std::sync::{Arc, RwLock}; +use std::time::Duration; +use std::{env, process, thread}; #[cfg(feature = "otlp-tracing")] use electrs::otlp_trace; @@ -68,7 +68,7 @@ fn run_server(config: Arc, salt_rwlock: Arc>) -> Result<( signal.clone(), &metrics, )?); - let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config, &metrics)); + let store = Arc::new(Store::open(&config, &metrics, true)); let mut indexer = Indexer::open( Arc::clone(&store), fetch_from(&config, &store), diff --git a/src/bin/popular-scripts.rs b/src/bin/popular-scripts.rs index a7b245817..0274425b5 100644 --- a/src/bin/popular-scripts.rs +++ b/src/bin/popular-scripts.rs @@ -2,13 +2,16 @@ extern crate electrs; use bitcoin::hex::DisplayHex; use electrs::{ - config::Config, metrics::Metrics, new_index::{Store, TxHistoryKey}, util::bincode + config::Config, + metrics::Metrics, + new_index::{Store, TxHistoryKey}, + util::bincode, }; fn main() { let config = Config::from_args(); let metrics = Metrics::new(config.monitoring_addr); - let store = Store::open(&config.db_path.join("newindex"), &config, &metrics); + let store = Store::open(&config, &metrics, true); let mut iter = store.history_db().raw_iterator(); iter.seek(b"H"); diff --git a/src/bin/tx-fingerprint-stats.rs b/src/bin/tx-fingerprint-stats.rs index 83b3f213a..f96c7e7e4 100644 --- a/src/bin/tx-fingerprint-stats.rs +++ b/src/bin/tx-fingerprint-stats.rs @@ -24,7 +24,7 @@ fn main() { let signal = Waiter::start(crossbeam_channel::never()); let config = Config::from_args(); let metrics = Metrics::new(config.monitoring_addr); - let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config, &metrics)); + let store = Arc::new(Store::open(&config, &metrics, true)); let metrics = Metrics::new(config.monitoring_addr); metrics.start(); diff --git a/src/daemon.rs b/src/daemon.rs index cf381fa8d..500ee1ed1 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -523,28 +523,32 @@ impl Daemon { // buffering the replies into a vector. If any of the requests fail, processing is terminated and an Err is returned. 
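// The pattern used below to scope the parallel collection to the daemon's own pool is,
// in isolation, roughly this (a minimal sketch; `expensive_rpc_call` is a hypothetical stand-in):
//
//     use rayon::prelude::*;
//
//     let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();
//     let results: Vec<u64> = pool.install(|| {
//         (0u64..100).into_par_iter().map(expensive_rpc_call).collect()
//     });
//
// `install()` runs the closure, and any parallel iterator collected inside it, on that
// pool's worker threads rather than rayon's global pool, which is what keeps the
// per-thread DAEMON_INSTANCE connections bound to this Daemon instance.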
#[trace] fn requests(&self, method: &str, params_list: Vec) -> Result> { - self.requests_iter(method, params_list).collect() + self.rpc_threads + .install(|| self.requests_iter(method, params_list).collect()) } // Send requests in parallel over multiple RPC connections, iterating over the results without buffering them. // Errors are included in the iterator and do not terminate other pending requests. + // + // IMPORTANT: The returned parallel iterator must be collected inside self.rpc_threads.install() + // to ensure it runs on the daemon's own thread pool, not the global rayon pool. This is necessary + // because the per-thread DAEMON_INSTANCE thread-locals would otherwise be shared across different + // daemon instances in the same process (e.g. during parallel tests). #[trace] fn requests_iter<'a>( &'a self, method: &'a str, params_list: Vec, ) -> impl ParallelIterator> + IndexedParallelIterator + 'a { - self.rpc_threads.install(move || { - params_list.into_par_iter().map(move |params| { - // Store a local per-thread Daemon, each with its own TCP connection. These will - // get initialized as necessary for the `rpc_threads` pool thread managed by rayon. - thread_local!(static DAEMON_INSTANCE: OnceCell = OnceCell::new()); - - DAEMON_INSTANCE.with(|daemon| { - daemon - .get_or_init(|| self.retry_reconnect()) - .retry_request(&method, ¶ms) - }) + params_list.into_par_iter().map(move |params| { + // Store a local per-thread Daemon, each with its own TCP connection. These will + // get initialized as necessary for the `rpc_threads` pool thread managed by rayon. + thread_local!(static DAEMON_INSTANCE: OnceCell = OnceCell::new()); + + DAEMON_INSTANCE.with(|daemon| { + daemon + .get_or_init(|| self.retry_reconnect()) + .retry_request(&method, ¶ms) }) }) } @@ -647,20 +651,22 @@ impl Daemon { .map(|txhash| json!([txhash, /*verbose=*/ false])) .collect(); - self.requests_iter("getrawtransaction", params_list) - .zip(txids) - .filter_map(|(res, txid)| match res { - Ok(val) => Some(tx_from_value(val).map(|tx| (**txid, tx))), - // Ignore 'tx not found' errors - Err(Error(ErrorKind::RpcError(code, _, _), _)) - if code == RPC_INVALID_ADDRESS_OR_KEY => - { - None - } - // Terminate iteration if any other errors are encountered - Err(e) => Some(Err(e)), - }) - .collect() + self.rpc_threads.install(|| { + self.requests_iter("getrawtransaction", params_list) + .zip(txids) + .filter_map(|(res, txid)| match res { + Ok(val) => Some(tx_from_value(val).map(|tx| (**txid, tx))), + // Ignore 'tx not found' errors + Err(Error(ErrorKind::RpcError(code, _, _), _)) + if code == RPC_INVALID_ADDRESS_OR_KEY => + { + None + } + // Terminate iteration if any other errors are encountered + Err(e) => Some(Err(e)), + }) + .collect() + }) } #[trace] @@ -773,11 +779,12 @@ impl Daemon { result.append(&mut headers); - info!("downloaded {}/{} block headers ({:.0}%)", + info!( + "downloaded {}/{} block headers ({:.0}%)", result.len(), tip_height, - result.len() as f32 / tip_height as f32 * 100.0); - + result.len() as f32 / tip_height as f32 * 100.0 + ); } let mut blockhash = *DEFAULT_BLOCKHASH; diff --git a/src/electrum/server.rs b/src/electrum/server.rs index ea5579699..dc4e7fa75 100644 --- a/src/electrum/server.rs +++ b/src/electrum/server.rs @@ -15,10 +15,6 @@ use serde_json::{from_str, Value}; use electrs_macros::trace; -#[cfg(not(feature = "liquid"))] -use bitcoin::consensus::encode::serialize_hex; -#[cfg(feature = "liquid")] -use elements::encode::serialize_hex; use crate::chain::Txid; use crate::config::{Config, 
RpcLogging}; use crate::electrum::{get_electrum_height, ProtocolVersion}; @@ -27,6 +23,10 @@ use crate::metrics::{Gauge, HistogramOpts, HistogramVec, MetricOpts, Metrics}; use crate::new_index::{Query, Utxo}; use crate::util::electrum_merkle::{get_header_merkle_proof, get_id_from_pos, get_tx_merkle_proof}; use crate::util::{create_socket, spawn_thread, BlockId, BoolThen, Channel, FullHash, HeaderEntry}; +#[cfg(not(feature = "liquid"))] +use bitcoin::consensus::encode::serialize_hex; +#[cfg(feature = "liquid")] +use elements::encode::serialize_hex; const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION"); const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::new(1, 4); @@ -799,7 +799,7 @@ impl RPC { config: Arc, query: Arc, metrics: &Metrics, - salt_rwlock: Arc> + salt_rwlock: Arc>, ) -> RPC { let stats = Arc::new(Stats { latency: metrics.histogram_vec( diff --git a/src/elements/asset.rs b/src/elements/asset.rs index 726431b54..149ebd703 100644 --- a/src/elements/asset.rs +++ b/src/elements/asset.rs @@ -13,7 +13,7 @@ use crate::elements::registry::{AssetMeta, AssetRegistry}; use crate::errors::*; use crate::new_index::schema::{TxHistoryInfo, TxHistoryKey, TxHistoryRow}; use crate::new_index::{db::DBFlush, ChainQuery, DBRow, Mempool, Query}; -use crate::util::{bincode, full_hash, Bytes, FullHash, TransactionStatus, TxInput}; +use crate::util::{bincode, full_hash, BlockId, Bytes, FullHash, TransactionStatus, TxInput}; lazy_static! { pub static ref NATIVE_ASSET_ID: AssetId = @@ -509,7 +509,7 @@ where // save updated stats to cache if let Some(lastblock) = lastblock { - chain.store().cache_db().write( + chain.store().cache_db().write_rows( vec![asset_cache_row(asset_id, &newstats, &lastblock)], DBFlush::Enable, ); @@ -526,13 +526,14 @@ fn chain_asset_stats_delta( start_height: usize, apply_fn: AssetStatApplyFn, ) -> (T, Option) { + let headers = chain.store().headers(); let history_iter = chain .history_iter_scan(b'I', &asset_id.into_inner()[..], start_height) .map(TxHistoryRow::from_row) .filter_map(|history| { - chain - .tx_confirming_block(&history.get_txid()) - .map(|blockid| (history, blockid)) + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + let header = headers.header_by_height(history.key.confirmed_height as usize)?; + Some((history, BlockId::from(header))) }); let mut stats = init_stats; diff --git a/src/new_index/db.rs b/src/new_index/db.rs index 7527b3f5d..e422b8afe 100644 --- a/src/new_index/db.rs +++ b/src/new_index/db.rs @@ -11,7 +11,7 @@ use crate::config::Config; use crate::new_index::db_metrics::RocksDbMetrics; use crate::util::{bincode, spawn_thread, Bytes}; -static DB_VERSION: u32 = 1; +static DB_VERSION: u32 = 2; #[derive(Debug, Eq, PartialEq)] pub struct DBRow { @@ -38,8 +38,8 @@ impl<'a> Iterator for ScanIterator<'a> { return None; } Some(DBRow { - key: key.to_vec(), - value: value.to_vec(), + key: key.into_vec(), + value: value.into_vec(), }) } } @@ -87,7 +87,7 @@ pub enum DBFlush { } impl DB { - pub fn open(path: &Path, config: &Config) -> DB { + pub fn open(path: &Path, config: &Config, verify_compat: bool) -> DB { debug!("opening DB at {:?}", path); let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); @@ -97,8 +97,9 @@ impl DB { db_opts.set_target_file_size_base(1_073_741_824); db_opts.set_disable_auto_compactions(!config.initial_sync_compaction); // for initial bulk load - - let parallelism: i32 = config.db_parallelism.try_into() + let parallelism: i32 = config + 
.db_parallelism + .try_into() .expect("db_parallelism value too large for i32"); // Configure parallelism (background jobs and thread pools) @@ -117,9 +118,11 @@ impl DB { db_opts.set_block_based_table_factory(&block_opts); let db = DB { - db: Arc::new(rocksdb::DB::open(&db_opts, path).expect("failed to open RocksDB")) + db: Arc::new(rocksdb::DB::open(&db_opts, path).expect("failed to open RocksDB")), }; - db.verify_compatibility(config); + if verify_compat { + db.verify_compatibility(config); + } db } @@ -170,7 +173,7 @@ impl DB { } } - pub fn write(&self, mut rows: Vec, flush: DBFlush) { + pub fn write_rows(&self, mut rows: Vec, flush: DBFlush) { log::trace!( "writing {} rows to {:?}, flush={:?}", rows.len(), @@ -182,6 +185,20 @@ impl DB { for row in rows { batch.put(&row.key, &row.value); } + self.write_batch(batch, flush) + } + + pub fn delete_rows(&self, mut rows: Vec, flush: DBFlush) { + log::trace!("deleting {} rows from {:?}", rows.len(), self.db,); + rows.sort_unstable_by(|a, b| a.key.cmp(&b.key)); + let mut batch = rocksdb::WriteBatch::default(); + for row in rows { + batch.delete(&row.key); + } + self.write_batch(batch, flush) + } + + pub fn write_batch(&self, batch: rocksdb::WriteBatch, flush: DBFlush) { let do_flush = match flush { DBFlush::Enable => true, DBFlush::Disable => false, @@ -218,21 +235,21 @@ impl DB { self.db.multi_get(keys) } + /// Remove database entries in the range [from, to) + pub fn delete_range>(&self, from: K, to: K, flush: DBFlush) { + let mut batch = rocksdb::WriteBatch::default(); + batch.delete_range(from, to); + self.write_batch(batch, flush); + } + fn verify_compatibility(&self, config: &Config) { - let mut compatibility_bytes = bincode::serialize_little(&DB_VERSION).unwrap(); - - if config.light_mode { - // append a byte to indicate light_mode is enabled. - // we're not letting bincode serialize this so that the compatiblity bytes won't change - // (and require a reindex) when light_mode is disabled. this should be chagned the next - // time we bump DB_VERSION and require a re-index anyway. - compatibility_bytes.push(1); - } + let compatibility_bytes = + bincode::serialize_little(&(DB_VERSION, config.light_mode)).unwrap(); match self.get(b"V") { None => self.put(b"V", &compatibility_bytes), - Some(ref x) if x != &compatibility_bytes => { - panic!("Incompatible database found. Please reindex.") + Some(x) if x != compatibility_bytes => { + panic!("Incompatible database found. 
Please reindex or migrate.") } Some(_) => (), } @@ -251,39 +268,114 @@ impl DB { }; spawn_thread("db_stats_exporter", move || loop { - update_gauge(&db_metrics.num_immutable_mem_table, "rocksdb.num-immutable-mem-table"); - update_gauge(&db_metrics.mem_table_flush_pending, "rocksdb.mem-table-flush-pending"); + update_gauge( + &db_metrics.num_immutable_mem_table, + "rocksdb.num-immutable-mem-table", + ); + update_gauge( + &db_metrics.mem_table_flush_pending, + "rocksdb.mem-table-flush-pending", + ); update_gauge(&db_metrics.compaction_pending, "rocksdb.compaction-pending"); update_gauge(&db_metrics.background_errors, "rocksdb.background-errors"); - update_gauge(&db_metrics.cur_size_active_mem_table, "rocksdb.cur-size-active-mem-table"); - update_gauge(&db_metrics.cur_size_all_mem_tables, "rocksdb.cur-size-all-mem-tables"); - update_gauge(&db_metrics.size_all_mem_tables, "rocksdb.size-all-mem-tables"); - update_gauge(&db_metrics.num_entries_active_mem_table, "rocksdb.num-entries-active-mem-table"); - update_gauge(&db_metrics.num_entries_imm_mem_tables, "rocksdb.num-entries-imm-mem-tables"); - update_gauge(&db_metrics.num_deletes_active_mem_table, "rocksdb.num-deletes-active-mem-table"); - update_gauge(&db_metrics.num_deletes_imm_mem_tables, "rocksdb.num-deletes-imm-mem-tables"); + update_gauge( + &db_metrics.cur_size_active_mem_table, + "rocksdb.cur-size-active-mem-table", + ); + update_gauge( + &db_metrics.cur_size_all_mem_tables, + "rocksdb.cur-size-all-mem-tables", + ); + update_gauge( + &db_metrics.size_all_mem_tables, + "rocksdb.size-all-mem-tables", + ); + update_gauge( + &db_metrics.num_entries_active_mem_table, + "rocksdb.num-entries-active-mem-table", + ); + update_gauge( + &db_metrics.num_entries_imm_mem_tables, + "rocksdb.num-entries-imm-mem-tables", + ); + update_gauge( + &db_metrics.num_deletes_active_mem_table, + "rocksdb.num-deletes-active-mem-table", + ); + update_gauge( + &db_metrics.num_deletes_imm_mem_tables, + "rocksdb.num-deletes-imm-mem-tables", + ); update_gauge(&db_metrics.estimate_num_keys, "rocksdb.estimate-num-keys"); - update_gauge(&db_metrics.estimate_table_readers_mem, "rocksdb.estimate-table-readers-mem"); - update_gauge(&db_metrics.is_file_deletions_enabled, "rocksdb.is-file-deletions-enabled"); + update_gauge( + &db_metrics.estimate_table_readers_mem, + "rocksdb.estimate-table-readers-mem", + ); + update_gauge( + &db_metrics.is_file_deletions_enabled, + "rocksdb.is-file-deletions-enabled", + ); update_gauge(&db_metrics.num_snapshots, "rocksdb.num-snapshots"); - update_gauge(&db_metrics.oldest_snapshot_time, "rocksdb.oldest-snapshot-time"); + update_gauge( + &db_metrics.oldest_snapshot_time, + "rocksdb.oldest-snapshot-time", + ); update_gauge(&db_metrics.num_live_versions, "rocksdb.num-live-versions"); - update_gauge(&db_metrics.current_super_version_number, "rocksdb.current-super-version-number"); - update_gauge(&db_metrics.estimate_live_data_size, "rocksdb.estimate-live-data-size"); - update_gauge(&db_metrics.min_log_number_to_keep, "rocksdb.min-log-number-to-keep"); - update_gauge(&db_metrics.min_obsolete_sst_number_to_keep, "rocksdb.min-obsolete-sst-number-to-keep"); - update_gauge(&db_metrics.total_sst_files_size, "rocksdb.total-sst-files-size"); - update_gauge(&db_metrics.live_sst_files_size, "rocksdb.live-sst-files-size"); + update_gauge( + &db_metrics.current_super_version_number, + "rocksdb.current-super-version-number", + ); + update_gauge( + &db_metrics.estimate_live_data_size, + "rocksdb.estimate-live-data-size", + ); + update_gauge( + 
&db_metrics.min_log_number_to_keep, + "rocksdb.min-log-number-to-keep", + ); + update_gauge( + &db_metrics.min_obsolete_sst_number_to_keep, + "rocksdb.min-obsolete-sst-number-to-keep", + ); + update_gauge( + &db_metrics.total_sst_files_size, + "rocksdb.total-sst-files-size", + ); + update_gauge( + &db_metrics.live_sst_files_size, + "rocksdb.live-sst-files-size", + ); update_gauge(&db_metrics.base_level, "rocksdb.base-level"); - update_gauge(&db_metrics.estimate_pending_compaction_bytes, "rocksdb.estimate-pending-compaction-bytes"); - update_gauge(&db_metrics.num_running_compactions, "rocksdb.num-running-compactions"); - update_gauge(&db_metrics.num_running_flushes, "rocksdb.num-running-flushes"); - update_gauge(&db_metrics.actual_delayed_write_rate, "rocksdb.actual-delayed-write-rate"); + update_gauge( + &db_metrics.estimate_pending_compaction_bytes, + "rocksdb.estimate-pending-compaction-bytes", + ); + update_gauge( + &db_metrics.num_running_compactions, + "rocksdb.num-running-compactions", + ); + update_gauge( + &db_metrics.num_running_flushes, + "rocksdb.num-running-flushes", + ); + update_gauge( + &db_metrics.actual_delayed_write_rate, + "rocksdb.actual-delayed-write-rate", + ); update_gauge(&db_metrics.is_write_stopped, "rocksdb.is-write-stopped"); - update_gauge(&db_metrics.estimate_oldest_key_time, "rocksdb.estimate-oldest-key-time"); - update_gauge(&db_metrics.block_cache_capacity, "rocksdb.block-cache-capacity"); + update_gauge( + &db_metrics.estimate_oldest_key_time, + "rocksdb.estimate-oldest-key-time", + ); + update_gauge( + &db_metrics.block_cache_capacity, + "rocksdb.block-cache-capacity", + ); update_gauge(&db_metrics.block_cache_usage, "rocksdb.block-cache-usage"); - update_gauge(&db_metrics.block_cache_pinned_usage, "rocksdb.block-cache-pinned-usage"); + update_gauge( + &db_metrics.block_cache_pinned_usage, + "rocksdb.block-cache-pinned-usage", + ); thread::sleep(Duration::from_secs(5)); }); } diff --git a/src/new_index/fetch.rs b/src/new_index/fetch.rs index 7906fb206..58dab9b39 100644 --- a/src/new_index/fetch.rs +++ b/src/new_index/fetch.rs @@ -89,7 +89,8 @@ fn bitcoind_fetcher( let total_blocks_fetched = new_headers.len(); for entries in new_headers.chunks(100) { if fetcher_count % 50 == 0 && total_blocks_fetched >= 50 { - info!("fetching blocks {}/{} ({:.1}%)", + info!( + "fetching blocks {}/{} ({:.1}%)", blocks_fetched, total_blocks_fetched, blocks_fetched as f32 / total_blocks_fetched as f32 * 100.0 @@ -148,10 +149,11 @@ fn blkfiles_fetcher( .into_iter() .filter_map(|(block, size)| { index += 1; - debug!("fetch block {:}/{:} {:.2}%", + debug!( + "fetch block {:}/{:} {:.2}%", index, block_count, - (index/block_count) as f32/100.0 + (index / block_count) as f32 / 100.0 ); let blockhash = block.block_hash(); entry_map @@ -188,7 +190,8 @@ fn blkfiles_reader(blk_files: Vec, xor_key: Option<[u8; 8]>) -> Fetcher spawn_thread("blkfiles_reader", move || { let blk_files_len = blk_files.len(); for (count, path) in blk_files.iter().enumerate() { - info!("block file reading {:}/{:} {:.2}%", + info!( + "block file reading {:}/{:} {:.2}%", count, blk_files_len, count / blk_files_len diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs index c5788e683..a28fa193d 100644 --- a/src/new_index/mempool.rs +++ b/src/new_index/mempool.rs @@ -35,6 +35,8 @@ pub struct Mempool { config: Arc, txstore: HashMap, feeinfo: HashMap, + // Map txid -> scripthashes touched, to prune efficiently on eviction. 
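    // The eviction-time pruning this map enables looks roughly like the following
    // (an illustrative sketch with assumed local bindings; see prune_history_entries below):
    //
    //     if let Some(scripthashes) = tx_scripthashes.remove(&txid) {
    //         for sh in scripthashes {
    //             let now_empty = history.get_mut(&sh).map_or(false, |entries| {
    //                 entries.retain(|e| e.get_txid() != txid);
    //                 entries.is_empty()
    //             });
    //             if now_empty {
    //                 history.remove(&sh);
    //             }
    //         }
    //     }
    //
    // i.e. O(scripthashes touched by the evicted tx) rather than a scan of every history bucket.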
+ tx_scripthashes: HashMap>, history: HashMap>, // ScriptHash -> {history_entries} edges: HashMap, // OutPoint -> (spending_txid, spending_vin) recent: ArrayDeque, // The N most recent txs to enter the mempool @@ -71,6 +73,7 @@ impl Mempool { config, txstore: HashMap::new(), feeinfo: HashMap::new(), + tx_scripthashes: HashMap::new(), history: HashMap::new(), edges: HashMap::new(), recent: ArrayDeque::new(), @@ -300,7 +303,8 @@ impl Mempool { .latency .with_label_values(&["update_backlog_stats"]) .start_timer(); - self.backlog_stats = (BacklogStats::new(&self.feeinfo), Instant::now()); + let feeinfo: Vec<&TxFeeInfo> = self.feeinfo.values().collect(); + self.backlog_stats = (BacklogStats::from_feeinfo_slice(&feeinfo), Instant::now()); } #[trace] @@ -354,6 +358,7 @@ impl Mempool { let prevouts = extract_tx_prevouts(&tx, &txos, false); let txid_bytes = full_hash(&txid[..]); + let mut tx_scripthashes = Vec::with_capacity(tx.input.len() + tx.output.len()); // best-effort capacity hint // Get feeinfo for caching and recent tx overview let feeinfo = TxFeeInfo::new(&tx, &prevouts, self.config.network_type); @@ -410,11 +415,15 @@ impl Mempool { // Index funding/spending history entries and spend edges for (scripthash, entry) in funding.chain(spending) { + tx_scripthashes.push(scripthash); self.history .entry(scripthash) .or_insert_with(Vec::new) .push(entry); } + tx_scripthashes.sort_unstable(); + tx_scripthashes.dedup(); + self.tx_scripthashes.insert(txid, tx_scripthashes); for (i, txi) in tx.input.iter().enumerate() { self.edges.insert(txi.previous_output, (txid, i as u32)); } @@ -469,21 +478,31 @@ impl Mempool { let _timer = self.latency.with_label_values(&["remove"]).start_timer(); for txid in &to_remove { - self.txstore + let tx = self + .txstore .remove(*txid) .unwrap_or_else(|| panic!("missing mempool tx {}", txid)); - self.feeinfo.remove(*txid).or_else(|| { - warn!("missing mempool tx feeinfo {}", txid); - None + self.feeinfo.remove(*txid).unwrap_or_else(|| { + panic!("missing mempool tx feeinfo {}", txid); }); - } - // TODO: make it more efficient (currently it takes O(|mempool|) time) - self.history.retain(|_scripthash, entries| { - entries.retain(|entry| !to_remove.contains(&entry.get_txid())); - !entries.is_empty() - }); + let scripthashes = self + .tx_scripthashes + .remove(*txid) + .unwrap_or_else(|| panic!("missing tx_scripthashes for {}", txid)); + prune_history_entries(&mut self.history, &scripthashes, txid); + + for txin in tx.input { + assert!( + self.edges.remove(&txin.previous_output).is_some(), + "missing mempool edge for outpoint {}:{} (tx {})", + txin.previous_output.txid, + txin.previous_output.vout, + txid + ); + } + } #[cfg(feature = "liquid")] asset::remove_mempool_tx_assets( @@ -491,9 +510,6 @@ impl Mempool { &mut self.asset_history, &mut self.asset_issuance, ); - - self.edges - .retain(|_outpoint, (txid, _vin)| !to_remove.contains(txid)); } #[cfg(feature = "liquid")] @@ -637,6 +653,32 @@ impl Mempool { } } +fn prune_history_entries( + history: &mut HashMap>, + scripthashes: &[FullHash], + txid: &Txid, +) { + for scripthash in scripthashes { + let entries = history + .get_mut(scripthash) + .unwrap_or_else(|| panic!("missing history bucket for {:?}", scripthash)); + + let before = entries.len(); + entries.retain(|entry| entry.get_txid() != *txid); + let removed = before - entries.len(); + assert!( + removed > 0, + "tx {} not found in history bucket {:?}", + txid, + scripthash + ); + + if entries.is_empty() { + history.remove(scripthash); + } + } +} + #[derive(Serialize)] 
pub struct BacklogStats { pub count: u32, @@ -656,10 +698,9 @@ impl BacklogStats { } #[trace] - fn new(feeinfo: &HashMap) -> Self { - let (count, vsize, total_fee) = feeinfo - .values() - .fold((0, 0, 0), |(count, vsize, fee), feeinfo| { + fn from_feeinfo_slice(fees: &[&TxFeeInfo]) -> Self { + let (count, vsize, total_fee) = + fees.iter().fold((0, 0, 0), |(count, vsize, fee), feeinfo| { (count + 1, vsize + feeinfo.vsize, fee + feeinfo.fee) }); @@ -667,7 +708,7 @@ impl BacklogStats { count, vsize, total_fee, - fee_histogram: make_fee_histogram(feeinfo.values().collect()), + fee_histogram: make_fee_histogram(fees.iter().copied().collect()), } } } diff --git a/src/new_index/query.rs b/src/new_index/query.rs index 03c5d201f..712fed330 100644 --- a/src/new_index/query.rs +++ b/src/new_index/query.rs @@ -1,5 +1,3 @@ -use rayon::prelude::*; - use std::collections::{BTreeSet, HashMap}; use std::sync::{Arc, RwLock, RwLockReadGuard}; use std::time::{Duration, Instant}; @@ -153,18 +151,29 @@ impl Query { } #[trace] - pub fn lookup_tx_spends(&self, tx: Transaction) -> Vec> { + pub fn lookup_tx_spends(&self, tx: &Transaction) -> Vec> { let txid = tx.compute_txid(); + let outpoints = tx + .output + .iter() + .enumerate() + .filter(|(_, txout)| is_spendable(txout)) + .map(|(vout, _)| OutPoint::new(txid, vout as u32)) + .collect::>(); + // First fetch all confirmed spends using a MultiGet operation, + // then fall back to the mempool for any outpoints not spent on-chain + let mut chain_spends = self.chain.lookup_spends(outpoints); + let mempool = self.mempool(); tx.output - .par_iter() + .iter() .enumerate() .map(|(vout, txout)| { if is_spendable(txout) { - self.lookup_spend(&OutPoint { - txid, - vout: vout as u32, - }) + let outpoint = OutPoint::new(txid, vout as u32); + chain_spends + .remove(&outpoint) + .or_else(|| mempool.lookup_spend(&outpoint)) } else { None } diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs index c3e96d5d8..d5f7a974a 100644 --- a/src/new_index/schema.rs +++ b/src/new_index/schema.rs @@ -18,12 +18,9 @@ use elements::{ }; use std::collections::{BTreeSet, HashMap, HashSet}; -use std::path::Path; -use std::sync::{Arc, RwLock}; +use std::convert::TryInto; +use std::sync::{Arc, RwLock, RwLockReadGuard}; -use crate::{chain::{ - BlockHash, BlockHeader, Network, OutPoint, Script, Transaction, TxOut, Txid, Value, -}, new_index::db_metrics::RocksDbMetrics}; use crate::config::Config; use crate::daemon::Daemon; use crate::errors::*; @@ -32,6 +29,10 @@ use crate::util::{ bincode, full_hash, has_prevout, is_spendable, BlockHeaderMeta, BlockId, BlockMeta, BlockStatus, Bytes, HeaderEntry, HeaderList, ScriptToAddr, }; +use crate::{ + chain::{BlockHash, BlockHeader, Network, OutPoint, Script, Transaction, TxOut, Txid, Value}, + new_index::db_metrics::RocksDbMetrics, +}; use crate::new_index::db::{DBFlush, DBRow, ReverseScanIterator, ScanIterator, DB}; use crate::new_index::fetch::{start_fetcher, BlockEntry, FetchFrom}; @@ -58,16 +59,18 @@ pub struct Store { } impl Store { - pub fn open(path: &Path, config: &Config, metrics: &Metrics) -> Self { - let txstore_db = DB::open(&path.join("txstore"), config); + pub fn open(config: &Config, metrics: &Metrics, verify_compat: bool) -> Self { + let path = config.db_path.join("newindex"); + + let txstore_db = DB::open(&path.join("txstore"), config, verify_compat); let added_blockhashes = load_blockhashes(&txstore_db, &BlockRow::done_filter()); debug!("{} blocks were added", added_blockhashes.len()); - let history_db = 
DB::open(&path.join("history"), config); + let history_db = DB::open(&path.join("history"), config, verify_compat); let indexed_blockhashes = load_blockhashes(&history_db, &BlockRow::done_filter()); debug!("{} blocks were indexed", indexed_blockhashes.len()); - let cache_db = DB::open(&path.join("cache"), config); + let cache_db = DB::open(&path.join("cache"), config, verify_compat); let db_metrics = Arc::new(RocksDbMetrics::new(&metrics)); txstore_db.start_stats_exporter(Arc::clone(&db_metrics), "txstore_db"); @@ -75,8 +78,21 @@ impl Store { cache_db.start_stats_exporter(Arc::clone(&db_metrics), "cache_db"); let headers = if let Some(tip_hash) = txstore_db.get(b"t") { - let tip_hash = deserialize(&tip_hash).expect("invalid chain tip in `t`"); + let mut tip_hash = deserialize(&tip_hash).expect("invalid chain tip in `t`"); let headers_map = load_blockheaders(&txstore_db); + + // Move the tip back until we reach a block that is indexed in the history db. + // It is possible for the tip recorded under the db "t" key to be un-indexed if electrs + // shuts down during reorg handling. Normally this wouldn't matter because the non-indexed + // block would be stale, but it could matter if the chain later re-orged back to + // include the previously stale block because more blocks were built on top of it. + // Without this, the stale-then-not-stale block(s) would not get re-indexed correctly. + while !indexed_blockhashes.contains(&tip_hash) { + tip_hash = headers_map + .get(&tip_hash) + .expect("invalid header chain") + .prev_blockhash; + } debug!( "{} headers were loaded, tip at {:?}", headers_map.len(), @@ -109,6 +125,10 @@ impl Store { &self.cache_db } + pub fn headers(&self) -> RwLockReadGuard { + self.indexed_headers.read().unwrap() + } + pub fn done_initial_sync(&self) -> bool { self.txstore_db.get(b"t").is_some() } @@ -259,22 +279,62 @@ impl Indexer { db.enable_auto_compaction(); } - fn get_new_headers(&self, daemon: &Daemon, tip: &BlockHash) -> Result> { - let headers = self.store.indexed_headers.read().unwrap(); - let new_headers = daemon.get_new_headers(&headers, &tip)?; - let result = headers.order(new_headers); - - if let Some(tip) = result.last() { - info!("{:?} ({} left to index)", tip, result.len()); - }; - Ok(result) + fn get_new_headers( + &self, + daemon: &Daemon, + tip: &BlockHash, + ) -> Result<(Vec, Option)> { + let indexed_headers = self.store.indexed_headers.read().unwrap(); + let raw_new_headers = daemon.get_new_headers(&indexed_headers, tip)?; + let (new_headers, reorged_since) = indexed_headers.preprocess(raw_new_headers, tip); + + if let Some(tip) = new_headers.last() { + info!("{:?} ({} left to index)", tip, new_headers.len()); + } + Ok((new_headers, reorged_since)) } pub fn update(&mut self, daemon: &Daemon) -> Result { let daemon = daemon.reconnect()?; let tip = daemon.getbestblockhash()?; - let new_headers = self.get_new_headers(&daemon, &tip)?; + let (new_headers, reorged_since) = self.get_new_headers(&daemon, &tip)?; + + // Handle reorgs by undoing the reorged (stale) blocks first + if let Some(reorged_since) = reorged_since { + // Remove reorged headers from the in-memory HeaderList. + // This will also immediately invalidate all the history db entries originating from those blocks + // (even before the rows are deleted below), since they reference block heights that will no longer exist. + // This ensures consistency - it is not possible for blocks to be available (e.g. 
in GET /blocks/tip or /block/:hash) + // without the corresponding history entries for these blocks (e.g. in GET /address/:address/txs), or vice-versa. + let mut reorged_headers = self + .store + .indexed_headers + .write() + .unwrap() + .pop(reorged_since); + // The chain tip will temporarily drop to the common ancestor (at height reorged_since-1), + // until the new headers are `append()`ed (below). + + info!( + "processing reorg of depth {} since height {}", + reorged_headers.len(), + reorged_since, + ); + + // Reorged blocks are undone in chunks of 100, processed in serial, each as an atomic batch. + // Reverse them so that chunks closest to the chain tip are processed first, + // which is necessary to properly recover from crashes during reorg handling. + // Also see the comment under `Store::open()`. + reorged_headers.reverse(); + + // Fetch the reorged blocks, then undo their history index db rows. + // The txstore db rows are kept for reorged blocks/transactions. + start_fetcher(self.from, &daemon, reorged_headers)? + .map(|blocks| self.undo_index(&blocks)); + } + + // Add new blocks to the txstore db let to_add = self.headers_to_add(&new_headers); debug!( "adding transactions from {} blocks using {:?}", @@ -286,23 +346,24 @@ impl Indexer { let mut blocks_fetched = 0; let to_add_total = to_add.len(); - start_fetcher(self.from, &daemon, to_add)?.map(|blocks| - { - if fetcher_count % 25 == 0 && to_add_total > 20 { - info!("adding txes from blocks {}/{} ({:.1}%)", - blocks_fetched, - to_add_total, - blocks_fetched as f32 / to_add_total as f32 * 100.0 - ); - } - fetcher_count += 1; - blocks_fetched += blocks.len(); + start_fetcher(self.from, &daemon, to_add)?.map(|blocks| { + if fetcher_count % 25 == 0 && to_add_total > 20 { + info!( + "adding txes from blocks {}/{} ({:.1}%)", + blocks_fetched, + to_add_total, + blocks_fetched as f32 / to_add_total as f32 * 100.0 + ); + } + fetcher_count += 1; + blocks_fetched += blocks.len(); - self.add(&blocks) - }); + self.add(&blocks) + }); self.start_auto_compactions(&self.store.txstore_db); + // Index new blocks to the history db let to_index = self.headers_to_index(&new_headers); debug!( "indexing history from {} blocks using {:?}", @@ -320,19 +381,21 @@ impl Indexer { self.flush = DBFlush::Enable; } - // update the synced tip *after* the new data is flushed to disk + // Update the synced tip after all db writes are flushed debug!("updating synced tip to {:?}", tip); self.store.txstore_db.put_sync(b"t", &serialize(&tip)); + // Finally, append the new headers to the in-memory HeaderList. + // This will make both the headers and the history entries visible in the public APIs, consistently with each-other. 
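        // Taken together, the update sequence is (a simplified sketch of the steps above):
        //
        //     1. headers.pop(reorged_since)        stale heights disappear from the in-memory view
        //     2. undo_index(reorged blocks)        their 'H'/'S'/'C'/'D' history rows are deleted
        //     3. add(to_add) / index(to_index)     txstore and history rows for the new blocks are written
        //     4. flush + put_sync(b"t", tip)       the synced tip is persisted after the data it points to
        //     5. headers.append(new_headers)       headers and history become visible to readers together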
let mut headers = self.store.indexed_headers.write().unwrap(); - headers.apply(new_headers); + headers.append(new_headers); assert_eq!(tip, *headers.tip()); if let FetchFrom::BlkFiles = self.from { self.from = FetchFrom::Bitcoind; } - self.tip_metric.set(headers.len() as i64 - 1); + self.tip_metric.set(headers.best_height() as i64); Ok(tip) } @@ -345,7 +408,7 @@ impl Indexer { }; { let _timer = self.start_timer("add_write"); - self.store.txstore_db.write(rows, self.flush); + self.store.txstore_db.write_rows(rows, self.flush); } self.store @@ -356,6 +419,37 @@ impl Indexer { } fn index(&self, blocks: &[BlockEntry]) { + self.store + .history_db + .write_rows(self._index(blocks), self.flush); + + let mut indexed_blockhashes = self.store.indexed_blockhashes.write().unwrap(); + indexed_blockhashes.extend(blocks.iter().map(|b| b.entry.hash())); + } + + // Undo the history db entries previously written for the given blocks (that were reorged). + // This includes the TxHistory, TxEdge, TxConf and BlockDone rows ('H', 'S', 'C' and 'D'), + // as well as the Elements history rows ('I' and 'i'). + // + // This does *not* remove any txstore db entries, which are intentionally kept + // even for reorged blocks. + fn undo_index(&self, blocks: &[BlockEntry]) { + self.store + .history_db + .delete_rows(self._index(blocks), self.flush); + // Note this doesn't actually "undo" the rows - the keys are simply deleted, and won't get + // reverted back to their prior value (if there was one). It is expected that the history db + // keys created by blocks are always unique and impossible to already exist from a prior block. + // This is true for all history keys (which always include the height or txid), but for example + // not true for the address prefix search index (in the txstore). + + let mut indexed_blockhashes = self.store.indexed_blockhashes.write().unwrap(); + for block in blocks { + indexed_blockhashes.remove(block.entry.hash()); + } + } + + fn _index(&self, blocks: &[BlockEntry]) -> Vec { let previous_txos_map = { let _timer = self.start_timer("index_lookup"); lookup_txos(&self.store.txstore_db, get_previous_txos(blocks)).unwrap() @@ -372,7 +466,7 @@ impl Indexer { } index_blocks(blocks, &previous_txos_map, &self.iconfig) }; - self.store.history_db.write(rows, self.flush); + rows } pub fn fetch_from(&mut self, from: FetchFrom) { @@ -420,6 +514,28 @@ impl ChainQuery { } } + pub fn get_block_txs( + &self, + hash: &BlockHash, + start_index: usize, + limit: usize, + ) -> Result> { + let txids = self.get_block_txids(hash).chain_err(|| "block not found")?; + ensure!(start_index < txids.len(), "start index out of range"); + + let txids_with_blockhash = txids + .into_iter() + .skip(start_index) + .take(limit) + .map(|txid| (txid, *hash)) + .collect::>(); + + self.lookup_txns(&txids_with_blockhash) + + // XXX use getblock in lightmode? 
a single RPC call, but would fetch all txs to get one page + // self.daemon.getblock(hash)?.txdata.into_iter().skip(start_index).take(limit).collect() + } + pub fn get_block_meta(&self, hash: &BlockHash) -> Option { let _timer = self.start_timer("get_block_meta"); @@ -445,17 +561,19 @@ impl ChainQuery { let entry = self.header_by_hash(hash)?; let meta = self.get_block_meta(hash)?; let txids = self.get_block_txids(hash)?; + let txids_with_blockhash: Vec<_> = + txids.into_iter().map(|txid| (txid, *hash)).collect(); + let raw_txs = self.lookup_raw_txns(&txids_with_blockhash).ok()?; // TODO avoid hiding all errors as None, return a Result // Reconstruct the raw block using the header and txids, // as let mut raw = Vec::with_capacity(meta.size as usize); raw.append(&mut serialize(entry.header())); - raw.append(&mut serialize(&VarInt(txids.len() as u64))); + raw.append(&mut serialize(&VarInt(raw_txs.len() as u64))); - for txid in txids { - // we don't need to provide the blockhash because we know we're not in light mode - raw.append(&mut self.lookup_raw_txn(&txid, None)?); + for mut raw_tx in raw_txs { + raw.append(&mut raw_tx); } Some(raw) @@ -514,13 +632,15 @@ impl ChainQuery { limit: usize, ) -> Vec<(Transaction, BlockId)> { let _timer_scan = self.start_timer("history"); - let txs_conf = self + let headers = self.store.indexed_headers.read().unwrap(); + let history_iter = self .history_iter_scan_reverse(code, hash) - .map(|row| TxHistoryRow::from_row(row).get_txid()) + .map(TxHistoryRow::from_row) + .map(|row| (row.get_txid(), row.key.confirmed_height as usize)) // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that? .unique() // TODO seek directly to last seen tx without reading earlier rows - .skip_while(|txid| { + .skip_while(|(txid, _)| { // skip until we reach the last_seen_txid last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid) }) @@ -528,15 +648,23 @@ impl ChainQuery { Some(_) => 1, // skip the last_seen_txid itself None => 0, }) - .filter_map(|txid| self.tx_confirming_block(&txid).map(|b| (txid, b))) - .take(limit) - .collect::>(); + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + .filter_map(|(txid, height)| Some((txid, headers.header_by_height(height)?))) + .take(limit); + + let mut txids_with_blockhash = Vec::with_capacity(limit); + let mut blockids = Vec::with_capacity(limit); + for (txid, header) in history_iter { + txids_with_blockhash.push((txid, *header.hash())); + blockids.push(BlockId::from(header)); + } + drop(headers); - self.lookup_txns(&txs_conf) + self.lookup_txns(&txids_with_blockhash) .expect("failed looking up txs in history index") .into_iter() - .zip(txs_conf) - .map(|(tx, (_, blockid))| (tx, blockid)) + .zip(blockids) + .map(|(tx, blockid)| (tx, blockid)) .collect() } @@ -547,10 +675,13 @@ impl ChainQuery { fn _history_txids(&self, code: u8, hash: &[u8], limit: usize) -> Vec<(Txid, BlockId)> { let _timer = self.start_timer("history_txids"); + let headers = self.store.indexed_headers.read().unwrap(); self.history_iter_scan(code, hash, 0) - .map(|row| TxHistoryRow::from_row(row).get_txid()) + .map(TxHistoryRow::from_row) + .map(|row| (row.get_txid(), row.key.confirmed_height as usize)) .unique() - .filter_map(|txid| self.tx_confirming_block(&txid).map(|b| (txid, b))) + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + .filter_map(|(txid, height)| Some((txid, 
headers.header_by_height(height)?.into()))) .take(limit) .collect() } @@ -582,7 +713,7 @@ impl ChainQuery { // save updated utxo set to cache if let Some(lastblock) = lastblock { if had_cache || processed_items > MIN_HISTORY_ITEMS_TO_CACHE { - self.store.cache_db.write( + self.store.cache_db.write_rows( vec![UtxoCacheRow::new(scripthash, &newutxos, &lastblock).into_row()], DBFlush::Enable, ); @@ -624,12 +755,14 @@ impl ChainQuery { limit: usize, ) -> Result<(UtxoMap, Option, usize)> { let _timer = self.start_timer("utxo_delta"); + let headers = self.store.indexed_headers.read().unwrap(); let history_iter = self .history_iter_scan(b'H', scripthash, start_height) .map(TxHistoryRow::from_row) + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) .filter_map(|history| { - self.tx_confirming_block(&history.get_txid()) - .map(|b| (history, b)) + let header = headers.header_by_height(history.key.confirmed_height as usize)?; + Some((history, BlockId::from(header))) }); let mut utxos = init_utxos; @@ -685,7 +818,7 @@ impl ChainQuery { // save updated stats to cache if let Some(lastblock) = lastblock { if newstats.funded_txo_count + newstats.spent_txo_count > MIN_HISTORY_ITEMS_TO_CACHE { - self.store.cache_db.write( + self.store.cache_db.write_rows( vec![StatsCacheRow::new(scripthash, &newstats, &lastblock).into_row()], DBFlush::Enable, ); @@ -702,15 +835,14 @@ impl ChainQuery { start_height: usize, ) -> (ScriptStats, Option) { let _timer = self.start_timer("stats_delta"); // TODO: measure also the number of txns processed. + let headers = self.store.indexed_headers.read().unwrap(); let history_iter = self .history_iter_scan(b'H', scripthash, start_height) .map(TxHistoryRow::from_row) + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) .filter_map(|history| { - self.tx_confirming_block(&history.get_txid()) - // drop history entries that were previously confirmed in a re-orged block and later - // confirmed again at a different height - .filter(|blockid| blockid.height == history.key.confirmed_height as usize) - .map(|blockid| (history, blockid)) + let header = headers.header_by_height(history.key.confirmed_height as usize)?; + Some((history, BlockId::from(header))) }); let mut stats = init_stats; @@ -765,7 +897,7 @@ impl ChainQuery { pub fn address_search(&self, prefix: &str, limit: usize) -> Vec { let _timer_scan = self.start_timer("address_search"); self.store - .history_db + .txstore_db .iter_scan(&addr_search_filter(prefix)) .take(limit) .map(|row| std::str::from_utf8(&row.key[1..]).unwrap().to_string()) @@ -828,8 +960,9 @@ impl ChainQuery { .map(BlockId::from) } + /// Get the chain tip height. Panics if called on an empty HeaderList. pub fn best_height(&self) -> usize { - self.store.indexed_headers.read().unwrap().len() - 1 + self.store.indexed_headers.read().unwrap().best_height() } pub fn best_hash(&self) -> BlockHash { @@ -844,26 +977,40 @@ impl ChainQuery { .clone() } - // TODO: can we pass txids as a "generic iterable"? - // TODO: should also use a custom ThreadPoolBuilder? - pub fn lookup_txns(&self, txids: &[(Txid, BlockId)]) -> Result> { + pub fn lookup_txns(&self, txids: &[(Txid, BlockHash)]) -> Result> { let _timer = self.start_timer("lookup_txns"); - txids - .par_iter() - .map(|(txid, blockid)| { - self.lookup_txn(txid, Some(&blockid.hash)) - .chain_err(|| "missing tx") - }) - .collect::>>() + Ok(self + .lookup_raw_txns(txids)? 
+ .into_iter() + .map(|rawtx| deserialize(&rawtx).expect("failed to parse Transaction")) + .collect()) } pub fn lookup_txn(&self, txid: &Txid, blockhash: Option<&BlockHash>) -> Option { let _timer = self.start_timer("lookup_txn"); - self.lookup_raw_txn(txid, blockhash).map(|rawtx| { - let txn: Transaction = deserialize(&rawtx).expect("failed to parse Transaction"); - assert_eq!(*txid, txn.compute_txid()); - txn - }) + let rawtx = self.lookup_raw_txn(txid, blockhash)?; + Some(deserialize(&rawtx).expect("failed to parse Transaction")) + } + + pub fn lookup_raw_txns(&self, txids: &[(Txid, BlockHash)]) -> Result> { + let _timer = self.start_timer("lookup_raw_txns"); + if self.light_mode { + txids + .par_iter() + .map(|(txid, blockhash)| { + self.lookup_raw_txn(txid, Some(blockhash)) + .chain_err(|| "missing tx") + }) + .collect() + } else { + let keys = txids.iter().map(|(txid, _)| TxRow::key(&txid[..])); + self.store + .txstore_db + .multi_get(keys) + .into_iter() + .map(|val| val.unwrap().chain_err(|| "missing tx")) + .collect() + } } pub fn lookup_raw_txn(&self, txid: &Txid, blockhash: Option<&BlockHash>) -> Option { @@ -897,34 +1044,54 @@ impl ChainQuery { pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option { let _timer = self.start_timer("lookup_spend"); + let edge = TxEdgeValue::from_bytes(&self.store.history_db.get(&TxEdgeRow::key(outpoint))?); + let headers = self.store.indexed_headers.read().unwrap(); + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + let header = headers.header_by_height(edge.spending_height as usize)?; + Some(SpendingInput { + txid: deserialize(&edge.spending_txid).expect("failed to parse Txid"), + vin: edge.spending_vin as u32, + confirmed: Some(header.into()), + }) + } + + pub fn lookup_spends(&self, outpoints: BTreeSet) -> HashMap { + let _timer = self.start_timer("lookup_spends"); + let headers = self.store.indexed_headers.read().unwrap(); self.store .history_db - .iter_scan(&TxEdgeRow::filter(&outpoint)) - .map(TxEdgeRow::from_row) - .find_map(|edge| { - let txid: Txid = deserialize(&edge.key.spending_txid).unwrap(); - self.tx_confirming_block(&txid).map(|b| SpendingInput { - txid, - vin: edge.key.spending_vin as u32, - confirmed: Some(b), - }) + .multi_get(outpoints.iter().map(TxEdgeRow::key)) + .into_iter() + .zip(outpoints) + .filter_map(|(edge_val, outpoint)| { + let edge = TxEdgeValue::from_bytes(&edge_val.unwrap()?); + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + let header = headers.header_by_height(edge.spending_height as usize)?; + Some(( + outpoint, + SpendingInput { + txid: deserialize(&edge.spending_txid).expect("failed to parse Txid"), + vin: edge.spending_vin as u32, + confirmed: Some(header.into()), + }, + )) }) + .collect() } pub fn tx_confirming_block(&self, txid: &Txid) -> Option { let _timer = self.start_timer("tx_confirming_block"); + let row_value = self.store.history_db.get(&TxConfRow::key(txid))?; + let height = TxConfRow::height_from_val(&row_value); let headers = self.store.indexed_headers.read().unwrap(); - self.store - .txstore_db - .iter_scan(&TxConfRow::filter(&txid[..])) - .map(TxConfRow::from_row) - // header_by_blockhash only returns blocks that are part of the best chain, - // or None for orphaned blocks. 
- .filter_map(|conf| { - headers.header_by_blockhash(&deserialize(&conf.key.blockhash).unwrap()) - }) - .next() - .map(BlockId::from) + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + Some(headers.header_by_height(height as usize)?.into()) + } + + pub fn lookup_confirmations(&self, txids: BTreeSet) -> HashMap { + let _timer = self.start_timer("lookup_confirmations"); + let headers = self.store.indexed_headers.read().unwrap(); + lookup_confirmations(&self.store.history_db, headers.best_height() as u32, txids) } pub fn get_block_status(&self, hash: &BlockHash) -> BlockStatus { @@ -998,7 +1165,6 @@ fn load_blockheaders(db: &DB) -> HashMap { fn add_blocks(block_entries: &[BlockEntry], iconfig: &IndexerConfig) -> Vec { // persist individual transactions: // T{txid} → {rawtx} - // C{txid}{blockhash}{height} → // O{txid}{index} → {txout} // persist block headers', block txids' and metadata rows: // B{blockhash} → {header} @@ -1011,7 +1177,7 @@ fn add_blocks(block_entries: &[BlockEntry], iconfig: &IndexerConfig) -> Vec = b.block.txdata.iter().map(|tx| tx.compute_txid()).collect(); for (tx, txid) in b.block.txdata.iter().zip(txids.iter()) { - add_transaction(*txid, tx, blockhash, &mut rows, iconfig); + add_transaction(*txid, tx, &mut rows, iconfig); } if !iconfig.light_mode { @@ -1027,15 +1193,7 @@ fn add_blocks(block_entries: &[BlockEntry], iconfig: &IndexerConfig) -> Vec, - iconfig: &IndexerConfig, -) { - rows.push(TxConfRow::new(txid, blockhash).into_row()); - +fn add_transaction(txid: Txid, tx: &Transaction, rows: &mut Vec, iconfig: &IndexerConfig) { if !iconfig.light_mode { rows.push(TxRow::new(txid, tx).into_row()); } @@ -1045,6 +1203,12 @@ fn add_transaction( if is_spendable(txo) { rows.push(TxOutRow::new(&txid, txo_index, txo).into_row()); } + + if iconfig.address_search { + if let Some(row) = addr_search_row(&txo.script_pubkey, iconfig.network) { + rows.push(row); + } + } } } @@ -1082,6 +1246,23 @@ fn lookup_txo(txstore_db: &DB, outpoint: &OutPoint) -> Option { .map(|val| deserialize(&val).expect("failed to parse TxOut")) } +pub fn lookup_confirmations( + history_db: &DB, + tip_height: u32, + txids: BTreeSet, +) -> HashMap { + history_db + .multi_get(txids.iter().map(TxConfRow::key)) + .into_iter() + .zip(txids) + .filter_map(|(res, txid)| { + let confirmation_height = u32::from_le_bytes(res.unwrap()?.try_into().unwrap()); + // skip over entries that point to non-existing heights (may happen while new/reorged blocks are being processed) + (confirmation_height <= tip_height).then_some((txid, confirmation_height)) + }) + .collect() +} + fn index_blocks( block_entries: &[BlockEntry], previous_txos_map: &HashMap, @@ -1110,12 +1291,17 @@ fn index_transaction( rows: &mut Vec, iconfig: &IndexerConfig, ) { + let txid = full_hash(&tx.compute_txid()[..]); + + // persist tx confirmation row: + // C{txid} → "{block_height}" + rows.push(TxConfRow::new(txid, confirmed_height).into_row()); + // persist history index: // H{funding-scripthash}{funding-height}F{funding-txid:vout} → "" // H{funding-scripthash}{spending-height}S{spending-txid:vin}{funding-txid:vout} → "" // persist "edges" for fast is-this-TXO-spent check // S{funding-txid:vout}{spending-txid:vin} → "" - let txid = full_hash(&tx.compute_txid()[..]); for (txo_index, txo) in tx.output.iter().enumerate() { if is_spendable(txo) || iconfig.index_unspendables { let history = TxHistoryRow::new( @@ -1128,12 +1314,6 @@ fn index_transaction( }), ); rows.push(history.into_row()); - - if 
iconfig.address_search { - if let Some(row) = addr_search_row(&txo.script_pubkey, iconfig.network) { - rows.push(row); - } - } } } for (txi_index, txi) in tx.input.iter().enumerate() { @@ -1162,6 +1342,7 @@ fn index_transaction( txi.previous_output.vout as u16, txid, txi_index as u16, + confirmed_height, ); rows.push(edge.into_row()); } @@ -1237,43 +1418,41 @@ impl TxRow { } #[derive(Serialize, Deserialize)] -struct TxConfKey { +pub struct TxConfKey { code: u8, txid: FullHash, - blockhash: FullHash, } -struct TxConfRow { +pub struct TxConfRow { key: TxConfKey, + value: u32, // the confirmation height } impl TxConfRow { - fn new(txid: Txid, blockhash: FullHash) -> TxConfRow { - let txid = full_hash(&txid[..]); + pub fn new(txid: FullHash, height: u32) -> TxConfRow { TxConfRow { - key: TxConfKey { - code: b'C', - txid, - blockhash, - }, + key: TxConfKey { code: b'C', txid }, + value: height, } } - fn filter(prefix: &[u8]) -> Bytes { - [b"C", prefix].concat() + pub fn key(txid: &Txid) -> Bytes { + bincode::serialize_little(&TxConfKey { + code: b'C', + txid: full_hash(&txid[..]), + }) + .unwrap() } - fn into_row(self) -> DBRow { + pub fn into_row(self) -> DBRow { DBRow { key: bincode::serialize_little(&self.key).unwrap(), - value: vec![], + value: self.value.to_le_bytes().to_vec(), } } - fn from_row(row: DBRow) -> Self { - TxConfRow { - key: bincode::deserialize_little(&row.key).expect("failed to parse TxConfKey"), - } + fn height_from_val(val: &[u8]) -> u32 { + u32::from_le_bytes(val.try_into().expect("invalid TxConf value")) } } @@ -1516,52 +1695,61 @@ impl TxHistoryInfo { } #[derive(Serialize, Deserialize)] -struct TxEdgeKey { +pub struct TxEdgeKey { code: u8, funding_txid: FullHash, funding_vout: u16, +} + +#[derive(Serialize, Deserialize)] +pub struct TxEdgeValue { spending_txid: FullHash, spending_vin: u16, + spending_height: u32, } -struct TxEdgeRow { +pub struct TxEdgeRow { key: TxEdgeKey, + value: TxEdgeValue, } impl TxEdgeRow { - fn new( + pub fn new( funding_txid: FullHash, funding_vout: u16, spending_txid: FullHash, spending_vin: u16, + spending_height: u32, ) -> Self { let key = TxEdgeKey { code: b'S', funding_txid, funding_vout, + }; + let value = TxEdgeValue { spending_txid, spending_vin, + spending_height, }; - TxEdgeRow { key } + TxEdgeRow { key, value } } - fn filter(outpoint: &OutPoint) -> Bytes { - // TODO build key without using bincode? [ b"S", &outpoint.txid[..], outpoint.vout?? 
].concat()
+    fn key(outpoint: &OutPoint) -> Bytes {
         bincode::serialize_little(&(b'S', full_hash(&outpoint.txid[..]), outpoint.vout as u16))
             .unwrap()
     }

-    fn into_row(self) -> DBRow {
+    pub fn into_row(self) -> DBRow {
         DBRow {
             key: bincode::serialize_little(&self.key).unwrap(),
-            value: vec![],
+            value: bincode::serialize_little(&self.value).unwrap(),
         }
     }
+}

-    fn from_row(row: DBRow) -> Self {
-        TxEdgeRow {
-            key: bincode::deserialize_little(&row.key).expect("failed to deserialize TxEdgeKey"),
-        }
+impl TxEdgeValue {
+    fn from_bytes(bytes: &[u8]) -> Self {
+        bincode::deserialize_little(bytes).expect("invalid TxEdgeValue")
     }
 }
diff --git a/src/rest.rs b/src/rest.rs
index cefc49b7c..43fc7461a 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -718,41 +718,28 @@ fn handle_request(
         }
         (&Method::GET, Some(&"block"), Some(hash), Some(&"txs"), start_index, None) => {
             let hash = BlockHash::from_str(hash)?;
-            let txids = query
-                .chain()
-                .get_block_txids(&hash)
-                .ok_or_else(|| HttpError::not_found("Block not found".to_string()))?;
-
             let start_index = start_index
                 .map_or(0u32, |el| el.parse().unwrap_or(0))
                 .max(0u32) as usize;
-            if start_index >= txids.len() {
-                bail!(HttpError::not_found("start index out of range".to_string()));
-            } else if start_index % CHAIN_TXS_PER_PAGE != 0 {
-                bail!(HttpError::from(format!(
-                    "start index must be a multipication of {}",
-                    CHAIN_TXS_PER_PAGE
-                )));
-            }
-            // blockid_by_hash() only returns the BlockId for non-orphaned blocks,
-            // or None for orphaned
-            let confirmed_blockid = query.chain().blockid_by_hash(&hash);
+            ensure!(
+                start_index % CHAIN_TXS_PER_PAGE == 0,
+                "start index must be a multiple of {}",
+                CHAIN_TXS_PER_PAGE
+            );
+
+            // The BlockId would not be available for stale blocks
+            let blockid = query.chain().blockid_by_hash(&hash);

-            let txs = txids
-                .iter()
-                .skip(start_index)
-                .take(CHAIN_TXS_PER_PAGE)
-                .map(|txid| {
-                    query
-                        .lookup_txn(&txid)
-                        .map(|tx| (tx, confirmed_blockid.clone()))
-                        .ok_or_else(|| "missing tx".to_string())
-                })
-                .collect::<Result<Vec<(Transaction, Option<BlockId>)>, _>>()?;
+            let txs = query
+                .chain()
+                .get_block_txs(&hash, start_index, CHAIN_TXS_PER_PAGE)?
+                .into_iter()
+                .map(|tx| (tx, blockid))
+                .collect();

-            // XXX orphraned blocks alway get TTL_SHORT
-            let ttl = ttl_by_depth(confirmed_blockid.map(|b| b.height), query);
+            // XXX stale blocks always get TTL_SHORT
+            let ttl = ttl_by_depth(blockid.map(|b| b.height), query);

             json_response(prepare_txs(txs, query, config), ttl)
         }
@@ -996,7 +983,7 @@ fn handle_request(
             .lookup_txn(&hash)
             .ok_or_else(|| HttpError::not_found("Transaction not found".to_string()))?;
         let spends: Vec<SpendingValue> = query
-            .lookup_tx_spends(tx)
+            .lookup_tx_spends(&tx)
             .into_iter()
             .map(|spend| spend.map_or_else(SpendingValue::default, SpendingValue::from))
             .collect();
@@ -1060,8 +1047,8 @@ fn handle_request(
                     HttpError::from(format!("Invalid transaction hex for item {}", index))
                 })?
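With the paging and the "missing tx" handling pushed down into ChainQuery::get_block_txs, the /block/:hash/txs handler above reduces to a simple mapping. get_block_txs itself is defined outside this hunk; the following is only a rough sketch of what it can be built from using pieces that do appear here (get_block_txids plus the new batched lookup_txns). The bounds check and error strings are assumptions, not the actual implementation:

    // Hypothetical sketch of ChainQuery::get_block_txs(), for orientation only.
    pub fn get_block_txs(
        &self,
        hash: &BlockHash,
        start_index: usize,
        limit: usize,
    ) -> Result<Vec<Transaction>> {
        let txids = self
            .get_block_txids(hash)
            .ok_or_else(|| Error::from("Block not found"))?;
        if start_index >= txids.len() {
            return Err("start index out of range".into());
        }
        let page: Vec<(Txid, BlockHash)> = txids
            .into_iter()
            .skip(start_index)
            .take(limit)
            .map(|txid| (txid, *hash))
            .collect();
        // one multi_get against the txstore instead of a lookup_txn() call per txid
        self.lookup_txns(&page)
    }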
.filter(|r| r.is_err()) - .next() - .transpose() + .next() + .transpose() .map_err(|_| { HttpError::from(format!("Invalid transaction hex for item {}", index)) }) diff --git a/src/util/block.rs b/src/util/block.rs index 5dac63bcf..c5c2f7c5a 100644 --- a/src/util/block.rs +++ b/src/util/block.rs @@ -2,9 +2,9 @@ use crate::chain::{BlockHash, BlockHeader}; use crate::errors::*; use crate::new_index::BlockEntry; +use itertools::Itertools; use std::collections::HashMap; use std::fmt; -use std::iter::FromIterator; use std::slice; use time::format_description::well_known::Rfc3339; use time::OffsetDateTime as DateTime; @@ -128,59 +128,88 @@ impl HeaderList { ); let mut headers = HeaderList::empty(); - headers.apply(headers.order(headers_chain)); + headers.append(headers.preprocess(headers_chain, &tip_hash).0); headers } + /// Pre-process the given `BlockHeader`s to verify they connect to the chain and to + /// transform them into `HeaderEntry`s with heights and hashes - but without saving them. + /// If the headers trigger a reorg, the `reorged_since` height is returned too. + /// Actually applying the headers requires to first pop() the reorged blocks (if any), + /// then append() the new ones. #[trace] - pub fn order(&self, new_headers: Vec) -> Vec { + pub fn preprocess( + &self, + new_headers: Vec, + new_tip: &BlockHash, + ) -> (Vec, Option) { // header[i] -> header[i-1] (i.e. header.last() is the tip) - struct HashedHeader { - blockhash: BlockHash, - header: BlockHeader, - } - let hashed_headers = - Vec::::from_iter(new_headers.into_iter().map(|header| HashedHeader { - blockhash: header.block_hash(), - header, - })); - for i in 1..hashed_headers.len() { - assert_eq!( - hashed_headers[i].header.prev_blockhash, - hashed_headers[i - 1].blockhash - ); - } - let prev_blockhash = match hashed_headers.first() { - Some(h) => h.header.prev_blockhash, - None => return vec![], // hashed_headers is empty - }; - let new_height: usize = if prev_blockhash == *DEFAULT_BLOCKHASH { - 0 + let (new_height, header_entries) = if !new_headers.is_empty() { + let hashed_headers = new_headers + .into_iter() + .map(|h| (h.block_hash(), h)) + .collect::>(); + for ((curr_blockhash, _), (_, next_header)) in hashed_headers.iter().tuple_windows() { + assert_eq!(*curr_blockhash, next_header.prev_blockhash); + } + assert_eq!(hashed_headers.last().unwrap().0, *new_tip); + + let prev_blockhash = &hashed_headers.first().unwrap().1.prev_blockhash; + let new_height = if *prev_blockhash == *DEFAULT_BLOCKHASH { + 0 + } else { + self.header_by_blockhash(prev_blockhash) + .expect("headers do not connect") + .height() + + 1 + }; + let header_entries = (new_height..) + .zip(hashed_headers) + .map(|(height, (hash, header))| HeaderEntry { + height, + hash, + header, + }) + .collect(); + (new_height, header_entries) } else { - self.header_by_blockhash(&prev_blockhash) - .unwrap_or_else(|| panic!("{} is not part of the blockchain", prev_blockhash)) + // No new headers, but the new tip could potentially shorten the chain (or be a no-op if it matches the existing tip) + // This should not normally happen, but might due to manual `invalidateblock` + let new_height = self + .header_by_blockhash(new_tip) + .expect("new tip not in chain") .height() - + 1 + + 1; + (new_height, vec![]) }; - (new_height..) 
- .zip(hashed_headers.into_iter()) - .map(|(height, hashed_header)| HeaderEntry { - height, - hash: hashed_header.blockhash, - header: hashed_header.header, - }) - .collect() + let reorged_since = (new_height < self.len()).then_some(new_height); + (header_entries, reorged_since) + } + + /// Pop off reorged blocks since (including) the given height and return them. + #[trace] + pub fn pop(&mut self, since_height: usize) -> Vec { + let reorged_headers = self.headers.split_off(since_height); + + for header in &reorged_headers { + self.heights.remove(header.hash()); + } + self.tip = self + .headers + .last() + .map(|h| *h.hash()) + .unwrap_or_else(|| *DEFAULT_BLOCKHASH); + + reorged_headers } + /// Append new headers. Expected to always extend the tip (stale blocks must be removed first) #[trace] - pub fn apply(&mut self, new_headers: Vec) { + pub fn append(&mut self, new_headers: Vec) { // new_headers[i] -> new_headers[i - 1] (i.e. new_headers.last() is the tip) - for i in 1..new_headers.len() { - assert_eq!(new_headers[i - 1].height() + 1, new_headers[i].height()); - assert_eq!( - *new_headers[i - 1].hash(), - new_headers[i].header().prev_blockhash - ); + for (curr_header, next_header) in new_headers.iter().tuple_windows() { + assert_eq!(curr_header.height() + 1, next_header.height()); + assert_eq!(*curr_header.hash(), next_header.header().prev_blockhash); } let new_height = match new_headers.first() { Some(entry) => { @@ -200,7 +229,7 @@ impl HeaderList { new_headers.len(), new_height ); - let _removed = self.headers.split_off(new_height); // keep [0..new_height) entries + assert_eq!(new_height, self.headers.len()); for new_header in new_headers { let height = new_header.height(); assert_eq!(height, self.headers.len()); @@ -214,11 +243,8 @@ impl HeaderList { pub fn header_by_blockhash(&self, blockhash: &BlockHash) -> Option<&HeaderEntry> { let height = self.heights.get(blockhash)?; let header = self.headers.get(*height)?; - if *blockhash == *header.hash() { - Some(header) - } else { - None - } + assert_eq!(header.hash(), blockhash); + Some(header) } #[trace] @@ -248,6 +274,13 @@ impl HeaderList { self.headers.len() } + /// Get the chain tip height. Panics if called on an empty HeaderList. 
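Taken together, the new API splits the old order/apply pair into three steps: preprocess() validates the incoming headers and computes their heights without mutating the list, pop() evicts the reorged suffix, and append() extends the tip. A minimal sketch of the intended call order, per the doc comments above; new_headers and new_tip stand in for whatever was fetched from the daemon, and rolling back the indexed rows for the popped blocks is the caller's responsibility and is not shown:

    // Sketch of driving HeaderList through a (possible) reorg -- not the indexer's actual update code.
    let (new_entries, reorged_since) = headers.preprocess(new_headers, &new_tip);
    if let Some(since_height) = reorged_since {
        // evict the stale suffix first; these entries are what the indexer
        // has to un-index before the replacement blocks are applied
        let stale = headers.pop(since_height);
        debug!("reorg: dropped {} stale headers from height {}", stale.len(), since_height);
    }
    // append() expects to strictly extend the tip, which now holds
    headers.append(new_entries);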
+ pub fn best_height(&self) -> usize { + self.len() + .checked_sub(1) + .expect("best_height() on empty HeaderList") + } + pub fn is_empty(&self) -> bool { self.headers.is_empty() } @@ -262,7 +295,7 @@ impl HeaderList { // Matches bitcoind's behaviour: bitcoin-cli getblock `bitcoin-cli getblockhash 0` | jq '.time == .mediantime' if height == 0 { self.headers.get(0).unwrap().header.time - } else if height > self.len() - 1 { + } else if height > self.best_height() { 0 } else { let mut timestamps = (height.saturating_sub(MTP_SPAN - 1)..=height) diff --git a/tests/common.rs b/tests/common.rs index 5fb995d2d..b160cac8c 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -17,6 +17,7 @@ use elementsd::{self as noded, ElementsD as NodeD}; use noded::bitcoincore_rpc::{self, RpcApi}; +use electrs::config::RpcLogging; use electrs::{ chain::{Address, BlockHash, Network, Txid}, config::Config, @@ -27,7 +28,6 @@ use electrs::{ rest, signal::Waiter, }; -use electrs::config::RpcLogging; pub struct TestRunner { config: Arc, @@ -144,7 +144,7 @@ impl TestRunner { &metrics, )?); - let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config, &metrics)); + let store = Arc::new(Store::open(&config, &metrics, true)); let fetch_from = if !env::var("JSONRPC_IMPORT").is_ok() && !cfg!(feature = "liquid") { // run the initial indexing from the blk files then switch to using the jsonrpc, @@ -276,25 +276,50 @@ impl TestRunner { } } +// Make the RpcApi methods available directly on TestRunner, +// without having to go through the node_client() getter +impl bitcoincore_rpc::RpcApi for TestRunner { + fn call serde::de::Deserialize<'a>>( + &self, + cmd: &str, + args: &[serde_json::Value], + ) -> bitcoincore_rpc::Result { + self.node_client().call(cmd, args) + } +} + pub fn init_rest_tester() -> Result<(rest::Handle, net::SocketAddr, TestRunner)> { let tester = TestRunner::new()?; + let addr = tester.config.http_addr; let rest_server = rest::start(Arc::clone(&tester.config), Arc::clone(&tester.query)); - log::info!("REST server running on {}", tester.config.http_addr); - Ok((rest_server, tester.config.http_addr, tester)) + // Wait for the REST server thread to bind and start listening + for _ in 0..50 { + if net::TcpStream::connect(addr).is_ok() { + log::info!("REST server running on {}", addr); + return Ok((rest_server, addr, tester)); + } + std::thread::sleep(std::time::Duration::from_millis(100)); + } + panic!("REST server failed to start on {}", addr); } pub fn init_electrum_tester() -> Result<(ElectrumRPC, net::SocketAddr, TestRunner)> { let tester = TestRunner::new()?; + let addr = tester.config.electrum_rpc_addr; let electrum_server = ElectrumRPC::start( Arc::clone(&tester.config), Arc::clone(&tester.query), &tester.metrics, Arc::clone(&tester.salt_rwlock), ); - log::info!( - "Electrum server running on {}", - tester.config.electrum_rpc_addr - ); - Ok((electrum_server, tester.config.electrum_rpc_addr, tester)) + // Wait for the Electrum server thread to bind and start listening + for _ in 0..50 { + if net::TcpStream::connect(addr).is_ok() { + log::info!("Electrum server running on {}", addr); + return Ok((electrum_server, addr, tester)); + } + std::thread::sleep(std::time::Duration::from_millis(100)); + } + panic!("Electrum server failed to start on {}", addr); } #[cfg(not(feature = "liquid"))] @@ -336,9 +361,20 @@ fn init_log() -> StdErrLog { } fn rand_available_addr() -> net::SocketAddr { - // note this has a potential but unlikely race condition, if the port is grabbed before the caller binds it - 
let socket = net::UdpSocket::bind("127.0.0.1:0").unwrap(); - socket.local_addr().unwrap() + use std::collections::HashSet; + use std::sync::Mutex; + + lazy_static::lazy_static! { + static ref USED_PORTS: Mutex> = Mutex::new(HashSet::new()); + } + + loop { + let socket = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = socket.local_addr().unwrap(); + if USED_PORTS.lock().unwrap().insert(addr.port()) { + return addr; + } + } } error_chain::error_chain! { diff --git a/tests/electrum.rs b/tests/electrum.rs index 8bf148336..a3e54327f 100644 --- a/tests/electrum.rs +++ b/tests/electrum.rs @@ -9,36 +9,51 @@ use electrumd::jsonrpc::serde_json::json; use electrumd::ElectrumD; use electrs::chain::Address; +use electrs::electrum::RPC as ElectrumRPC; #[cfg(not(feature = "liquid"))] use bitcoin::address; -/// Test the Electrum RPC server using an headless Electrum wallet -/// This only runs on Bitcoin (non-Liquid) mode. -#[cfg_attr(not(feature = "liquid"), test)] -#[cfg_attr(feature = "liquid", allow(dead_code))] -fn test_electrum() -> Result<()> { - // Spawn an Electrs Electrum RPC server - let (electrum_server, electrum_addr, mut tester) = common::init_electrum_tester().unwrap(); - - // Spawn an headless Electrum wallet RPC daemon, connected to Electrs - let mut electrum_wallet_conf = electrumd::Conf::default(); - let server_arg = format!("{}:t", electrum_addr.to_string()); - electrum_wallet_conf.args = if std::env::var_os("RUST_LOG").is_some() { - vec!["-v", "--server", &server_arg] - } else { - vec!["--server", &server_arg] - }; - electrum_wallet_conf.view_stdout = true; - let electrum_wallet = ElectrumD::with_conf(electrumd::exe_path()?, &electrum_wallet_conf)?; - - let notify_wallet = || { - electrum_server.notify(); +struct WalletTester { + electrum_server: ElectrumRPC, + electrum_wallet: ElectrumD, + tester: common::TestRunner, +} + +impl WalletTester { + fn new() -> Result { + let (electrum_server, electrum_addr, tester) = common::init_electrum_tester().unwrap(); + + let mut electrum_wallet_conf = electrumd::Conf::default(); + let server_arg = format!("{}:t", electrum_addr); + electrum_wallet_conf.args = if std::env::var_os("RUST_LOG").is_some() { + vec!["-v", "--server", &server_arg] + } else { + vec!["--server", &server_arg] + }; + electrum_wallet_conf.view_stdout = true; + let electrum_wallet = + ElectrumD::with_conf(electrumd::exe_path()?, &electrum_wallet_conf)?; + + log::info!( + "Electrum wallet version: {:?}", + electrum_wallet.call("version", &json!([]))? 
+ ); + + Ok(WalletTester { + electrum_server, + electrum_wallet, + tester, + }) + } + + fn notify_wallet(&self) { + self.electrum_server.notify(); std::thread::sleep(std::time::Duration::from_millis(200)); - }; + } - let assert_balance = |confirmed: f64, unconfirmed: f64| { - let balance = electrum_wallet.call("getbalance", &json!([])).unwrap(); + fn assert_balance(&self, confirmed: f64, unconfirmed: f64) { + let balance = self.electrum_wallet.call("getbalance", &json!([])).unwrap(); log::info!("balance: {}", balance); assert_eq!( @@ -53,15 +68,16 @@ fn test_electrum() -> Result<()> { } else { assert!(balance["unconfirmed"].is_null()) } - }; + } - let newaddress = || -> Address { + fn newaddress(&self) -> Address { #[cfg(not(feature = "liquid"))] type ParseAddrType = Address; #[cfg(feature = "liquid")] type ParseAddrType = Address; - let addr = electrum_wallet + let addr = self + .electrum_wallet .call("createnewaddress", &json!([])) .unwrap() .as_str() @@ -73,37 +89,55 @@ fn test_electrum() -> Result<()> { let addr = addr.assume_checked(); addr - }; + } +} - log::info!( - "Electrum wallet version: {:?}", - electrum_wallet.call("version", &json!([]))? - ); +/// Test balance tracking with confirmed and unconfirmed transactions +#[cfg_attr(not(feature = "liquid"), test)] +#[cfg_attr(feature = "liquid", allow(dead_code))] +fn test_electrum_balance() -> Result<()> { + let mut wt = WalletTester::new()?; + + let addr1 = wt.newaddress(); + let addr2 = wt.newaddress(); + + wt.assert_balance(0.0, 0.0); + + wt.tester.send(&addr1, "0.1 BTC".parse().unwrap())?; + wt.notify_wallet(); + wt.assert_balance(0.0, 0.1); - // Send some funds and verify that the balance checks out - let addr1 = newaddress(); - let addr2 = newaddress(); + wt.tester.mine()?; + wt.notify_wallet(); + wt.assert_balance(0.1, 0.0); - assert_balance(0.0, 0.0); + wt.tester.send(&addr2, "0.2 BTC".parse().unwrap())?; + wt.notify_wallet(); + wt.assert_balance(0.1, 0.2); - let txid1 = tester.send(&addr1, "0.1 BTC".parse().unwrap())?; - notify_wallet(); - assert_balance(0.0, 0.1); + wt.tester.mine()?; + wt.notify_wallet(); + wt.assert_balance(0.3, 0.0); - tester.mine()?; - notify_wallet(); - assert_balance(0.1, 0.0); + Ok(()) +} + +/// Test transaction history via onchain_history +#[cfg_attr(not(feature = "liquid"), test)] +#[cfg_attr(feature = "liquid", allow(dead_code))] +fn test_electrum_history() -> Result<()> { + let mut wt = WalletTester::new()?; - let txid2 = tester.send(&addr2, "0.2 BTC".parse().unwrap())?; - notify_wallet(); - assert_balance(0.1, 0.2); + let addr1 = wt.newaddress(); + let addr2 = wt.newaddress(); - tester.mine()?; - notify_wallet(); - assert_balance(0.3, 0.0); + let txid1 = wt.tester.send(&addr1, "0.1 BTC".parse().unwrap())?; + wt.tester.mine()?; + let txid2 = wt.tester.send(&addr2, "0.2 BTC".parse().unwrap())?; + wt.tester.mine()?; + wt.notify_wallet(); - // Verify that the transaction history checks out - let history = electrum_wallet.call("onchain_history", &json!([]))?; + let history = wt.electrum_wallet.call("onchain_history", &json!([]))?; log::debug!("history = {:#?}", history); assert_eq!( history["transactions"][0]["txid"].as_str(), @@ -119,37 +153,47 @@ fn test_electrum() -> Result<()> { assert_eq!(history["transactions"][1]["height"].as_u64(), Some(103)); assert_eq!(history["transactions"][1]["bc_value"].as_str(), Some("0.2")); - // Send an outgoing payment - electrum_wallet.call( + Ok(()) +} + +/// Test sending an outgoing payment +#[cfg_attr(not(feature = "liquid"), test)] +#[cfg_attr(feature = 
"liquid", allow(dead_code))] +fn test_electrum_payment() -> Result<()> { + let mut wt = WalletTester::new()?; + + let addr1 = wt.newaddress(); + wt.tester.send(&addr1, "0.3 BTC".parse().unwrap())?; + wt.tester.mine()?; + wt.notify_wallet(); + wt.assert_balance(0.3, 0.0); + + wt.electrum_wallet.call( "broadcast", - &json!([electrum_wallet.call( + &json!([wt.electrum_wallet.call( "payto", &json!({ - "destination": tester.node_client().get_new_address(None, None)?, + "destination": wt.tester.node_client().get_new_address(None, None)?, "amount": 0.16, "fee": 0.001, }), )?]), )?; - notify_wallet(); - assert_balance(0.139, 0.0); + wt.notify_wallet(); + wt.assert_balance(0.139, 0.0); - tester.mine()?; - notify_wallet(); - assert_balance(0.139, 0.0); + wt.tester.mine()?; + wt.notify_wallet(); + wt.assert_balance(0.139, 0.0); Ok(()) } /// Test the Electrum RPC server using a raw TCP socket -/// This only runs on Bitcoin (non-Liquid) mode. #[cfg_attr(not(feature = "liquid"), test)] #[cfg_attr(feature = "liquid", allow(dead_code))] -#[ignore = "must be launched singularly, otherwise conflict with the other server"] fn test_electrum_raw() { - // Spawn an Electrs Electrum RPC server let (_electrum_server, electrum_addr, mut _tester) = common::init_electrum_tester().unwrap(); - std::thread::sleep(std::time::Duration::from_millis(1000)); let mut stream = TcpStream::connect(electrum_addr).unwrap(); let write = "{\"jsonrpc\": \"2.0\", \"method\": \"server.version\", \"id\": 0}"; diff --git a/tests/rest.rs b/tests/rest.rs index 382ad16fd..872416c51 100644 --- a/tests/rest.rs +++ b/tests/rest.rs @@ -1,6 +1,13 @@ +use bitcoin::hashes::{sha256, Hash}; +use bitcoin::hex::FromHex; use bitcoind::bitcoincore_rpc::RpcApi; use serde_json::Value; use std::collections::HashSet; +use std::io::Read; +use std::net; + +#[cfg(not(feature = "liquid"))] +use {bitcoin::Amount, serde_json::from_value}; use electrs::chain::Txid; @@ -8,21 +15,21 @@ pub mod common; use common::Result; -#[test] -fn test_rest() -> Result<()> { - let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); +fn get(rest_addr: net::SocketAddr, path: &str) -> std::result::Result { + ureq::get(&format!("http://{}{}", rest_addr, path)).call() +} - let get_json = |path: &str| -> Result { - Ok(ureq::get(&format!("http://{}{}", rest_addr, path)) - .call()? - .into_json::()?) - }; +fn get_json(rest_addr: net::SocketAddr, path: &str) -> Result { + Ok(get(rest_addr, path)?.into_json::()?) +} - let get_plain = |path: &str| -> Result { - Ok(ureq::get(&format!("http://{}{}", rest_addr, path)) - .call()? - .into_string()?) - }; +fn get_plain(rest_addr: net::SocketAddr, path: &str) -> Result { + Ok(get(rest_addr, path)?.into_string()?) 
+} + +#[test] +fn test_rest_tx() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); // Send transaction and confirm it let addr1 = tester.newaddress()?; @@ -33,45 +40,143 @@ fn test_rest() -> Result<()> { let txid2_mempool = tester.send(&addr1, "0.7113 BTC".parse().unwrap())?; // Test GET /tx/:txid - let res = get_json(&format!("/tx/{}", txid1_confirmed))?; + let res = get_json(rest_addr, &format!("/tx/{}", txid1_confirmed))?; log::debug!("tx: {:#?}", res); + + // Verify TransactionValue fields with actual values + assert_eq!( + res["txid"].as_str(), + Some(txid1_confirmed.to_string().as_str()) + ); + assert_eq!(res["version"].as_u64(), Some(2)); + assert!(res["locktime"].as_u64().is_some()); + assert!(res["size"].as_u64().unwrap() > 0); + assert!(res["weight"].as_u64().unwrap() > 0); + assert!(res["fee"].as_u64().unwrap() > 0); + #[cfg(feature = "liquid")] + { + assert_eq!(res["discount_vsize"].as_u64().unwrap(), 228); + assert_eq!(res["discount_weight"].as_u64().unwrap(), 912); + } + + // Verify status on the TransactionValue itself + assert_eq!(res["status"]["confirmed"].as_bool(), Some(true)); + assert_eq!(res["status"]["block_height"].as_u64(), Some(102)); + assert!(res["status"]["block_hash"].is_string()); + assert!(res["status"]["block_time"].as_u64().unwrap() > 0); + + // Verify vout fields and find our target output let outs = res["vout"].as_array().expect("array of outs"); assert!(outs.iter().any(|vout| { vout["scriptpubkey_address"].as_str() == Some(&addr1.to_string()) && vout["value"].as_u64() == Some(119123000) })); - #[cfg(feature = "liquid")] + for vout in outs { + assert!(vout["scriptpubkey"].is_string()); + assert!(vout["scriptpubkey_asm"].is_string()); + assert!(vout["scriptpubkey_type"].is_string()); + } + // Verify our target output's scriptpubkey_type (Bitcoin uses segwit address types) + #[cfg(not(feature = "liquid"))] { - assert_eq!(res["discount_vsize"].as_u64().unwrap(), 228); - assert_eq!(res["discount_weight"].as_u64().unwrap(), 912); + let target_vout = outs + .iter() + .find(|v| v["scriptpubkey_address"].as_str() == Some(&addr1.to_string())) + .unwrap(); + let spk_type = target_vout["scriptpubkey_type"].as_str().unwrap(); + assert!( + spk_type == "v0_p2wpkh" || spk_type == "v1_p2tr", + "unexpected scriptpubkey_type: {}", + spk_type + ); } - // Test GET /tx/:txid/status - let res = get_json(&format!("/tx/{}/status", txid1_confirmed))?; + // Verify vin fields (non-coinbase input) + let vin0 = &res["vin"][0]; + assert!(vin0["txid"].is_string()); + assert!(vin0["vout"].is_u64()); + assert_eq!(vin0["is_coinbase"].as_bool(), Some(false)); + assert!(vin0["sequence"].as_u64().is_some()); + assert!(vin0["scriptsig"].is_string()); + assert!(vin0["scriptsig_asm"].is_string()); + // prevout should be present for non-coinbase inputs + assert!(vin0["prevout"].is_object()); + assert!(vin0["prevout"]["scriptpubkey"].is_string()); + assert!(vin0["prevout"]["scriptpubkey_type"].is_string()); + #[cfg(not(feature = "liquid"))] + assert!(vin0["prevout"]["value"].as_u64().unwrap() > 0); + + // Verify coinbase tx input + let block_hash = res["status"]["block_hash"].as_str().unwrap(); + let block_txs = get_json(rest_addr, &format!("/block/{}/txs", block_hash))?; + let coinbase_tx = &block_txs.as_array().unwrap()[0]; + let cb_vin = &coinbase_tx["vin"][0]; + assert_eq!(cb_vin["is_coinbase"].as_bool(), Some(true)); + assert!(cb_vin["scriptsig"].is_string()); + assert!(cb_vin["scriptsig_asm"].is_string()); + 
assert!(cb_vin["prevout"].is_null()); + + // Test GET /tx/:txid/status (confirmed) + let res = get_json(rest_addr, &format!("/tx/{}/status", txid1_confirmed))?; assert_eq!(res["confirmed"].as_bool(), Some(true)); assert_eq!(res["block_height"].as_u64(), Some(102)); + assert!(res["block_hash"].is_string()); + assert!(res["block_time"].as_u64().unwrap() > 0); - let res = get_json(&format!("/tx/{}/status", txid2_mempool))?; + // Test GET /tx/:txid/status (unconfirmed) + let res = get_json(rest_addr, &format!("/tx/{}/status", txid2_mempool))?; assert_eq!(res["confirmed"].as_bool(), Some(false)); assert_eq!(res["block_height"].as_u64(), None); + assert!(res["block_hash"].is_null()); + assert!(res["block_time"].is_null()); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_address() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid1_confirmed = tester.send(&addr1, "1.19123 BTC".parse().unwrap())?; + tester.mine()?; + + let txid2_mempool = tester.send(&addr1, "0.7113 BTC".parse().unwrap())?; // Test GET /address/:address - let res = get_json(&format!("/address/{}", addr1))?; + let res = get_json(rest_addr, &format!("/address/{}", addr1))?; + assert_eq!(res["address"].as_str(), Some(addr1.to_string().as_str())); + + // chain_stats: 1 confirmed funding tx, nothing spent + assert_eq!(res["chain_stats"]["tx_count"].as_u64(), Some(1)); assert_eq!(res["chain_stats"]["funded_txo_count"].as_u64(), Some(1)); + assert_eq!(res["chain_stats"]["spent_txo_count"].as_u64(), Some(0)); #[cfg(not(feature = "liquid"))] - assert_eq!( - res["chain_stats"]["funded_txo_sum"].as_u64(), - Some(119123000) - ); + { + assert_eq!( + res["chain_stats"]["funded_txo_sum"].as_u64(), + Some(119123000) + ); + assert_eq!(res["chain_stats"]["spent_txo_sum"].as_u64(), Some(0)); + } + + // mempool_stats: 1 unconfirmed funding tx; the wallet may also spend + // addr1's confirmed UTXO as an input, so spent_txo_count can be 0 or 1 + assert!(res["mempool_stats"]["tx_count"].as_u64().unwrap() >= 1); assert_eq!(res["mempool_stats"]["funded_txo_count"].as_u64(), Some(1)); + assert!(res["mempool_stats"]["spent_txo_count"].is_u64()); #[cfg(not(feature = "liquid"))] - assert_eq!( - res["mempool_stats"]["funded_txo_sum"].as_u64(), - Some(71130000) - ); + { + assert_eq!( + res["mempool_stats"]["funded_txo_sum"].as_u64(), + Some(71130000) + ); + assert!(res["mempool_stats"]["spent_txo_sum"].is_u64()); + } // Test GET /address/:address/txs - let res = get_json(&format!("/address/{}/txs", addr1))?; + let res = get_json(rest_addr, &format!("/address/{}/txs", addr1))?; let txs = res.as_array().expect("array of transactions"); let mut txids = txs .iter() @@ -83,34 +188,42 @@ fn test_rest() -> Result<()> { // Test GET /address-prefix/:prefix let addr1_prefix = &addr1.to_string()[0..8]; - let res = get_json(&format!("/address-prefix/{}", addr1_prefix))?; + let res = get_json(rest_addr, &format!("/address-prefix/{}", addr1_prefix))?; let found = res.as_array().expect("array of matching addresses"); assert_eq!(found.len(), 1); assert_eq!(found[0].as_str(), Some(addr1.to_string().as_str())); + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_blocks() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + // Test GET /blocks/tip/hash let bestblockhash = tester.node_client().get_best_block_hash()?; - let res = get_plain("/blocks/tip/hash")?; + let res = get_plain(rest_addr, "/blocks/tip/hash")?; 
assert_eq!(res, bestblockhash.to_string()); let bestblockhash = tester.mine()?; - let res = get_plain("/blocks/tip/hash")?; + let res = get_plain(rest_addr, "/blocks/tip/hash")?; assert_eq!(res, bestblockhash.to_string()); // Test GET /blocks/tip/height let bestblockheight = tester.node_client().get_block_count()?; - let res = get_plain("/blocks/tip/height")?; + let res = get_plain(rest_addr, "/blocks/tip/height")?; assert_eq!( res.parse::().expect("tip block height as an int"), bestblockheight ); // Test GET /block-height/:height - let res = get_plain(&format!("/block-height/{}", bestblockheight))?; + let res = get_plain(rest_addr, &format!("/block-height/{}", bestblockheight))?; assert_eq!(res, bestblockhash.to_string()); // Test GET /blocks - let res = get_json("/blocks")?; + let res = get_json(rest_addr, "/blocks")?; let last_blocks = res.as_array().unwrap(); assert_eq!(last_blocks.len(), 10); // limited to 10 per page assert_eq!( @@ -118,19 +231,62 @@ fn test_rest() -> Result<()> { Some(bestblockhash.to_string().as_str()) ); + // Verify first block (tip) has correct height + assert_eq!( + last_blocks[0]["height"].as_u64(), + Some(bestblockheight) + ); + + // Verify block list entries have all BlockValue fields with value checks + for block in last_blocks { + assert!(block["id"].is_string()); + assert!(block["height"].is_u64()); + assert!(block["version"].is_u64()); + assert!(block["timestamp"].as_u64().unwrap() > 0); + assert!(block["tx_count"].as_u64().unwrap() >= 1); // coinbase at minimum + assert!(block["size"].as_u64().unwrap() > 0); + assert!(block["weight"].as_u64().unwrap() > 0); + assert!(block["merkle_root"].is_string()); + assert!(block["mediantime"].as_u64().unwrap() > 0); + #[cfg(not(feature = "liquid"))] + { + assert!(block["nonce"].is_u64()); + assert!(block["bits"].is_u64()); + assert!(block["difficulty"].is_f64()); + } + } + + // Verify previousblockhash links blocks together correctly + for i in 0..last_blocks.len() - 1 { + assert_eq!( + last_blocks[i]["previousblockhash"].as_str(), + last_blocks[i + 1]["id"].as_str() + ); + } + let bestblockhash = tester.mine()?; - let res = get_json("/blocks")?; + let res = get_json(rest_addr, "/blocks")?; let last_blocks = res.as_array().unwrap(); assert_eq!( last_blocks[0]["id"].as_str(), Some(bestblockhash.to_string().as_str()) ); + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_block() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + // Test GET /block/:hash let txid = tester.send(&addr1, "0.98765432 BTC".parse().unwrap())?; let blockhash = tester.mine()?; - let res = get_json(&format!("/block/{}", blockhash))?; + let res = get_json(rest_addr, &format!("/block/{}", blockhash))?; assert_eq!(res["id"].as_str(), Some(blockhash.to_string().as_str())); assert_eq!( res["height"].as_u64(), @@ -138,8 +294,40 @@ fn test_rest() -> Result<()> { ); assert_eq!(res["tx_count"].as_u64(), Some(2)); + // Cross-reference BlockValue fields against bitcoind's getblockheader + let node_header: Value = + tester.call("getblockheader", &[blockhash.to_string().into()])?; + assert_eq!(res["version"].as_u64(), node_header["version"].as_u64()); + assert_eq!(res["timestamp"].as_u64(), node_header["time"].as_u64()); + assert_eq!( + res["merkle_root"].as_str(), + node_header["merkleroot"].as_str() + ); + assert_eq!( + res["previousblockhash"].as_str(), + node_header["previousblockhash"].as_str() + ); + assert_eq!(res["mediantime"].as_u64(), 
node_header["mediantime"].as_u64()); + assert!(res["size"].as_u64().unwrap() > 0); + assert!(res["weight"].as_u64().unwrap() > 0); + #[cfg(not(feature = "liquid"))] + { + assert_eq!(res["nonce"].as_u64(), node_header["nonce"].as_u64()); + // bits is serialized differently (compact target int vs hex string), just check presence + assert!(res["bits"].is_u64()); + assert!(res["difficulty"].is_f64()); + } + + // Test GET /block/:hash/raw + let mut res = get(rest_addr, &format!("/block/{}/raw", blockhash))?.into_reader(); + let mut rest_rawblock = Vec::new(); + res.read_to_end(&mut rest_rawblock).unwrap(); + let node_hexblock = // uses low-level call() to support Elements + tester.call::("getblock", &[blockhash.to_string().into(), 0.into()])?; + assert_eq!(rest_rawblock, Vec::from_hex(&node_hexblock).unwrap()); + // Test GET /block/:hash/txs - let res = get_json(&format!("/block/{}/txs", blockhash))?; + let res = get_json(rest_addr, &format!("/block/{}/txs", blockhash))?; let block_txs = res.as_array().expect("list of txs"); assert_eq!(block_txs.len(), 2); assert_eq!(block_txs[0]["vin"][0]["is_coinbase"].as_bool(), Some(true)); @@ -149,33 +337,64 @@ fn test_rest() -> Result<()> { ); // Test GET /block/:hash/txid/:index - let res = get_plain(&format!("/block/{}/txid/1", blockhash))?; + let res = get_plain(rest_addr, &format!("/block/{}/txid/1", blockhash))?; assert_eq!(res, txid.to_string()); + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_mempool() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + // Test GET /mempool/txids let txid = tester.send(&addr1, "3.21 BTC".parse().unwrap())?; - let res = get_json("/mempool/txids")?; + let res = get_json(rest_addr, "/mempool/txids")?; let mempool_txids = res.as_array().expect("list of txids"); assert_eq!(mempool_txids.len(), 1); assert_eq!(mempool_txids[0].as_str(), Some(txid.to_string().as_str())); tester.send(&addr1, "0.0001 BTC".parse().unwrap())?; - let res = get_json("/mempool/txids")?; + let res = get_json(rest_addr, "/mempool/txids")?; let mempool_txids = res.as_array().expect("list of txids"); assert_eq!(mempool_txids.len(), 2); // Test GET /mempool - assert_eq!(get_json("/mempool")?["count"].as_u64(), Some(2)); + let mempool_stats = get_json(rest_addr, "/mempool")?; + assert_eq!(mempool_stats["count"].as_u64(), Some(2)); + assert!(mempool_stats["vsize"].as_u64().unwrap() > 0); + assert!(mempool_stats["total_fee"].as_u64().unwrap() > 0); + assert!(mempool_stats["fee_histogram"].is_array()); tester.send(&addr1, "0.00022 BTC".parse().unwrap())?; - assert_eq!(get_json("/mempool")?["count"].as_u64(), Some(3)); + assert_eq!(get_json(rest_addr, "/mempool")?["count"].as_u64(), Some(3)); tester.mine()?; - assert_eq!(get_json("/mempool")?["count"].as_u64(), Some(0)); + let mempool_after = get_json(rest_addr, "/mempool")?; + assert_eq!(mempool_after["count"].as_u64(), Some(0)); + assert_eq!(mempool_after["vsize"].as_u64(), Some(0)); + assert_eq!(mempool_after["total_fee"].as_u64(), Some(0)); + assert_eq!( + mempool_after["fee_histogram"].as_array().unwrap().len(), + 0 + ); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_broadcast_tx() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; // Test POST /tx let txid = tester.send(&addr1, "9.9 BTC".parse().unwrap())?; - let tx_hex = get_plain(&format!("/tx/{}/hex", txid))?; + let tx_hex = get_plain(rest_addr, 
&format!("/tx/{}/hex", txid))?; // Re-send the tx created by send(). It'll be accepted again since its still in the mempool. let broadcast1_resp = ureq::post(&format!("http://{}/tx", rest_addr)).send_string(&tx_hex)?; assert_eq!(broadcast1_resp.status(), 200); @@ -186,6 +405,14 @@ fn test_rest() -> Result<()> { let broadcast2_resp = broadcast2_res.unwrap_err().into_response().unwrap(); assert_eq!(broadcast2_resp.status(), 400); + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_package_validation() -> Result<()> { + let (rest_handle, rest_addr, _tester) = common::init_rest_tester().unwrap(); + // Test POST /txs/package - simple validation test // Test with invalid JSON first to verify the endpoint exists let invalid_package_result = ureq::post(&format!("http://{}/txs/package", rest_addr)) @@ -207,88 +434,678 @@ fn test_rest() -> Result<()> { let status = empty_package_resp.status(); assert_eq!(status, 400); - // bitcoin 28.0 only tests - submitpackage - #[cfg(all(not(feature = "liquid"), feature = "bitcoind_28_0"))] + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_block_status() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + let blockhash1 = tester.mine()?; + let blockhash2 = tester.mine()?; // tip + + let block_count = tester.node_client().get_block_count()?; + + // Non-tip block should have next_best pointing to next block + let res = get_json(rest_addr, &format!("/block/{}/status", blockhash1))?; + assert_eq!(res["in_best_chain"].as_bool(), Some(true)); + assert_eq!(res["height"].as_u64(), Some(block_count - 1)); + assert_eq!( + res["next_best"].as_str(), + Some(blockhash2.to_string().as_str()) + ); + + // Tip block should have next_best as null + let res = get_json(rest_addr, &format!("/block/{}/status", blockhash2))?; + assert_eq!(res["in_best_chain"].as_bool(), Some(true)); + assert_eq!(res["height"].as_u64(), Some(block_count)); + assert!(res["next_best"].is_null()); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_block_txids() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + let blockhash = tester.mine()?; + + let res = get_json(rest_addr, &format!("/block/{}/txids", blockhash))?; + let txids = res.as_array().expect("array of txids"); + + // Should match tx_count from /block/:hash + let block = get_json(rest_addr, &format!("/block/{}", blockhash))?; + assert_eq!(txids.len(), block["tx_count"].as_u64().unwrap() as usize); + + // First txid should be the coinbase (not our user txid) + assert_ne!( + txids[0].as_str(), + Some(txid.to_string().as_str()), + "first txid should be coinbase, not user tx" + ); + // Our txid should be present + assert!(txids + .iter() + .any(|t| t.as_str() == Some(&txid.to_string()))); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_block_header() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let blockhash = tester.mine()?; + let header_hex = get_plain(rest_addr, &format!("/block/{}/header", blockhash))?; + + // Verify it's valid hex + let header_bytes = Vec::from_hex(&header_hex).expect("valid hex"); + assert!(!header_bytes.is_empty()); + + // On Bitcoin, verify the header is 80 bytes and its hash matches the block hash + #[cfg(not(feature = "liquid"))] { - // Test with 
a real transaction package - create parent-child transactions - // submitpackage requires between 2 and 25 transactions with proper dependencies - let package_addr1 = tester.newaddress()?; - let package_addr2 = tester.newaddress()?; - - // Create parent transaction - let tx1_result = tester.node_client().call::( - "createrawtransaction", - &[ - serde_json::json!([]), - serde_json::json!({package_addr1.to_string(): 0.5}), - ], - )?; - let tx1_unsigned_hex = tx1_result.as_str().expect("raw tx hex").to_string(); - - let tx1_fund_result = tester - .node_client() - .call::("fundrawtransaction", &[serde_json::json!(tx1_unsigned_hex)])?; - let tx1_funded_hex = tx1_fund_result["hex"] - .as_str() - .expect("funded tx hex") - .to_string(); - - let tx1_sign_result = tester.node_client().call::( - "signrawtransactionwithwallet", - &[serde_json::json!(tx1_funded_hex)], + assert_eq!(header_bytes.len(), 80); + let header: bitcoin::block::Header = + bitcoin::consensus::deserialize(&header_bytes).expect("valid header"); + assert_eq!(header.block_hash().to_string(), blockhash.to_string()); + } + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_address_mempool_txs() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + + // Send tx to address but don't mine + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + + // Verify it appears in mempool txs + let res = get_json(rest_addr, &format!("/address/{}/txs/mempool", addr1))?; + let txs = res.as_array().expect("array of txs"); + assert_eq!(txs.len(), 1); + assert_eq!(txs[0]["txid"].as_str(), Some(txid.to_string().as_str())); + assert_eq!(txs[0]["status"]["confirmed"].as_bool(), Some(false)); + assert!(txs[0]["fee"].as_u64().unwrap() > 0); + + // Mine and verify mempool list is now empty + tester.mine()?; + let res = get_json(rest_addr, &format!("/address/{}/txs/mempool", addr1))?; + let txs = res.as_array().expect("array of txs"); + assert!(txs.is_empty()); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_address_utxo() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + + // Send to address and mine - verify confirmed UTXO + let sent_txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + tester.mine()?; + + let res = get_json(rest_addr, &format!("/address/{}/utxo", addr1))?; + let utxos = res.as_array().expect("array of utxos"); + assert_eq!(utxos.len(), 1); + assert_eq!( + utxos[0]["txid"].as_str(), + Some(sent_txid.to_string().as_str()) + ); + assert!(utxos[0]["vout"].is_u64()); + assert_eq!(utxos[0]["status"]["confirmed"].as_bool(), Some(true)); + assert_eq!(utxos[0]["status"]["block_height"].as_u64(), Some(102)); + assert!(utxos[0]["status"]["block_hash"].is_string()); + assert!(utxos[0]["status"]["block_time"].as_u64().unwrap() > 0); + #[cfg(not(feature = "liquid"))] + assert_eq!(utxos[0]["value"].as_u64(), Some(50000000)); + + // Send again without mining - the wallet may spend the existing UTXO as input, + // so we just verify that UTXOs exist and have correct fields + tester.send(&addr1, "0.3 BTC".parse().unwrap())?; + let res = get_json(rest_addr, &format!("/address/{}/utxo", addr1))?; + let utxos = res.as_array().expect("array of utxos"); + assert!(!utxos.is_empty()); + for utxo in utxos { + assert!(utxo["txid"].is_string()); + assert!(utxo["vout"].is_u64()); + assert!(utxo["status"].is_object()); + 
assert!(utxo["status"]["confirmed"].is_boolean()); + } + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_scripthash() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + tester.mine()?; + tester.send(&addr1, "0.3 BTC".parse().unwrap())?; // mempool tx + + // Get the scriptpubkey from a tx to addr1 + let addr_txs = get_json(rest_addr, &format!("/address/{}/txs", addr1))?; + let txs = addr_txs.as_array().unwrap(); + let vout = txs[0]["vout"] + .as_array() + .unwrap() + .iter() + .find(|v| v["scriptpubkey_address"].as_str() == Some(&addr1.to_string())) + .expect("vout to our address"); + let scriptpubkey_hex = vout["scriptpubkey"].as_str().unwrap(); + let scriptpubkey_bytes = Vec::from_hex(scriptpubkey_hex).unwrap(); + + // Compute scripthash (SHA256 of scriptpubkey bytes) + let scripthash = sha256::Hash::hash(&scriptpubkey_bytes).to_string(); + + // Verify /scripthash/:hash matches /address/:address + // (the top-level objects differ by "address" vs "scripthash" key, so compare stats) + let addr_stats = get_json(rest_addr, &format!("/address/{}", addr1))?; + let sh_stats = get_json(rest_addr, &format!("/scripthash/{}", scripthash))?; + assert_eq!(addr_stats["chain_stats"], sh_stats["chain_stats"]); + assert_eq!(addr_stats["mempool_stats"], sh_stats["mempool_stats"]); + + // Verify /scripthash/:hash/txs matches /address/:address/txs + let addr_txs = get_json(rest_addr, &format!("/address/{}/txs", addr1))?; + let sh_txs = get_json(rest_addr, &format!("/scripthash/{}/txs", scripthash))?; + assert_eq!(addr_txs, sh_txs); + + // Verify /scripthash/:hash/txs/chain matches /address/:address/txs/chain + let addr_chain = get_json(rest_addr, &format!("/address/{}/txs/chain", addr1))?; + let sh_chain = get_json(rest_addr, &format!("/scripthash/{}/txs/chain", scripthash))?; + assert_eq!(addr_chain, sh_chain); + + // Verify /scripthash/:hash/txs/mempool matches /address/:address/txs/mempool + let addr_mempool = get_json(rest_addr, &format!("/address/{}/txs/mempool", addr1))?; + let sh_mempool = get_json(rest_addr, &format!("/scripthash/{}/txs/mempool", scripthash))?; + assert_eq!(addr_mempool, sh_mempool); + + // Verify /scripthash/:hash/utxo matches /address/:address/utxo + let addr_utxo = get_json(rest_addr, &format!("/address/{}/utxo", addr1))?; + let sh_utxo = get_json(rest_addr, &format!("/scripthash/{}/utxo", scripthash))?; + assert_eq!(addr_utxo, sh_utxo); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_tx_outspends() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + tester.mine()?; + + // Check outspends of a freshly mined tx - outputs should be unspent + let res = get_json(rest_addr, &format!("/tx/{}/outspends", txid))?; + let outspends = res.as_array().expect("array of outspends"); + assert!(!outspends.is_empty()); + for outspend in outspends { + assert_eq!(outspend["spent"].as_bool(), Some(false)); + assert!(outspend["txid"].is_null()); + assert!(outspend["vin"].is_null()); + assert!(outspend["status"].is_null()); + } + + // The send tx spent some input. Check that the parent tx shows a spent output. 
+ let tx_detail = get_json(rest_addr, &format!("/tx/{}", txid))?; + let spent_txid = tx_detail["vin"][0]["txid"].as_str().unwrap(); + let spent_vout = tx_detail["vin"][0]["vout"].as_u64().unwrap(); + let spent_vin = 0u64; // our tx is the spender, using vin index 0 + + let res = get_json(rest_addr, &format!("/tx/{}/outspends", spent_txid))?; + let outspends = res.as_array().expect("array of outspends"); + let spent_entry = &outspends[spent_vout as usize]; + assert_eq!(spent_entry["spent"].as_bool(), Some(true)); + assert_eq!( + spent_entry["txid"].as_str(), + Some(txid.to_string().as_str()) + ); + assert_eq!(spent_entry["vin"].as_u64(), Some(spent_vin)); + assert_eq!(spent_entry["status"]["confirmed"].as_bool(), Some(true)); + assert_eq!(spent_entry["status"]["block_height"].as_u64(), Some(102)); + assert!(spent_entry["status"]["block_hash"].is_string()); + assert!(spent_entry["status"]["block_time"].as_u64().unwrap() > 0); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_tx_merkle_proof() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + tester.mine()?; + + let res = get_json(rest_addr, &format!("/tx/{}/merkle-proof", txid))?; + assert_eq!(res["block_height"].as_u64(), Some(102)); + let merkle = res["merkle"].as_array().expect("merkle array"); + assert!(!merkle.is_empty()); + for entry in merkle { + let hex = entry.as_str().expect("merkle entry is string"); + assert_eq!(hex.len(), 64, "merkle hash should be 64 hex chars"); + assert!( + hex.chars().all(|c| c.is_ascii_hexdigit()), + "merkle hash should be valid hex" + ); + } + assert!(res["pos"].as_u64().is_some()); + + rest_handle.stop(); + Ok(()) +} + +#[cfg(not(feature = "liquid"))] +#[test] +fn test_rest_tx_merkleblock_proof() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + tester.mine()?; + + let hex = get_plain(rest_addr, &format!("/tx/{}/merkleblock-proof", txid))?; + assert!(!hex.is_empty()); + // Verify it's valid hex + let bytes = Vec::from_hex(&hex).expect("valid hex"); + assert!(!bytes.is_empty()); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_mempool_recent() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid1 = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + let txid2 = tester.send(&addr1, "0.3 BTC".parse().unwrap())?; + + let res = get_json(rest_addr, "/mempool/recent")?; + let recent = res.as_array().expect("array of recent txs"); + assert!(recent.len() >= 2); + + for entry in recent { + assert!(entry["txid"].is_string()); + assert!(entry["fee"].as_u64().unwrap() > 0); + assert!(entry["vsize"].as_u64().unwrap() > 0); + #[cfg(not(feature = "liquid"))] + assert!(entry["value"].as_u64().unwrap() > 0); + } + + // Verify our sent txids are included + let recent_txids: HashSet<&str> = recent + .iter() + .map(|e| e["txid"].as_str().unwrap()) + .collect(); + assert!(recent_txids.contains(txid1.to_string().as_str())); + assert!(recent_txids.contains(txid2.to_string().as_str())); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_fee_estimates() -> Result<()> { + let (rest_handle, rest_addr, _tester) = common::init_rest_tester().unwrap(); + + let res = get_json(rest_addr, "/fee-estimates")?; + // On 
regtest, may be empty but should be a JSON object + assert!(res.is_object()); + + rest_handle.stop(); + Ok(()) +} + +#[test] +fn test_rest_broadcast_get() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let addr1 = tester.newaddress()?; + let txid = tester.send(&addr1, "0.5 BTC".parse().unwrap())?; + let tx_hex = get_plain(rest_addr, &format!("/tx/{}/hex", txid))?; + + // Re-send via GET /broadcast?tx=:txhex (legacy endpoint) + let res = get_plain(rest_addr, &format!("/broadcast?tx={}", tx_hex))?; + assert_eq!(res, txid.to_string()); + + rest_handle.stop(); + Ok(()) +} + +#[cfg(not(feature = "liquid"))] +#[test] +fn test_rest_reorg() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let get_conf_height = |txid| -> Result> { + Ok(get_json(rest_addr, &format!("/tx/{}/status", txid))?["block_height"].as_u64()) + }; + let get_chain_stats = |addr| -> Result { + Ok(get_json(rest_addr, &format!("/address/{}", addr))?["chain_stats"].take()) + }; + let get_chain_txs = |addr| -> Result> { + Ok(from_value(get_json( + rest_addr, + &format!("/address/{}/txs/chain", addr), + )?)?) + }; + let get_outspend = |outpoint: &bitcoin::OutPoint| -> Result { + get_json( + rest_addr, + &format!("/tx/{}/outspend/{}", outpoint.txid, outpoint.vout), + ) + }; + + let init_height = tester.node_client().get_block_count()?; + + let address = tester.newaddress()?; + let miner_address = tester.newaddress()?; + + let txid_a = tester.send(&address, Amount::from_sat(100000))?; + let txid_b = tester.send(&address, Amount::from_sat(200000))?; + let txid_c = tester.send(&address, Amount::from_sat(500000))?; + + let tx_a = tester.get_raw_transaction(&txid_a, None)?; + let tx_b = tester.get_raw_transaction(&txid_b, None)?; + let tx_c = tester.get_raw_transaction(&txid_c, None)?; + + // Confirm tx_a, tx_b and tx_c + let blockhash_1 = tester.mine()?; + + assert_eq!( + get_plain(rest_addr, "/blocks/tip/height")?, + (init_height + 1).to_string() + ); + assert_eq!( + get_plain(rest_addr, "/blocks/tip/hash")?, + blockhash_1.to_string() + ); + assert_eq!(get_conf_height(&txid_a)?, Some(init_height + 1)); + assert_eq!(get_conf_height(&txid_b)?, Some(init_height + 1)); + assert_eq!(get_conf_height(&txid_c)?, Some(init_height + 1)); + assert_eq!( + get_chain_stats(&address)?["funded_txo_sum"].as_u64(), + Some(800000) + ); + assert_eq!(get_chain_txs(&address)?.len(), 3); + + let c_outspend = get_outspend(&tx_c.input[0].previous_output)?; + assert_eq!( + c_outspend["txid"].as_str(), + Some(txid_c.to_string().as_str()) + ); + assert_eq!( + c_outspend["status"]["block_height"].as_u64(), + Some(init_height + 1) + ); + + // Reorg the last block, re-confirm tx_a at the same height + tester.invalidate_block(&blockhash_1)?; + tester.call::( + "generateblock", + &[ + miner_address.to_string().into(), + [txid_a.to_string()].into(), + ], + )?; + // Re-confirm tx_b at a different height + tester.call::( + "generateblock", + &[ + miner_address.to_string().into(), + [txid_b.to_string()].into(), + ], + )?; + // Don't re-confirm tx_c at all + + let blockhash_2 = tester.get_best_block_hash()?; + + tester.sync()?; + + assert_eq!( + get_plain(rest_addr, "/blocks/tip/height")?, + (init_height + 2).to_string() + ); + assert_eq!( + get_plain(rest_addr, "/blocks/tip/hash")?, + blockhash_2.to_string() + ); + + // Test address stats (GET /address/:address) + assert_eq!( + get_chain_stats(&address)?["funded_txo_sum"].as_u64(), + Some(300000) + ); + + // Test 
address history (GET /address/:address/txs/chain) + let addr_txs = get_chain_txs(&address)?; + assert_eq!(addr_txs.len(), 2); + assert_eq!( + addr_txs[0]["txid"].as_str(), + Some(txid_b.to_string().as_str()) + ); + assert_eq!( + addr_txs[0]["status"]["block_height"].as_u64(), + Some(init_height + 2) + ); + assert_eq!( + addr_txs[1]["txid"].as_str(), + Some(txid_a.to_string().as_str()) + ); + assert_eq!( + addr_txs[1]["status"]["block_height"].as_u64(), + Some(init_height + 1) + ); + + // Test transaction status lookup (GET /tx/:txid/status) + assert_eq!(get_conf_height(&txid_a)?, Some(init_height + 1)); + assert_eq!(get_conf_height(&txid_b)?, Some(init_height + 2)); + assert_eq!(get_conf_height(&txid_c)?, None); + + // Test spend edge lookup (GET /tx/:txid/outspend/:vout) + let a_spends = get_outspend(&tx_a.input[0].previous_output)?; + assert_eq!(a_spends["txid"].as_str(), Some(txid_a.to_string().as_str())); + assert_eq!( + a_spends["status"]["block_height"].as_u64(), + Some(init_height + 1) + ); + let b_spends = get_outspend(&tx_b.input[0].previous_output)?; + assert_eq!(b_spends["txid"].as_str(), Some(txid_b.to_string().as_str())); + assert_eq!( + b_spends["status"]["block_height"].as_u64(), + Some(init_height + 2) + ); + let c_spends = get_outspend(&tx_c.input[0].previous_output)?; + assert_eq!(c_spends["status"]["confirmed"].as_bool(), Some(false)); + + // Test a deeper reorg, all the way back to exclude tx_b + tester.generate_to_address(15, &address)?; + tester.sync()?; + tester.invalidate_block(&blockhash_2)?; + + for _ in 0..20 { + // Mine some empty blocks, intentionally without tx_b + tester.call::( + "generateblock", + &[miner_address.to_string().into(), Vec::::new().into()], )?; - let tx1_signed_hex = tx1_sign_result["hex"] - .as_str() - .expect("signed tx hex") - .to_string(); - - // Decode parent transaction to get its txid and find the output to spend - let tx1_decoded = tester - .node_client() - .call::("decoderawtransaction", &[serde_json::json!(tx1_signed_hex)])?; - let tx1_txid = tx1_decoded["txid"].as_str().expect("parent txid"); - - // Find the output going to package_addr1 (the one we want to spend) - let tx1_vouts = tx1_decoded["vout"].as_array().expect("parent vouts"); - let mut spend_vout_index = None; - let mut spend_vout_value = 0u64; - - for (i, vout) in tx1_vouts.iter().enumerate() { - if let Some(script_pub_key) = vout.get("scriptPubKey") { - if let Some(address) = script_pub_key.get("address") { - if address.as_str() == Some(&package_addr1.to_string()) { - spend_vout_index = Some(i); - // Convert from BTC to satoshis - spend_vout_value = - (vout["value"].as_f64().expect("vout value") * 100_000_000.0) as u64; - break; - } + } + tester.sync()?; + + assert_eq!( + get_plain(rest_addr, "/blocks/tip/height")?, + (init_height + 21).to_string() + ); + assert_eq!( + get_plain(rest_addr, "/blocks/tip/hash")?, + tester.get_best_block_hash()?.to_string() + ); + + assert_eq!( + get_chain_stats(&address)?["funded_txo_sum"].as_u64(), + Some(100000) + ); + + let addr_txs = get_chain_txs(&address)?; + assert_eq!(addr_txs.len(), 1); + assert_eq!( + addr_txs[0]["txid"].as_str(), + Some(txid_a.to_string().as_str()) + ); + assert_eq!( + addr_txs[0]["status"]["block_height"].as_u64(), + Some(init_height + 1) + ); + + assert_eq!(get_conf_height(&txid_a)?, Some(init_height + 1)); + assert_eq!(get_conf_height(&txid_b)?, None); + assert_eq!(get_conf_height(&txid_c)?, None); + + let a_spends = get_outspend(&tx_a.input[0].previous_output)?; + assert_eq!( + 
a_spends["status"]["block_height"].as_u64(), + Some(init_height + 1) + ); + let b_spends = get_outspend(&tx_b.input[0].previous_output)?; + assert_eq!(b_spends["spent"].as_bool(), Some(false)); + let c_spends = get_outspend(&tx_b.input[0].previous_output)?; + assert_eq!(c_spends["spent"].as_bool(), Some(false)); + + // Invalidate the tip with no replacement, shortening the chain by one block + tester.invalidate_block(&tester.get_best_block_hash()?)?; + tester.sync()?; + assert_eq!( + get_plain(rest_addr, "/blocks/tip/height")?, + (init_height + 20).to_string() + ); + + // Reorg everything back to genesis + tester.invalidate_block(&tester.get_block_hash(1)?)?; + tester.sync()?; + + assert_eq!(get_plain(rest_addr, "/blocks/tip/height")?, 0.to_string()); + assert_eq!( + get_chain_stats(&address)?["funded_txo_sum"].as_u64(), + Some(0) + ); + assert_eq!(get_chain_txs(&address)?.len(), 0); + assert_eq!(get_conf_height(&txid_a)?, None); + assert_eq!(get_conf_height(&txid_b)?, None); + assert_eq!(get_conf_height(&txid_c)?, None); + let a_spends = get_outspend(&tx_a.input[0].previous_output)?; + assert_eq!(a_spends["spent"].as_bool(), Some(false)); + + rest_handle.stop(); + Ok(()) +} + +// bitcoin 28.0 only tests - submitpackage +#[cfg(all(not(feature = "liquid"), feature = "bitcoind_28_0"))] +#[test] +fn test_rest_submit_package() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + // Test with a real transaction package - create parent-child transactions + // submitpackage requires between 2 and 25 transactions with proper dependencies + let package_addr1 = tester.newaddress()?; + let package_addr2 = tester.newaddress()?; + + // Create parent transaction + let tx1_result = tester.node_client().call::( + "createrawtransaction", + &[ + serde_json::json!([]), + serde_json::json!({package_addr1.to_string(): 0.5}), + ], + )?; + let tx1_unsigned_hex = tx1_result.as_str().expect("raw tx hex").to_string(); + + let tx1_fund_result = tester + .node_client() + .call::("fundrawtransaction", &[serde_json::json!(tx1_unsigned_hex)])?; + let tx1_funded_hex = tx1_fund_result["hex"] + .as_str() + .expect("funded tx hex") + .to_string(); + + let tx1_sign_result = tester.node_client().call::( + "signrawtransactionwithwallet", + &[serde_json::json!(tx1_funded_hex)], + )?; + let tx1_signed_hex = tx1_sign_result["hex"] + .as_str() + .expect("signed tx hex") + .to_string(); + + // Decode parent transaction to get its txid and find the output to spend + let tx1_decoded = tester + .node_client() + .call::("decoderawtransaction", &[serde_json::json!(tx1_signed_hex)])?; + let tx1_txid = tx1_decoded["txid"].as_str().expect("parent txid"); + + // Find the output going to package_addr1 (the one we want to spend) + let tx1_vouts = tx1_decoded["vout"].as_array().expect("parent vouts"); + let mut spend_vout_index = None; + let mut spend_vout_value = 0u64; + + for (i, vout) in tx1_vouts.iter().enumerate() { + if let Some(script_pub_key) = vout.get("scriptPubKey") { + if let Some(address) = script_pub_key.get("address") { + if address.as_str() == Some(&package_addr1.to_string()) { + spend_vout_index = Some(i); + // Convert from BTC to satoshis + spend_vout_value = + (vout["value"].as_f64().expect("vout value") * 100_000_000.0) as u64; + break; } } } + } - let spend_vout_index = spend_vout_index.expect("Could not find output to spend"); - - // Create child transaction that spends from parent - // Leave some satoshis for fee (e.g., 1000 sats) - let child_output_value = 
spend_vout_value - 1000; - let child_output_btc = child_output_value as f64 / 100_000_000.0; - - let tx2_result = tester.node_client().call::( - "createrawtransaction", - &[ - serde_json::json!([{ - "txid": tx1_txid, - "vout": spend_vout_index - }]), - serde_json::json!({package_addr2.to_string(): child_output_btc}), - ], - )?; - let tx2_unsigned_hex = tx2_result.as_str().expect("raw tx hex").to_string(); + let spend_vout_index = spend_vout_index.expect("Could not find output to spend"); + + // Create child transaction that spends from parent + // Leave some satoshis for fee (e.g., 1000 sats) + let child_output_value = spend_vout_value - 1000; + let child_output_btc = child_output_value as f64 / 100_000_000.0; + + let tx2_result = tester.node_client().call::( + "createrawtransaction", + &[ + serde_json::json!([{ + "txid": tx1_txid, + "vout": spend_vout_index + }]), + serde_json::json!({package_addr2.to_string(): child_output_btc}), + ], + )?; + let tx2_unsigned_hex = tx2_result.as_str().expect("raw tx hex").to_string(); - // Sign the child transaction - // We need to provide the parent transaction's output details for signing - let tx2_sign_result = tester.node_client().call::( + // Sign the child transaction + // We need to provide the parent transaction's output details for signing + let tx2_sign_result = tester.node_client().call::( "signrawtransactionwithwallet", &[ serde_json::json!(tx2_unsigned_hex), @@ -300,210 +1117,255 @@ fn test_rest() -> Result<()> { }]) ], )?; - let tx2_signed_hex = tx2_sign_result["hex"] - .as_str() - .expect("signed tx hex") - .to_string(); - - // Debug: try calling submitpackage directly to see the result - eprintln!("Trying submitpackage directly with parent-child transactions..."); - let direct_result = tester.node_client().call::( - "submitpackage", - &[serde_json::json!([ - tx1_signed_hex.clone(), - tx2_signed_hex.clone() - ])], - ); - match direct_result { - Ok(result) => { - eprintln!("Direct submitpackage succeeded: {:#?}", result); - } - Err(e) => { - eprintln!("Direct submitpackage failed: {:?}", e); - } + let tx2_signed_hex = tx2_sign_result["hex"] + .as_str() + .expect("signed tx hex") + .to_string(); + + // Debug: try calling submitpackage directly to see the result + eprintln!("Trying submitpackage directly with parent-child transactions..."); + let direct_result = tester.node_client().call::( + "submitpackage", + &[serde_json::json!([ + tx1_signed_hex.clone(), + tx2_signed_hex.clone() + ])], + ); + match direct_result { + Ok(result) => { + eprintln!("Direct submitpackage succeeded: {:#?}", result); } + Err(e) => { + eprintln!("Direct submitpackage failed: {:?}", e); + } + } - // Now submit this transaction package via the package endpoint - let package_json = - serde_json::json!([tx1_signed_hex.clone(), tx2_signed_hex.clone()]).to_string(); - let package_result = ureq::post(&format!("http://{}/txs/package", rest_addr)) - .set("Content-Type", "application/json") - .send_string(&package_json); + // Now submit this transaction package via the package endpoint + let package_json = + serde_json::json!([tx1_signed_hex.clone(), tx2_signed_hex.clone()]).to_string(); + let package_result = ureq::post(&format!("http://{}/txs/package", rest_addr)) + .set("Content-Type", "application/json") + .send_string(&package_json); - let package_resp = package_result.unwrap(); - assert_eq!(package_resp.status(), 200); - let package_result = package_resp.into_json::()?; + let package_resp = package_result.unwrap(); + assert_eq!(package_resp.status(), 200); + let 
package_result = package_resp.into_json::()?; - // Verify the response structure - assert!(package_result["tx-results"].is_object()); - assert!(package_result["package_msg"].is_string()); + // Verify the response structure + assert!(package_result["tx-results"].is_object()); + assert!(package_result["package_msg"].is_string()); - let tx_results = package_result["tx-results"].as_object().unwrap(); - assert_eq!(tx_results.len(), 2); + let tx_results = package_result["tx-results"].as_object().unwrap(); + assert_eq!(tx_results.len(), 2); - // The transactions should be processed (whether accepted or rejected) - assert!(!tx_results.is_empty()); - } + // The transactions should be processed (whether accepted or rejected) + assert!(!tx_results.is_empty()); - // Elements-only tests - #[cfg(feature = "liquid")] - { - // Test confidential transactions - { - let (c_addr, uc_addr) = tester.ct_newaddress()?; - let txid = tester.send(&c_addr, "3.5 BTC".parse().unwrap())?; - tester.mine()?; - - let tx = get_json(&format!("/tx/{}", txid))?; - log::debug!("blinded tx = {:#?}", tx); - assert_eq!(tx["status"]["confirmed"].as_bool(), Some(true)); - let outs = tx["vout"].as_array().expect("array of outs"); - let vout = outs - .iter() - .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string())) - .expect("our output"); - assert!(vout["value"].is_null()); - assert!(vout["valuecommitment"].is_string()); - assert!(vout["assetcommitment"].is_string()); - } + rest_handle.stop(); + Ok(()) +} - // Test blinded asset issuance - { - use bitcoin::hashes::{sha256, Hash}; - let contract_hash = sha256::Hash::hash(&[0x11, 0x22, 0x33, 0x44]).to_string(); - let contract_hash = contract_hash.as_str(); - let issuance = tester.node_client().call::( - "issueasset", - &[1.5.into(), 0.into(), true.into(), contract_hash.into()], - )?; - tester.mine()?; - - let assetid = issuance["asset"].as_str().expect("asset id"); - let issuance_txid = issuance["txid"].as_str().expect("issuance txid"); - - // Test GET /asset/:assetid - let asset = get_json(&format!("/asset/{}", assetid))?; - let stats = &asset["chain_stats"]; - assert_eq!(asset["asset_id"].as_str(), Some(assetid)); - assert_eq!(asset["issuance_txin"]["txid"].as_str(), Some(issuance_txid)); - assert_eq!(asset["contract_hash"].as_str(), Some(contract_hash)); - assert_eq!(asset["status"]["confirmed"].as_bool(), Some(true)); - assert_eq!(stats["issuance_count"].as_u64(), Some(1)); - assert_eq!(stats["has_blinded_issuances"].as_bool(), Some(true)); - assert_eq!(stats["issued_amount"].as_u64(), Some(0)); - - // Test GET /tx/:txid for issuance tx - let issuance_tx = get_json(&format!("/tx/{}", issuance_txid))?; - let issuance_in_index = asset["issuance_txin"]["vin"].as_u64().unwrap(); - let issuance_in = &issuance_tx["vin"][issuance_in_index as usize]; - let issuance_data = &issuance_in["issuance"]; - assert_eq!(issuance_data["asset_id"].as_str(), Some(assetid)); - assert_eq!(issuance_data["is_reissuance"].as_bool(), Some(false)); - assert_eq!(issuance_data["contract_hash"].as_str(), Some(contract_hash)); - assert!(issuance_data["assetamount"].is_null()); - assert!(issuance_data["assetamountcommitment"].is_string()); - } +// Elements-only tests - // Test unblinded asset issuance - { - let issuance = tester - .node_client() - .call::("issueasset", &[1.5.into(), 0.into(), false.into()])?; - tester.mine()?; - let assetid = issuance["asset"].as_str().expect("asset id"); - let issuance_txid = issuance["txid"].as_str().expect("issuance txid"); - - // Test GET 
/asset/:assetid - let asset = get_json(&format!("/asset/{}", assetid))?; - let stats = &asset["chain_stats"]; - assert_eq!(stats["has_blinded_issuances"].as_bool(), Some(false)); - assert_eq!(stats["issued_amount"].as_u64(), Some(150000000)); - - // Test GET /tx/:txid for issuance tx - let issuance_tx = get_json(&format!("/tx/{}", issuance_txid))?; - let issuance_in_index = asset["issuance_txin"]["vin"].as_u64().unwrap(); - let issuance_in = &issuance_tx["vin"][issuance_in_index as usize]; - let issuance_data = &issuance_in["issuance"]; - assert_eq!(issuance_data["assetamount"].as_u64(), Some(150000000)); - assert!(issuance_data["assetamountcommitment"].is_null()); - } +#[cfg(feature = "liquid")] +#[test] +fn test_rest_liquid_confidential_tx() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); - // Test a regular (non-issuance) transaction sending an issued asset - { - let issuance = tester - .node_client() - .call::("issueasset", &[1.5.into(), 0.into(), false.into()])?; - let assetid = issuance["asset"].as_str().expect("asset id"); - tester.mine()?; - - let (c_addr, uc_addr) = tester.ct_newaddress()?; - - // With blinding off - let txid = tester.send_asset( - &uc_addr, - "0.3 BTC".parse().unwrap(), // not actually BTC, but this is what Amount expects - assetid.parse().unwrap(), - )?; - let tx = get_json(&format!("/tx/{}", txid))?; - let outs = tx["vout"].as_array().expect("array of outs"); - let vout = outs - .iter() - .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string())) - .expect("our output"); - assert_eq!(vout["asset"].as_str(), Some(assetid)); - assert_eq!(vout["value"].as_u64(), Some(30000000)); - - // With blinding on - let txid = tester.send_asset( - &c_addr, - "0.3 BTC".parse().unwrap(), - assetid.parse().unwrap(), - )?; - let tx = get_json(&format!("/tx/{}", txid))?; - let outs = tx["vout"].as_array().expect("array of outs"); - let vout = outs - .iter() - .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string())) - .expect("our output"); - assert!(vout["asset"].is_null()); - assert!(vout["value"].is_null()); - assert!(vout["assetcommitment"].is_string()); - assert!(vout["valuecommitment"].is_string()); - } + let (c_addr, uc_addr) = tester.ct_newaddress()?; + let txid = tester.send(&c_addr, "3.5 BTC".parse().unwrap())?; + tester.mine()?; - // Test GET /block/:hash - { - let block1_hash = get_plain("/block-height/1")?; - let block1 = get_json(&format!("/block/{}", block1_hash))?; - - // No PoW-related stuff - assert!(block1["bits"].is_null()); - assert!(block1["nonce"].is_null()); - assert!(block1["difficulty"].is_null()); - - // Dynamic Federations (dynafed) fields - // Block #1 should have the Full dynafed params - // See https://docs.rs/elements/latest/elements/dynafed/enum.Params.html - assert!(block1["ext"]["current"]["signblockscript"].is_string()); - assert!(block1["ext"]["current"]["fedpegscript"].is_string()); - assert!(block1["ext"]["current"]["fedpeg_program"].is_string()); - assert!(block1["ext"]["current"]["signblock_witness_limit"].is_u64()); - assert!(block1["ext"]["current"]["extension_space"].is_array()); - assert!(block1["ext"]["proposed"].is_object()); - assert!(block1["ext"]["signblock_witness"].is_array()); - - // Block #2 should have the Compact params - let block2_hash = get_plain("/block-height/2")?; - let block2 = get_json(&format!("/block/{}", block2_hash))?; - assert!(block2["ext"]["current"]["signblockscript"].is_string()); - 
assert!(block2["ext"]["current"]["signblock_witness_limit"].is_u64()); - // With the `elided_root` in place of `fedpegscript`/`fedpeg_program`/`extension_space`` - assert!(block2["ext"]["current"]["elided_root"].is_string()); - assert!(block2["ext"]["current"]["fedpegscript"].is_null()); - assert!(block2["ext"]["current"]["fedpeg_program"].is_null()); - assert!(block2["ext"]["current"]["extension_space"].is_null()); - } - } + let tx = get_json(rest_addr, &format!("/tx/{}", txid))?; + log::debug!("blinded tx = {:#?}", tx); + assert_eq!(tx["status"]["confirmed"].as_bool(), Some(true)); + assert_eq!(tx["status"]["block_height"].as_u64(), Some(102)); + assert!(tx["status"]["block_hash"].is_string()); + let outs = tx["vout"].as_array().expect("array of outs"); + let vout = outs + .iter() + .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string())) + .expect("our output"); + assert!(vout["value"].is_null()); + assert!(vout["valuecommitment"].is_string()); + assert!(vout["assetcommitment"].is_string()); + assert!(vout["scriptpubkey_type"].is_string()); + + rest_handle.stop(); + Ok(()) +} + +#[cfg(feature = "liquid")] +#[test] +fn test_rest_liquid_blinded_issuance() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + use bitcoin::hashes::{sha256, Hash}; + let contract_hash = sha256::Hash::hash(&[0x11, 0x22, 0x33, 0x44]).to_string(); + let contract_hash = contract_hash.as_str(); + let issuance = tester.node_client().call::( + "issueasset", + &[1.5.into(), 0.into(), true.into(), contract_hash.into()], + )?; + tester.mine()?; + + let assetid = issuance["asset"].as_str().expect("asset id"); + let issuance_txid = issuance["txid"].as_str().expect("issuance txid"); + + // Test GET /asset/:assetid + let asset = get_json(rest_addr, &format!("/asset/{}", assetid))?; + let stats = &asset["chain_stats"]; + assert_eq!(asset["asset_id"].as_str(), Some(assetid)); + assert_eq!(asset["issuance_txin"]["txid"].as_str(), Some(issuance_txid)); + assert_eq!(asset["contract_hash"].as_str(), Some(contract_hash)); + assert_eq!(asset["status"]["confirmed"].as_bool(), Some(true)); + assert_eq!(stats["issuance_count"].as_u64(), Some(1)); + assert_eq!(stats["has_blinded_issuances"].as_bool(), Some(true)); + assert_eq!(stats["issued_amount"].as_u64(), Some(0)); + + // Test GET /tx/:txid for issuance tx + let issuance_tx = get_json(rest_addr, &format!("/tx/{}", issuance_txid))?; + let issuance_in_index = asset["issuance_txin"]["vin"].as_u64().unwrap(); + let issuance_in = &issuance_tx["vin"][issuance_in_index as usize]; + let issuance_data = &issuance_in["issuance"]; + assert_eq!(issuance_data["asset_id"].as_str(), Some(assetid)); + assert_eq!(issuance_data["is_reissuance"].as_bool(), Some(false)); + assert_eq!(issuance_data["contract_hash"].as_str(), Some(contract_hash)); + assert!(issuance_data["asset_entropy"].is_string()); + assert!(issuance_data["assetamount"].is_null()); + assert!(issuance_data["assetamountcommitment"].is_string()); + + // Verify asset stats + // TODO properly validate asset stats + assert_eq!(stats["tx_count"].as_u64(), Some(1)); + + rest_handle.stop(); + Ok(()) +} + +#[cfg(feature = "liquid")] +#[test] +fn test_rest_liquid_unblinded_issuance() -> Result<()> { + let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap(); + + let issuance = tester + .node_client() + .call::("issueasset", &[1.5.into(), 0.into(), false.into()])?; + tester.mine()?; + let assetid = issuance["asset"].as_str().expect("asset id"); + let 
issuance_txid = issuance["txid"].as_str().expect("issuance txid");
+
+    // Test GET /asset/:assetid
+    let asset = get_json(rest_addr, &format!("/asset/{}", assetid))?;
+    let stats = &asset["chain_stats"];
+    assert_eq!(stats["has_blinded_issuances"].as_bool(), Some(false));
+    assert_eq!(stats["issued_amount"].as_u64(), Some(150000000));
+    assert_eq!(stats["issuance_count"].as_u64(), Some(1));
+    assert_eq!(stats["tx_count"].as_u64(), Some(1));
+
+    // Test GET /tx/:txid for issuance tx
+    let issuance_tx = get_json(rest_addr, &format!("/tx/{}", issuance_txid))?;
+    let issuance_in_index = asset["issuance_txin"]["vin"].as_u64().unwrap();
+    let issuance_in = &issuance_tx["vin"][issuance_in_index as usize];
+    let issuance_data = &issuance_in["issuance"];
+    assert_eq!(issuance_data["assetamount"].as_u64(), Some(150000000));
+    assert!(issuance_data["assetamountcommitment"].is_null());
+
+    rest_handle.stop();
+    Ok(())
+}
+
+#[cfg(feature = "liquid")]
+#[test]
+fn test_rest_liquid_asset_transfer() -> Result<()> {
+    let (rest_handle, rest_addr, mut tester) = common::init_rest_tester().unwrap();
+
+    let issuance = tester
+        .node_client()
+        .call::<Value>("issueasset", &[1.5.into(), 0.into(), false.into()])?;
+    let assetid = issuance["asset"].as_str().expect("asset id");
+    tester.mine()?;
+
+    let (c_addr, uc_addr) = tester.ct_newaddress()?;
+
+    // With blinding off
+    let txid = tester.send_asset(
+        &uc_addr,
+        "0.3 BTC".parse().unwrap(), // not actually BTC, but this is what Amount expects
+        assetid.parse().unwrap(),
+    )?;
+    let tx = get_json(rest_addr, &format!("/tx/{}", txid))?;
+    let outs = tx["vout"].as_array().expect("array of outs");
+    let vout = outs
+        .iter()
+        .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string()))
+        .expect("our output");
+    assert_eq!(vout["asset"].as_str(), Some(assetid));
+    assert_eq!(vout["value"].as_u64(), Some(30000000));
+    assert!(vout["scriptpubkey_type"].is_string());
+    assert_eq!(
+        vout["scriptpubkey_address"].as_str(),
+        Some(uc_addr.to_string().as_str())
+    );
+
+    // With blinding on
+    let txid = tester.send_asset(
+        &c_addr,
+        "0.3 BTC".parse().unwrap(),
+        assetid.parse().unwrap(),
+    )?;
+    let tx = get_json(rest_addr, &format!("/tx/{}", txid))?;
+    let outs = tx["vout"].as_array().expect("array of outs");
+    let vout = outs
+        .iter()
+        .find(|vout| vout["scriptpubkey_address"].as_str() == Some(&uc_addr.to_string()))
+        .expect("our output");
+    assert!(vout["asset"].is_null());
+    assert!(vout["value"].is_null());
+    assert!(vout["assetcommitment"].is_string());
+    assert!(vout["valuecommitment"].is_string());
+
+    rest_handle.stop();
+    Ok(())
+}
+
+#[cfg(feature = "liquid")]
+#[test]
+fn test_rest_liquid_block() -> Result<()> {
+    let (rest_handle, rest_addr, _tester) = common::init_rest_tester().unwrap();
+
+    // Test GET /block/:hash
+    let block1_hash = get_plain(rest_addr, "/block-height/1")?;
+    let block1 = get_json(rest_addr, &format!("/block/{}", block1_hash))?;
+
+    // No PoW-related stuff
+    assert!(block1["bits"].is_null());
+    assert!(block1["nonce"].is_null());
+    assert!(block1["difficulty"].is_null());
+
+    // TODO properly validate dynafed parameters in first and second blocks
+    // Dynamic Federations (dynafed) fields
+    // Block #1 should have the Full dynafed params
+    // See https://docs.rs/elements/latest/elements/dynafed/enum.Params.html
+    assert!(block1["ext"]["current"]["signblockscript"].is_string());
+    assert!(block1["ext"]["current"]["fedpegscript"].is_string());
+    assert!(block1["ext"]["current"]["fedpeg_program"].is_string());
+    assert!(block1["ext"]["current"]["signblock_witness_limit"].is_u64());
+    assert!(block1["ext"]["proposed"].is_object());
+    // TODO
+
+    assert!(block1["ext"]["signblock_witness"].is_array());
+
+    // Block #2 should have the Compact params
+    let block2_hash = get_plain(rest_addr, "/block-height/2")?;
+    let block2 = get_json(rest_addr, &format!("/block/{}", block2_hash))?;
+    assert!(block2["ext"]["current"]["signblockscript"].is_string());
+    assert!(block2["ext"]["current"]["signblock_witness_limit"].is_u64());
+    // With the `elided_root` in place of `fedpegscript`/`fedpeg_program`/`extension_space`
+    assert!(block2["ext"]["current"]["elided_root"].is_string());
+    assert!(block2["ext"]["current"]["fedpegscript"].is_null());
+    assert!(block2["ext"]["current"]["fedpeg_program"].is_null());
+    assert!(block2["ext"]["current"]["extension_space"].is_null());
     rest_handle.stop();
     Ok(())