feat: polish first impression / contact with hord #137

Merged: 3 commits, Jul 21, 2023
61 changes: 31 additions & 30 deletions components/hord-cli/src/archive/mod.rs
@@ -4,11 +4,11 @@ use chainhook_sdk::types::BitcoinNetwork;
use chainhook_sdk::utils::Context;
use flate2::read::GzDecoder;
use futures_util::StreamExt;
use progressing::mapping::Bar as MappingBar;
use progressing::Baring;
use std::fs;
use std::io::{self, Cursor};
use std::io::{Read, Write};
use progressing::Baring;
use progressing::mapping::Bar as MappingBar;

pub fn default_sqlite_file_path(_network: &BitcoinNetwork) -> String {
format!("hord.sqlite").to_lowercase()
@@ -18,7 +18,7 @@ pub fn default_sqlite_sha_file_path(_network: &BitcoinNetwork) -> String {
format!("hord.sqlite.sha256").to_lowercase()
}

pub async fn download_sqlite_file(config: &Config, ctx: &Context) -> Result<(), String> {
pub async fn download_sqlite_file(config: &Config, _ctx: &Context) -> Result<(), String> {
let mut destination_path = config.expected_cache_path();
std::fs::create_dir_all(&destination_path).unwrap_or_else(|e| {
println!("{}", e.to_string());
@@ -61,29 +61,35 @@ pub async fn download_sqlite_file(config: &Config, ctx: &Context) -> Result<(),
});

if res.status() == reqwest::StatusCode::OK {
let mut progress_bar = MappingBar::with_range(0i64, 4_800_001_704);
progress_bar.set_len(40);
info!(
ctx.expect_logger(),
"{}", progress_bar
);
let limit = 5_400_000_000;
let mut progress_bar = MappingBar::with_range(0i64, limit);
progress_bar.set_len(60);
let mut stdout = std::io::stdout();
print!("{}", progress_bar);
let _ = stdout.flush();
let mut stream = res.bytes_stream();
let mut progress = 0;
let mut steps = 0;
while let Some(item) = stream.next().await {
let chunk = item.or(Err(format!("Error while downloading file")))?;
progress += chunk.len() as i64;
steps += chunk.len() as i64;
if steps > 5_000_000 {
steps = 0;
}
progress_bar.set(progress);
if progress_bar.has_progressed_significantly() {
progress_bar.remember_significant_progress();
info!(
ctx.expect_logger(),
"{}", progress_bar
);
if steps == 0 {
print!("\r{}", progress_bar);
let _ = stdout.flush();
}
tx.send_async(chunk.to_vec())
.await
.map_err(|e| format!("unable to download stacks event: {}", e.to_string()))?;
}
progress_bar.set(limit);
print!("\r{}", progress_bar);
let _ = stdout.flush();
println!();
drop(tx);
}
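
The reporting change above replaces per-chunk logger calls with a single in-place progress bar: the bar is redrawn on the same terminal line via a carriage return, and only after roughly 5 MB of new data has arrived, so stdout is not flooded. A minimal, std-only sketch of that pattern, with the bar rendering hand-rolled instead of taken from the progressing crate (all names below are illustrative):

use std::io::Write;

// Redraw a single-line progress bar in place; `\r` moves the cursor back to
// column 0 so the next draw overwrites the previous one.
fn redraw_bar(downloaded: u64, total: u64, width: usize) {
    let filled = ((downloaded as f64 / total as f64) * width as f64) as usize;
    let filled = filled.min(width);
    let bar = "#".repeat(filled) + &"-".repeat(width - filled);
    print!("\r[{bar}] {downloaded}/{total} bytes");
    let _ = std::io::stdout().flush();
}

fn main() {
    let total: u64 = 5_400_000_000;
    let mut downloaded: u64 = 0;
    let mut since_last_draw: u64 = 0;
    while downloaded < total {
        let chunk_len: u64 = 8_192_000; // stand-in for a received chunk
        downloaded += chunk_len;
        since_last_draw += chunk_len;
        // Throttle: redraw only once ~5 MB has accumulated since the last draw.
        if since_last_draw > 5_000_000 {
            since_last_draw = 0;
            redraw_bar(downloaded.min(total), total, 60);
        }
    }
    redraw_bar(total, total, 60); // final 100% draw, then move to a new line
    println!();
}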

@@ -152,25 +158,20 @@ pub async fn download_ordinals_dataset_if_required(config: &Config, ctx: &Contex
(Ok(local), Ok(remote_response)) => {
let cache_not_expired = remote_response.starts_with(&local[0..32]) == false;
if cache_not_expired {
info!(
ctx.expect_logger(),
"More recent hord.sqlite file detected"
);
info!(ctx.expect_logger(), "More recent hord.sqlite file detected");
}
cache_not_expired == false
}
(_, _) => {
match std::fs::metadata(&sqlite_file_path) {
Ok(_) => false,
_ => {
info!(
ctx.expect_logger(),
"Unable to retrieve hord.sqlite file locally"
);
true
}
(_, _) => match std::fs::metadata(&sqlite_file_path) {
Ok(_) => false,
_ => {
info!(
ctx.expect_logger(),
"Unable to retrieve hord.sqlite file locally"
);
true
}
}
},
};
if should_download {
info!(ctx.expect_logger(), "Downloading {}", url);
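
The freshness test in this hunk ultimately rests on one string comparison: the remote checksum string is checked against the first 32 hex characters of the local one, and the result, together with whether a local database exists at all, drives the should_download flag. A standalone illustration of just that comparison, with made-up digests:

fn main() {
    // Hypothetical sha256 digests (64 hex characters each); the code above
    // only compares the remote value against the first 32 characters of the
    // local one.
    let local = "3a7bd3e2360a3d29eea436fcfb7e44c735d117c42d1c1835420b6b9942dd4f1b";
    let remote = "3a7bd3e2360a3d29eea436fcfb7e44c7deadbeefdeadbeefdeadbeefdeadbeef";
    let prefixes_match = remote.starts_with(&local[0..32]);
    println!("first 32 characters match: {prefixes_match}");
}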
24 changes: 12 additions & 12 deletions components/hord-cli/src/cli/mod.rs
@@ -7,10 +7,9 @@ use crate::service::Service;
use crate::db::{
delete_data_in_hord_db, find_all_inscription_transfers, find_all_inscriptions_in_block,
find_all_transfers_in_block, find_inscription_with_id, find_last_block_inserted,
find_latest_inscription_block_height, find_lazy_block_at_block_height,
initialize_hord_db, insert_entry_in_locations,
open_readonly_hord_db_conn, open_readonly_hord_db_conn_rocks_db, open_readwrite_hord_db_conn,
open_readwrite_hord_db_conn_rocks_db, rebuild_rocks_db,
find_latest_inscription_block_height, find_lazy_block_at_block_height, initialize_hord_db,
insert_entry_in_locations, open_readonly_hord_db_conn, open_readonly_hord_db_conn_rocks_db,
open_readwrite_hord_db_conn, open_readwrite_hord_db_conn_rocks_db, rebuild_rocks_db,
remove_entries_from_locations_at_block_height, retrieve_satoshi_point_using_lazy_storage,
};
use crate::hord::{
@@ -457,7 +456,10 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {

if let Some(ref post_to) = cmd.post_to {
info!(ctx.expect_logger(), "A fully synchronized bitcoind node is required for retrieving inscriptions content.");
info!(ctx.expect_logger(), "Checking {}...", config.network.bitcoind_rpc_url);
info!(
ctx.expect_logger(),
"Checking {}...", config.network.bitcoind_rpc_url
);
let tip = check_bitcoind_connection(&config).await?;
if tip < cmd.end_block {
error!(ctx.expect_logger(), "Unable to scan block range [{}, {}]: underlying bitcoind synchronized until block {} ", cmd.start_block, cmd.end_block, tip);
@@ -513,14 +515,17 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
"Inscriptions revealed: {}, inscriptions transferred: {total_transfers}",
inscriptions.len()
);
println!("-----");
println!("-----");
}
}
}
}
Command::Scan(ScanCommand::Inscription(cmd)) => {
let config: Config =
Config::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;

let _ = download_ordinals_dataset_if_required(&config, ctx).await;

let inscriptions_db_conn =
open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)?;
let (inscription, block_height) =
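
The functional change in this hunk is the new download_ordinals_dataset_if_required call, which ensures the ordinals dataset is present before the read-only connection is opened for a `scan inscription` run. A std-only sketch of that ordering, using hypothetical helper names (the real helper also fetches and unpacks the archive over the network):

use std::fs::File;
use std::io::{Error, ErrorKind};
use std::path::Path;

// Stand-in for download_ordinals_dataset_if_required: guarantee the dataset
// exists locally before anything tries to open it read-only.
fn ensure_dataset(path: &Path) -> std::io::Result<()> {
    if path.exists() {
        return Ok(());
    }
    // The real implementation would download and unpack hord.sqlite here;
    // this sketch has no network layer, so it only reports the situation.
    Err(Error::new(
        ErrorKind::NotFound,
        format!("{} missing: initial download required", path.display()),
    ))
}

// Open the dataset read-only, but only after ensure_dataset has run.
fn open_dataset_readonly(path: &Path) -> std::io::Result<File> {
    ensure_dataset(path)?;
    File::open(path)
}

fn main() {
    match open_dataset_readonly(Path::new("hord.sqlite")) {
        Ok(_) => println!("dataset opened read-only"),
        Err(e) => println!("{e}"),
    }
}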
@@ -895,12 +900,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
"Cleaning hord_db: {} blocks dropped",
cmd.end_block - cmd.start_block + 1
);
} // HordDbCommand::Patch(_cmd) => {
// unimplemented!()
// }
// HordDbCommand::Migrate(_cmd) => {
// unimplemented!()
// }
}
}
Ok(())
}
17 changes: 8 additions & 9 deletions components/hord-cli/src/config/mod.rs
@@ -148,7 +148,9 @@ impl Config {
if let Some(dst) = source.tsv_file_path.take() {
let mut file_path = PathBuf::new();
file_path.push(dst);
event_sources.push(EventSourceConfig::OrdinalsSqlitePath(PathConfig { file_path }));
event_sources.push(EventSourceConfig::OrdinalsSqlitePath(PathConfig {
file_path,
}));
continue;
}
if let Some(file_url) = source.tsv_file_url.take() {
@@ -228,9 +230,8 @@ impl Config {
pub fn is_initial_ingestion_required(&self) -> bool {
for source in self.event_sources.iter() {
match source {
EventSourceConfig::OrdinalsSqlitePath(_) | EventSourceConfig::OrdinalsSqliteUrl(_) => {
return true
}
EventSourceConfig::OrdinalsSqlitePath(_)
| EventSourceConfig::OrdinalsSqliteUrl(_) => return true,
}
}
return false;
@@ -387,11 +388,9 @@ impl Config {
working_dir: default_cache_path(),
},
http_api: PredicatesApi::Off,
event_sources: vec![
EventSourceConfig::OrdinalsSqliteUrl(UrlConfig {
file_url: DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE.into(),
}),
],
event_sources: vec![EventSourceConfig::OrdinalsSqliteUrl(UrlConfig {
file_url: DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE.into(),
})],
limits: LimitsConfig {
max_number_of_bitcoin_predicates: BITCOIN_MAX_PREDICATE_REGISTRATION,
max_number_of_concurrent_bitcoin_scans: BITCOIN_SCAN_THREAD_POOL_SIZE,
5 changes: 2 additions & 3 deletions components/hord-cli/src/db/mod.rs
@@ -7,8 +7,7 @@ use std::{

use chainhook_sdk::{
indexer::bitcoin::{
build_http_client, download_block_with_retry,
retrieve_block_hash_with_retry,
build_http_client, download_block_with_retry, retrieve_block_hash_with_retry,
},
types::{
BitcoinBlockData, BlockIdentifier, OrdinalInscriptionRevealData,
@@ -1386,7 +1385,7 @@ pub fn retrieve_satoshi_point_using_lazy_storage(
// evaluate exit condition: did we reach the **final** coinbase transaction
if coinbase_txid.eq(&txid) {
let subsidy = Height(ordinal_block_number.into()).subsidy();
if ordinal_offset < subsidy {
if ordinal_offset < subsidy {
// Great!
break;
}
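
For context on the exit condition in this last hunk: Height::subsidy() yields the coinbase subsidy, in satoshis, of the block at that height, so an ordinal_offset below the subsidy means the tracked satoshi was minted in that block's coinbase and the walk can stop. A standalone sketch of the subsidy schedule, assuming standard Bitcoin mainnet parameters (the actual implementation is not shown in this diff):

const COIN: u64 = 100_000_000; // satoshis per bitcoin
const HALVING_INTERVAL: u64 = 210_000; // blocks per subsidy epoch on mainnet

// Coinbase subsidy in satoshis at a given block height: 50 BTC at genesis,
// halved every 210,000 blocks.
fn subsidy(height: u64) -> u64 {
    let epoch = height / HALVING_INTERVAL;
    if epoch >= 64 {
        return 0; // shifting by 64+ bits would overflow; the subsidy is long gone
    }
    (50 * COIN) >> epoch
}

fn main() {
    assert_eq!(subsidy(0), 5_000_000_000); // 50 BTC
    assert_eq!(subsidy(210_000), 2_500_000_000); // 25 BTC after the first halving
    assert_eq!(subsidy(840_000), 312_500_000); // 3.125 BTC in the fifth epoch
    // In retrieve_satoshi_point_using_lazy_storage, ordinal_offset < subsidy
    // marks the coinbase that created the satoshi being traced.
    println!("subsidy at height 840000: {} sats", subsidy(840_000));
}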