From 1aae6433c19aa1849a18660a8559e51864eaf5da Mon Sep 17 00:00:00 2001 From: pawurb Date: Tue, 23 Jan 2024 14:47:30 +0100 Subject: [PATCH] dont panic --- Cargo.toml | 3 +- README.md | 66 +++++++-------- src/lib.rs | 239 +++++++++++++++++++++++++++++------------------- 3 files changed, 165 insertions(+), 143 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ea23f57..c2bb1b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "pg-extras" readme = "README.md" repository = "https://github.com/pawurb/rust-pg-extras" -version = "0.0.3" +version = "0.1.0" exclude = [ "docker-compose.yml.sample", @@ -20,3 +20,4 @@ postgres = "0.19.7" prettytable-rs = "^0.10" rust_decimal = {version = "1.32", features = ["db-postgres"]} rust_decimal_macros = "1.33" +thiserror = "1.0" diff --git a/README.md b/README.md index 9afda80..aefd904 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ You can check if it is enabled in your database by running: ```rust use pg_extras::{render_table, extensions} -render_table(extensions()); +render_table(extensions()?); ``` You should see a similar line in the output: @@ -63,7 +63,7 @@ You can run queries using a Rust API to display an ASCII table with results: ```rust use pg_extras::{render_table, cache_hit} -render_table(cache_hit(None)); +render_table(cache_hit(None)?); ``` ```bash @@ -82,7 +82,7 @@ Alternatively, you can work directly with the returned structs: ```rust use pg_extras::{render_table, cache_hit, CacheHit} -let cache_hit_res: Vec<CacheHit> = cache_hit(None); +let cache_hit_res: Vec<CacheHit> = cache_hit(None)?; println!("{:?}", cache_hit_res); // [CacheHit { name: "index hit rate", ratio: 0.9779... }, CacheHit { name: "table hit rate", ratio: 0.9672... }] @@ -92,7 +92,7 @@ Some methods accept params allowing you to customize queries: ```rust -cache_hit(Some("other_schema".to_string)); +cache_hit(Some("other_schema".to_string()))?; ``` You can override the default `public` schema by setting the `PG_EXTRAS_SCHEMA` environment variable.
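For example, a minimal sketch of combining the schema override with the new fallible API (the `main` wrapper is illustrative; it assumes `PG_EXTRAS_SCHEMA` is read when the query runs, as the note above describes):

```rust
use pg_extras::{cache_hit, render_table, PgExtrasError};

fn main() -> Result<(), PgExtrasError> {
    // Assumed: pg-extras picks up PG_EXTRAS_SCHEMA as the default schema.
    std::env::set_var("PG_EXTRAS_SCHEMA", "other_schema");
    render_table(cache_hit(None)?); // cache hit ratios for `other_schema`
    Ok(())
}
```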
@@ -107,7 +107,7 @@ struct CacheHit { ratio: Decimal, } -cache_hit(schema: Option<String>) -> Vec<CacheHit> +cache_hit(schema: Option<String>) -> Result<Vec<CacheHit>, PgExtrasError> name | ratio ----------------+------------------------ @@ -131,7 +131,7 @@ struct IndexCacheHit { ratio: String, } -index_cache_hit(schema: Option<String>) -> Vec<IndexCacheHit> +index_cache_hit(schema: Option<String>) -> Result<Vec<IndexCacheHit>, PgExtrasError> | name | buffer_hits | block_reads | total_read | ratio | +-----------------------+-------------+-------------+------------+-------------------+ @@ -156,7 +156,7 @@ struct TableCacheHit { ratio: String, } -table_cache_hit() -> Vec<TableCacheHit> +table_cache_hit() -> Result<Vec<TableCacheHit>, PgExtrasError> | name | buffer_hits | block_reads | total_read | ratio | +-----------------------+-------------+-------------+------------+-------------------+ @@ -180,7 +180,7 @@ struct DbSetting { short_desc: String, } -db_settings() -> Vec<DbSetting> +db_settings() -> Result<Vec<DbSetting>, PgExtrasError> name | setting | unit | ------------------------------+---------+------+ @@ -203,7 +203,7 @@ struct SslUsed { ssl_used: bool, } -ssl_used() -> Vec<SslUsed> +ssl_used() -> Result<Vec<SslUsed>, PgExtrasError> | ssl_is_used | +---------------------------------+ @@ -222,7 +222,7 @@ struct IndexUsage { rows_in_table: i64, } -index_usage(schema: Option<String>) -> Vec<IndexUsage> +index_usage(schema: Option<String>) -> Result<Vec<IndexUsage>, PgExtrasError> relname | percent_of_times_index_used | rows_in_table ---------------------+-----------------------------+--------------- @@ -250,7 +250,7 @@ struct Locks { application: String, } -locks() -> Vec<Locks> +locks() -> Result<Vec<Locks>, PgExtrasError> procpid | relname | transactionid | granted | query_snippet | mode | age | application | ---------+---------+---------------+---------+-----------------------+------------------------------------------------------ @@ -281,7 +281,7 @@ struct AllLocks { application: String, } -all_locks() -> Vec<AllLocks> +all_locks() -> Result<Vec<AllLocks>, PgExtrasError> ``` @@ -298,7 +298,7 @@ struct Outliers { query: String, } -outliers() -> Vec<Outliers> +outliers() -> Result<Vec<Outliers>, PgExtrasError> query | exec_time | prop_exec_time | ncalls | sync_io_time -----------------------------------------+------------------+----------------+-------------+-------------- @@ -329,7 +329,7 @@ struct Calls { sync_io_time: Interval, } -calls(limit: Option<String>) -> Vec<Calls> +calls(limit: Option<String>) -> Result<Vec<Calls>, PgExtrasError> qry | exec_time | prop_exec_time | ncalls | sync_io_time -----------------------------------------+------------------+----------------+-------------+-------------- @@ -361,7 +361,7 @@ struct Blocking { blocking_sql_app: String, } -blocking(limit: Option<String>) -> Vec<Blocking> +blocking(limit: Option<String>) -> Result<Vec<Blocking>, PgExtrasError> blocked_pid | blocking_statement | blocking_duration | blocking_pid | blocked_statement | blocked_duration -------------+--------------------------+-------------------+--------------+------------------------------------------------------------------------------------+------------------ @@ -380,7 +380,7 @@ struct TotalIndexSize { size: String, } -total_index_size() -> Vec<TotalIndexSize> +total_index_size() -> Result<Vec<TotalIndexSize>, PgExtrasError> size ------- @@ -399,7 +399,7 @@ This command displays the total size of all indexes on the database, in MB.
It i schema: String, } -index_size() -> Vec<IndexSize> +index_size() -> Result<Vec<IndexSize>, PgExtrasError> name | size | schema | ---------------------------------------------------------------+------------------- @@ -427,7 +427,7 @@ struct TableSize { schema: String, } -table_size() -> Vec<TableSize> +table_size() -> Result<Vec<TableSize>, PgExtrasError> name | size | schema | ---------------------------------------------------------------+------------------- @@ -449,7 +449,7 @@ TableIndexesSize { index_size: String, } -table_indexes_size(schema: Option<String>) -> Vec<TableIndexesSize> +table_indexes_size(schema: Option<String>) -> Result<Vec<TableIndexesSize>, PgExtrasError> table | indexes_size ---------------------------------------------------------------+-------------- @@ -471,7 +471,7 @@ struct TotalTableSize { size: String, } -total_table_size() -> Vec<TotalTableSize> +total_table_size() -> Result<Vec<TotalTableSize>, PgExtrasError> name | size ---------------------------------------------------------------+--------- @@ -495,7 +495,7 @@ struct UnusedIndexes { index_scans: i64, } -unused_indexes(schema: Option<String>) -> Vec<UnusedIndexes> +unused_indexes(schema: Option<String>) -> Result<Vec<UnusedIndexes>, PgExtrasError> table | index | index_size | index_scans ---------------------+--------------------------------------------+------------+------------- @@ -520,7 +520,7 @@ struct DuplicateIndexes { idx4: String, } -duplicate_indexes() -> Vec<DuplicateIndexes> +duplicate_indexes() -> Result<Vec<DuplicateIndexes>, PgExtrasError> | size | idx1 | idx2 | idx3 | idx4 | +------------+--------------+----------------+----------+-----------+ @@ -544,7 +544,7 @@ struct NullIndexes { schema: String, } -null_indexes(min_relation_size_mb: Option<String>) -> Vec<NullIndexes> +null_indexes(min_relation_size_mb: Option<String>) -> Result<Vec<NullIndexes>, PgExtrasError> oid | index | index_size | unique | indexed_column | null_frac | expected_saving ---------+--------------------+------------+--------+----------------+-----------+----------------- @@ -566,7 +566,7 @@ struct SeqScans { count: i64, } -seq_scans(schema: Option<String>) -> Vec<SeqScans> +seq_scans(schema: Option<String>) -> Result<Vec<SeqScans>, PgExtrasError> name | count -----------------------------------+---------- @@ -594,7 +594,7 @@ struct LongRunningQueries { query: String, } -long_running_queries() -> Vec<LongRunningQueries> +long_running_queries() -> Result<Vec<LongRunningQueries>, PgExtrasError> pid | duration | query -------+-----------------+--------------------------------------------------------------------------------------- @@ -614,7 +614,7 @@ struct RecordsRank { estimated_count: i64, } -records_rank(schema: Option<String>) -> Vec<RecordsRank> +records_rank(schema: Option<String>) -> Result<Vec<RecordsRank>, PgExtrasError> name | estimated_count -----------------------------------+----------------- @@ -640,7 +640,7 @@ struct Bloat { waste: String, } -bloat() -> Vec<Bloat> +bloat() -> Result<Vec<Bloat>, PgExtrasError> type | schemaname | object_name | bloat | waste -------+------------+-------------------------------+-------+---------- @@ -670,7 +670,7 @@ struct VacuumStats { expect_autovacuum: String, } -vacuum_stats() -> Vec<VacuumStats> +vacuum_stats() -> Result<Vec<VacuumStats>, PgExtrasError> schema | table | last_vacuum | last_autovacuum | rowcount | dead_rowcount | autovacuum_threshold | expect_autovacuum --------+-----------------------+-------------+------------------+----------------+----------------+----------------------+------------------- @@ -694,7 +694,7 @@ struct BuffercacheStats { percent_of_relation: Decimal, } -buffercache_stats() -> Vec<BuffercacheStats> +buffercache_stats() -> Result<Vec<BuffercacheStats>, PgExtrasError> ``` This command shows the relations buffered in the database shared buffer, ordered by the percentage taken. It also shows how much of the whole relation is buffered.
@@ -707,7 +707,7 @@ struct BuffercacheUsage { buffers: i64, } -buffercache_usage() -> Vec<BuffercacheUsage> +buffercache_usage() -> Result<Vec<BuffercacheUsage>, PgExtrasError> ``` This command calculates how many blocks of each table are currently cached. @@ -722,7 +722,7 @@ struct Extensions { comment: String, } -extensions() -> Vec<Extensions> +extensions() -> Result<Vec<Extensions>, PgExtrasError> ``` @@ -737,7 +737,7 @@ struct Connections { client_addr: String, } -connections() -> Vec<Connections> +connections() -> Result<Vec<Connections>, PgExtrasError> +----------------------------------------------------------------+ | Returns the list of all active database connections | @@ -759,7 +759,7 @@ struct Mandelbrot { array_to_string: String, } -mandelbrot() -> Vec<Mandelbrot> +mandelbrot() -> Result<Vec<Mandelbrot>, PgExtrasError> ``` diff --git a/src/lib.rs b/src/lib.rs index 5a458d1..17abeeb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -35,6 +35,7 @@ pub use structs::total_index_size::TotalIndexSize; pub use structs::total_table_size::TotalTableSize; pub use structs::unused_indexes::UnusedIndexes; pub use structs::vacuum_stats::VacuumStats; +use thiserror::Error; #[macro_use] extern crate prettytable; @@ -50,186 +51,191 @@ pub fn render_table(items: Vec) { table.printstd(); } -pub fn bloat() -> Vec<Bloat> { +pub fn bloat() -> Result<Vec<Bloat>, PgExtrasError> { let query = read_file(Bloat::FILE_NAME); - get_rows(query).iter().map(Bloat::new).collect() + Ok(get_rows(query)?.iter().map(Bloat::new).collect()) } -pub fn blocking(limit: Option<String>) -> Vec<Blocking> { +pub fn blocking(limit: Option<String>) -> Result<Vec<Blocking>, PgExtrasError> { let limit = limit.unwrap_or("10".to_string()); let query = read_file(Blocking::FILE_NAME).replace("%{limit}", limit.as_str()); - get_rows(&query).iter().map(Blocking::new).collect() + Ok(get_rows(&query)?.iter().map(Blocking::new).collect()) } -pub fn calls(limit: Option<String>) -> Vec<Calls> { +pub fn calls(limit: Option<String>) -> Result<Vec<Calls>, PgExtrasError> { let limit = limit.unwrap_or("10".to_string()); let query = read_file("calls").replace("%{limit}", limit.as_str()); - get_rows(&query).iter().map(Calls::new).collect() + Ok(get_rows(&query)?.iter().map(Calls::new).collect()) } -pub fn extensions() -> Vec<Extensions> { +pub fn extensions() -> Result<Vec<Extensions>, PgExtrasError> { let query = read_file(Extensions::FILE_NAME); - get_rows(query).iter().map(Extensions::new).collect() + Ok(get_rows(query)?.iter().map(Extensions::new).collect()) } -pub fn table_cache_hit() -> Vec<TableCacheHit> { +pub fn table_cache_hit() -> Result<Vec<TableCacheHit>, PgExtrasError> { let query = read_file(TableCacheHit::FILE_NAME); - get_rows(query).iter().map(TableCacheHit::new).collect() + Ok(get_rows(query)?.iter().map(TableCacheHit::new).collect()) } -pub fn tables(schema: Option<String>) -> Vec<Tables> { +pub fn tables(schema: Option<String>) -> Result<Vec<Tables>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(Tables::FILE_NAME).replace("%{schema}", &schema_name); - get_rows(&query).iter().map(Tables::new).collect() + Ok(get_rows(&query)?.iter().map(Tables::new).collect()) } -pub fn index_cache_hit(schema: Option<String>) -> Vec<IndexCacheHit> { +pub fn index_cache_hit(schema: Option<String>) -> Result<Vec<IndexCacheHit>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(IndexCacheHit::FILE_NAME).replace("%{schema}", &schema_name); - get_rows(&query).iter().map(IndexCacheHit::new).collect() + Ok(get_rows(&query)?.iter().map(IndexCacheHit::new).collect()) } -pub fn indexes() -> Vec<Indexes> { +pub fn indexes() -> Result<Vec<Indexes>, PgExtrasError> { let query = read_file(Indexes::FILE_NAME); - get_rows(query).iter().map(Indexes::new).collect() + Ok(get_rows(query)?.iter().map(Indexes::new).collect()) } -pub fn index_size() ->
Vec<IndexSize> { +pub fn index_size() -> Result<Vec<IndexSize>, PgExtrasError> { let query = read_file(IndexSize::FILE_NAME); - get_rows(query).iter().map(IndexSize::new).collect() + Ok(get_rows(query)?.iter().map(IndexSize::new).collect()) } -pub fn index_usage(schema: Option<String>) -> Vec<IndexUsage> { +pub fn index_usage(schema: Option<String>) -> Result<Vec<IndexUsage>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(IndexUsage::FILE_NAME).replace("%{schema}", &schema_name); - get_rows(&query).iter().map(IndexUsage::new).collect() + Ok(get_rows(&query)?.iter().map(IndexUsage::new).collect()) } -pub fn index_scans(schema: Option<String>) -> Vec<IndexScans> { +pub fn index_scans(schema: Option<String>) -> Result<Vec<IndexScans>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(IndexScans::FILE_NAME).replace("%{schema}", &schema_name); - get_rows(&query).iter().map(IndexScans::new).collect() + Ok(get_rows(&query)?.iter().map(IndexScans::new).collect()) } -pub fn null_indexes(min_relation_size_mb: Option<String>) -> Vec<NullIndexes> { +pub fn null_indexes( + min_relation_size_mb: Option<String>, +) -> Result<Vec<NullIndexes>, PgExtrasError> { let min_relation_size_mb = min_relation_size_mb.unwrap_or("0".to_string()); let query = read_file(NullIndexes::FILE_NAME).replace("%{min_relation_size_mb}", &min_relation_size_mb); - get_rows(&query).iter().map(NullIndexes::new).collect() + Ok(get_rows(&query)?.iter().map(NullIndexes::new).collect()) } -pub fn locks() -> Vec<Locks> { +pub fn locks() -> Result<Vec<Locks>, PgExtrasError> { let query = read_file(Locks::FILE_NAME); - get_rows(query).iter().map(Locks::new).collect() + Ok(get_rows(query)?.iter().map(Locks::new).collect()) } -pub fn all_locks() -> Vec<AllLocks> { +pub fn all_locks() -> Result<Vec<AllLocks>, PgExtrasError> { let query = read_file(AllLocks::FILE_NAME); - get_rows(query).iter().map(AllLocks::new).collect() + Ok(get_rows(query)?.iter().map(AllLocks::new).collect()) } -pub fn long_running_queries() -> Vec<LongRunningQueries> { +pub fn long_running_queries() -> Result<Vec<LongRunningQueries>, PgExtrasError> { let query = read_file(LongRunningQueries::FILE_NAME); - get_rows(query) + Ok(get_rows(query)?
.iter() .map(LongRunningQueries::new) - .collect() + .collect()) } -pub fn mandelbrot() -> Vec<Mandelbrot> { +pub fn mandelbrot() -> Result<Vec<Mandelbrot>, PgExtrasError> { let query = read_file(Mandelbrot::FILE_NAME); - get_rows(query).iter().map(Mandelbrot::new).collect() + Ok(get_rows(query)?.iter().map(Mandelbrot::new).collect()) } -pub fn outliers() -> Vec<Outliers> { +pub fn outliers() -> Result<Vec<Outliers>, PgExtrasError> { let query = read_file(Outliers::FILE_NAME); - get_rows(query).iter().map(Outliers::new).collect() + Ok(get_rows(query)?.iter().map(Outliers::new).collect()) } -pub fn records_rank(schema: Option<String>) -> Vec<RecordsRank> { +pub fn records_rank(schema: Option<String>) -> Result<Vec<RecordsRank>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(RecordsRank::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(RecordsRank::new).collect() + Ok(get_rows(&query)?.iter().map(RecordsRank::new).collect()) } -pub fn seq_scans(schema: Option<String>) -> Vec<SeqScans> { +pub fn seq_scans(schema: Option<String>) -> Result<Vec<SeqScans>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(SeqScans::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(SeqScans::new).collect() + Ok(get_rows(&query)?.iter().map(SeqScans::new).collect()) } -pub fn table_index_scans(schema: Option<String>) -> Vec<TableIndexScans> { +pub fn table_index_scans(schema: Option<String>) -> Result<Vec<TableIndexScans>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(TableIndexScans::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(TableIndexScans::new).collect() + Ok(get_rows(&query)?.iter().map(TableIndexScans::new).collect()) } -pub fn table_indexes_size(schema: Option<String>) -> Vec<TableIndexesSize> { +pub fn table_indexes_size(schema: Option<String>) -> Result<Vec<TableIndexesSize>, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(TableIndexesSize::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(TableIndexesSize::new).collect() + Ok(get_rows(&query)?
+ .iter() + .map(TableIndexesSize::new) + .collect()) } -pub fn table_size() -> Vec { +pub fn table_size() -> Result, PgExtrasError> { let query = read_file(TableSize::FILE_NAME); - get_rows(query).iter().map(TableSize::new).collect() + Ok(get_rows(query)?.iter().map(TableSize::new).collect()) } -pub fn total_index_size() -> Vec { +pub fn total_index_size() -> Result, PgExtrasError> { let query = read_file(TotalIndexSize::FILE_NAME); - get_rows(query).iter().map(TotalIndexSize::new).collect() + Ok(get_rows(query)?.iter().map(TotalIndexSize::new).collect()) } -pub fn total_table_size() -> Vec { +pub fn total_table_size() -> Result, PgExtrasError> { let query = read_file(TotalTableSize::FILE_NAME); - get_rows(query).iter().map(TotalTableSize::new).collect() + Ok(get_rows(query)?.iter().map(TotalTableSize::new).collect()) } -pub fn unused_indexes(schema: Option) -> Vec { +pub fn unused_indexes(schema: Option) -> Result, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(UnusedIndexes::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(UnusedIndexes::new).collect() + Ok(get_rows(&query)?.iter().map(UnusedIndexes::new).collect()) } -pub fn duplicate_indexes() -> Vec { +pub fn duplicate_indexes() -> Result, PgExtrasError> { let query = read_file(DuplicateIndexes::FILE_NAME); - get_rows(query).iter().map(DuplicateIndexes::new).collect() + Ok(get_rows(query)?.iter().map(DuplicateIndexes::new).collect()) } -pub fn vacuum_stats() -> Vec { +pub fn vacuum_stats() -> Result, PgExtrasError> { let query = read_file(VacuumStats::FILE_NAME); - get_rows(query).iter().map(VacuumStats::new).collect() + Ok(get_rows(query)?.iter().map(VacuumStats::new).collect()) } -pub fn buffercache_stats() -> Vec { +pub fn buffercache_stats() -> Result, PgExtrasError> { let query = read_file(BuffercacheStats::FILE_NAME); - get_rows(query).iter().map(BuffercacheStats::new).collect() + Ok(get_rows(query)?.iter().map(BuffercacheStats::new).collect()) } -pub fn buffercache_usage() -> Vec { +pub fn buffercache_usage() -> Result, PgExtrasError> { let query = read_file(BuffercacheUsage::FILE_NAME); - get_rows(query).iter().map(BuffercacheUsage::new).collect() + Ok(get_rows(query)?.iter().map(BuffercacheUsage::new).collect()) } -pub fn ssl_used() -> Vec { +pub fn ssl_used() -> Result, PgExtrasError> { let query = read_file(SslUsed::FILE_NAME); - get_rows(query).iter().map(SslUsed::new).collect() + Ok(get_rows(query)?.iter().map(SslUsed::new).collect()) } -pub fn connections() -> Vec { +pub fn connections() -> Result, PgExtrasError> { let query = read_file(Connections::FILE_NAME); - get_rows(query).iter().map(Connections::new).collect() + Ok(get_rows(query)?.iter().map(Connections::new).collect()) } -pub fn cache_hit(schema: Option) -> Vec { +pub fn cache_hit(schema: Option) -> Result, PgExtrasError> { let schema_name = schema.unwrap_or(get_default_schema()); let query = read_file(CacheHit::FILE_NAME).replace("%{schema}", schema_name.as_str()); - get_rows(&query).iter().map(CacheHit::new).collect() + Ok(get_rows(&query)?.iter().map(CacheHit::new).collect()) } -pub fn db_settings() -> Vec { +pub fn db_settings() -> Result, PgExtrasError> { let query = read_file("db_settings"); - get_rows(query).iter().map(DbSetting::new).collect() + Ok(get_rows(query)?.iter().map(DbSetting::new).collect()) } pub fn read_file(filename: &str) -> &'static str { @@ -270,19 +276,33 @@ pub fn read_file(filename: &str) -> &'static str { } } -fn get_rows(query: &str) -> Vec { - 
connection() +#[derive(Error, Debug)] +pub enum PgExtrasError { + #[error("Both $DATABASE_URL and $PG_EXTRAS_DATABASE_URL are not set")] + MissingConfigVars(), + #[error("Cannot connect to database")] + ConnectionError(), + #[error("Unknown pg-extras error")] + Unknown, +} + +fn get_rows(query: &str) -> Result<Vec<Row>, PgExtrasError> { + Ok(connection()? .query(query, &[]) - .unwrap_or_else(|_| Vec::new()) + .unwrap_or_else(|_| Vec::new())) } -fn connection() -> Client { - let database_url = env::var("PG_EXTRAS_DATABASE_URL").unwrap_or_else(|_| { - env::var("DATABASE_URL") - .expect("Both $DATABASE_URL and $PG_EXTRAS_DATABASE_URL are not set") - }); +fn connection() -> Result<Client, PgExtrasError> { + let database_url = + match env::var("PG_EXTRAS_DATABASE_URL").or_else(|_| env::var("DATABASE_URL")) { + Ok(url) => url, + Err(_) => return Err(PgExtrasError::MissingConfigVars()), + }; - Client::connect(&database_url, NoTls).unwrap() + match Client::connect(&database_url, NoTls) { + Ok(client) => Ok(client), + Err(_) => Err(PgExtrasError::ConnectionError()), + } } #[cfg(test)] @@ -290,38 +310,39 @@ mod tests { use super::*; #[test] - fn it_works() { - render_table(cache_hit(None)); - render_table(bloat()); - render_table(blocking(None)); - render_table(calls(None)); - render_table(extensions()); - render_table(table_cache_hit()); - render_table(tables(None)); - render_table(index_cache_hit(None)); - render_table(indexes()); - render_table(index_size()); - render_table(index_usage(None)); - render_table(index_scans(None)); - render_table(null_indexes(None)); - render_table(locks()); - render_table(all_locks()); - render_table(long_running_queries()); - render_table(mandelbrot()); - render_table(outliers()); - render_table(records_rank(None)); - render_table(seq_scans(None)); - render_table(table_index_scans(None)); - render_table(table_indexes_size(None)); - render_table(table_size()); - render_table(total_index_size()); - render_table(total_table_size()); - render_table(unused_indexes(None)); - render_table(duplicate_indexes()); - render_table(vacuum_stats()); - render_table(buffercache_stats()); - render_table(buffercache_usage()); - render_table(ssl_used()); - render_table(connections()); + fn it_works() -> Result<(), PgExtrasError> { + render_table(cache_hit(None)?); + render_table(bloat()?); + render_table(blocking(None)?); + render_table(calls(None)?); + render_table(extensions()?); + render_table(table_cache_hit()?); + render_table(tables(None)?); + render_table(index_cache_hit(None)?); + render_table(indexes()?); + render_table(index_size()?); + render_table(index_usage(None)?); + render_table(index_scans(None)?); + render_table(null_indexes(None)?); + render_table(locks()?); + render_table(all_locks()?); + render_table(long_running_queries()?); + render_table(mandelbrot()?); + render_table(outliers()?); + render_table(records_rank(None)?); + render_table(seq_scans(None)?); + render_table(table_index_scans(None)?); + render_table(table_indexes_size(None)?); + render_table(table_size()?); + render_table(total_index_size()?); + render_table(total_table_size()?); + render_table(unused_indexes(None)?); + render_table(duplicate_indexes()?); + render_table(vacuum_stats()?); + render_table(buffercache_stats()?); + render_table(buffercache_usage()?); + render_table(ssl_used()?); + render_table(connections()?); + Ok(()) } }
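For reference, a minimal sketch of consuming the new `Result`-based API from application code (the `main` function and the log messages are illustrative; only the `pg_extras` items shown in the patch are assumed):

```rust
use pg_extras::{bloat, render_table, PgExtrasError};

fn main() {
    // Handle the new error variants explicitly instead of relying on panics.
    match bloat() {
        Ok(rows) => render_table(rows),
        Err(PgExtrasError::MissingConfigVars()) => {
            eprintln!("Set DATABASE_URL or PG_EXTRAS_DATABASE_URL and retry.")
        }
        // `Display` comes from thiserror's #[error(...)] attributes.
        Err(e) => eprintln!("pg-extras failed: {e}"),
    }
}
```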