All printing replaced by logging (#67)

This commit is contained in:
Azrenbeth
2021-09-27 12:20:12 +01:00
committed by GitHub
parent bc30942e2d
commit b8e323c321
9 changed files with 66 additions and 33 deletions

2
Cargo.lock generated
View File

@@ -1139,8 +1139,10 @@ name = "synapse_compress_state"
version = "0.1.0"
dependencies = [
"clap",
"env_logger 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"indicatif",
"jemallocator",
"log",
"openssl",
"postgres",
"postgres-openssl",

View File

@@ -18,6 +18,8 @@ postgres-openssl = "0.5.0"
rand = "0.8.0"
rayon = "1.3.0"
string_cache = "0.8.0"
env_logger = "0.9.0"
log = "0.4.14"
[dependencies.state-map]
git = "https://github.com/matrix-org/rust-matrix-state-map"

View File

@@ -357,9 +357,9 @@ fn functions_are_self_consistent() {
pub fn setup_logger() {
// setup the logger for the auto_compressor
// The default can be overwritten with COMPRESSOR_LOG_LEVEL
// The default can be overwritten with RUST_LOG
// see the README for more information <--- TODO
if env::var("COMPRESSOR_LOG_LEVEL").is_err() {
if env::var("RUST_LOG").is_err() {
let mut log_builder = env_logger::builder();
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
log_builder.is_test(true);
@@ -370,8 +370,8 @@ pub fn setup_logger() {
// use try_init() incase the logger has been setup by some previous test
let _ = log_builder.try_init();
} else {
// If COMPRESSOR_LOG_LEVEL was set then use that
let mut log_builder = env_logger::Builder::from_env("COMPRESSOR_LOG_LEVEL");
// If RUST_LOG was set then use that
let mut log_builder = env_logger::Builder::from_env("RUST_LOG");
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
log_builder.is_test(true);
// use try_init() in case the logger has been setup by some previous test

View File

@@ -2,13 +2,14 @@ use auto_compressor::state_saving::{
connect_to_database, create_tables_if_needed, read_room_compressor_state,
write_room_compressor_state,
};
use compressor_integration_tests::{clear_compressor_state, DB_URL};
use compressor_integration_tests::{clear_compressor_state, setup_logger, DB_URL};
use serial_test::serial;
use synapse_compress_state::Level;
#[test]
#[serial(db)]
fn write_then_read_state_gives_correct_results() {
setup_logger();
let mut client = connect_to_database(DB_URL).unwrap();
create_tables_if_needed(&mut client).unwrap();
clear_compressor_state();

View File

@@ -7,7 +7,7 @@ use compressor_integration_tests::{
compressed_3_3_from_0_to_13_with_state, line_segments_with_state, line_with_state,
structure_from_edges_with_state,
},
DB_URL,
setup_logger, DB_URL,
};
use serial_test::serial;
use synapse_compress_state::{run, Config};
@@ -22,6 +22,7 @@ use synapse_compress_state::{run, Config};
#[test]
#[serial(db)]
fn run_succeeds_without_crashing() {
setup_logger();
// This starts with the following structure
//
// 0-1-2-3-4-5-6-7-8-9-10-11-12-13
@@ -67,6 +68,7 @@ fn run_succeeds_without_crashing() {
#[test]
#[serial(db)]
fn changes_commited_if_no_min_saved_rows() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -132,6 +134,7 @@ fn changes_commited_if_no_min_saved_rows() {
#[test]
#[serial(db)]
fn changes_commited_if_min_saved_rows_exceeded() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -197,6 +200,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
#[test]
#[serial(db)]
fn changes_not_commited_if_fewer_than_min_saved_rows() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -263,6 +267,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
#[test]
#[should_panic(expected = "Error connecting to the database:")]
fn run_panics_if_invalid_db_url() {
setup_logger();
// set up the config options
let db_url = "thisIsAnInvalidURL".to_string();
let room_id = "room1".to_string();
@@ -298,6 +303,7 @@ fn run_panics_if_invalid_db_url() {
#[test]
#[serial(db)]
fn run_only_affects_given_room_id() {
setup_logger();
// build room1 stuff up
// This starts with the following structure
//
@@ -374,6 +380,7 @@ fn run_only_affects_given_room_id() {
#[test]
#[serial(db)]
fn run_respects_groups_to_compress() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -456,6 +463,7 @@ fn run_respects_groups_to_compress() {
#[test]
#[serial(db)]
fn run_is_idempotent_when_run_on_whole_room() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13

View File

@@ -2,7 +2,7 @@ use compressor_integration_tests::{
add_contents_to_database, database_collapsed_states_match_map, database_structure_matches_map,
empty_database,
map_builder::{compressed_3_3_from_0_to_13_with_state, line_segments_with_state},
DB_URL,
setup_logger, DB_URL,
};
use serial_test::serial;
use synapse_compress_state::{continue_run, Level};
@@ -13,6 +13,7 @@ use synapse_compress_state::{continue_run, Level};
#[test]
#[serial(db)]
fn continue_run_called_twice_same_as_run() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use indicatif::{ProgressBar, ProgressStyle};
use log::{debug, trace};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use postgres::{fallible_iterator::FallibleIterator, types::ToSql, Client};
use postgres_openssl::MakeTlsConnector;
@@ -40,7 +41,6 @@ use super::StateGroupEntry;
/// * `max_state_group` - If specified, then only fetch the entries for state
/// groups lower than or equal to this number.
/// * 'groups_to_compress' - The number of groups to get from the database before stopping
pub fn get_data_from_db(
db_url: &str,
room_id: &str,
@@ -59,7 +59,6 @@ pub fn get_data_from_db(
// Search for the group id of the groups_to_compress'th group after min_state_group
// If this is saved, then the compressor can continue by having min_state_group being
// set to this maximum. If no such group can be found then return None.
let max_group_found = find_max_group(
&mut client,
room_id,
@@ -222,7 +221,7 @@ fn load_map_from_db(
max_group_found,
));
println!("Got initial state from database. Checking for any missing state groups...");
debug!("Got initial state from database. Checking for any missing state groups...");
// Due to reasons some of the state groups appear in the edges table, but
// not in the state_groups_state table.
@@ -251,14 +250,14 @@ fn load_map_from_db(
.collect();
if missing_sgs.is_empty() {
// println!("No missing state groups");
trace!("No missing state groups");
break;
}
missing_sgs.sort_unstable();
missing_sgs.dedup();
// println!("Missing {} state groups", missing_sgs.len());
trace!("Missing {} state groups", missing_sgs.len());
// find state groups not picked up already and add them to the map
let map = get_missing_from_db(client, &missing_sgs, min_state_group, max_group_found);
@@ -535,7 +534,7 @@ pub fn send_changes_to_db(
let mut client = Client::connect(db_url, connector).unwrap();
println!("Writing changes...");
debug!("Writing changes...");
// setup the progress bar
let pb: ProgressBar;

View File

@@ -20,6 +20,7 @@
// of arguments - this hopefully doesn't make the code unclear
// #[allow(clippy::too_many_arguments)] is therefore used around some functions
use log::{info, warn};
use pyo3::{exceptions, prelude::*};
use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
@@ -290,7 +291,7 @@ impl Config {
pub fn run(mut config: Config) {
// First we need to get the current state groups
println!("Fetching state from DB for room '{}'...", config.room_id);
info!("Fetching state from DB for room '{}'...", config.room_id);
let (state_group_map, max_group_found) = database::get_data_from_db(
&config.db_url,
@@ -301,19 +302,19 @@ pub fn run(mut config: Config) {
)
.unwrap_or_else(|| panic!("No state groups found within this range"));
println!("Fetched state groups up to {}", max_group_found);
info!("Fetched state groups up to {}", max_group_found);
println!("Number of state groups: {}", state_group_map.len());
info!("Number of state groups: {}", state_group_map.len());
let original_summed_size = state_group_map
.iter()
.fold(0, |acc, (_, v)| acc + v.state_map.len());
println!("Number of rows in current table: {}", original_summed_size);
info!("Number of rows in current table: {}", original_summed_size);
// Now we actually call the compression algorithm.
println!("Compressing state...");
info!("Compressing state...");
let compressor = Compressor::compress(&state_group_map, &config.level_sizes.0);
@@ -327,22 +328,22 @@ pub fn run(mut config: Config) {
let ratio = (compressed_summed_size as f64) / (original_summed_size as f64);
println!(
info!(
"Number of rows after compression: {} ({:.2}%)",
compressed_summed_size,
ratio * 100.
);
println!("Compression Statistics:");
println!(
info!("Compression Statistics:");
info!(
" Number of forced resets due to lacking prev: {}",
compressor.stats.resets_no_suitable_prev
);
println!(
info!(
" Number of compressed rows caused by the above: {}",
compressor.stats.resets_no_suitable_prev_size
);
println!(
info!(
" Number of state groups changed: {}",
compressor.stats.state_groups_changed
);
@@ -352,14 +353,14 @@ pub fn run(mut config: Config) {
}
if ratio > 1.0 {
println!("This compression would not remove any rows. Exiting.");
warn!("This compression would not remove any rows. Exiting.");
return;
}
if let Some(min) = config.min_saved_rows {
let saving = (original_summed_size - compressed_summed_size) as i32;
if saving < min {
println!(
warn!(
"Only {} rows would be saved by this compression. Skipping output.",
saving
);
@@ -485,7 +486,7 @@ fn output_sql(
return;
}
println!("Writing changes...");
info!("Writing changes...");
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
@@ -559,7 +560,7 @@ pub fn continue_run(
let ratio = (new_num_rows as f64) / (original_num_rows as f64);
if ratio > 1.0 {
println!("This compression would not remove any rows. Aborting.");
warn!("This compression would not remove any rows. Aborting.");
return Some(ChunkStats {
new_level_info: compressor.get_level_info(),
last_compressed_group: max_group_found,
@@ -600,7 +601,7 @@ fn check_that_maps_match(
old_map: &BTreeMap<i64, StateGroupEntry>,
new_map: &BTreeMap<i64, StateGroupEntry>,
) {
println!("Checking that state maps match...");
info!("Checking that state maps match...");
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
@@ -625,10 +626,10 @@ fn check_that_maps_match(
pb.inc(1);
if expected != actual {
println!("State Group: {}", sg);
println!("Expected: {:#?}", expected);
println!("actual: {:#?}", actual);
Err(format!("States for group {} do not match", sg))
Err(format!(
"States for group {} do not match. Expected {:#?}, found {:#?}",
sg, expected, actual
))
} else {
Ok(())
}
@@ -637,7 +638,7 @@ fn check_that_maps_match(
pb.finish();
println!("New state map matches old one");
info!("New state map matches old one");
}
/// Gets the full state for a given group from the map (of deltas)

View File

@@ -20,8 +20,27 @@
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
use log::LevelFilter;
use std::env;
use std::io::Write;
use synapse_compress_state as comp_state;
fn main() {
    // Entry point: configure logging, then run the compressor with
    // options parsed from the command line.
    //
    // Logging defaults can be overridden by setting RUST_LOG.
    // see the README for more information <---- TODO
    match env::var("RUST_LOG") {
        Err(_) => {
            // RUST_LOG is not set, so install this binary's own defaults.
            let mut builder = env_logger::builder();
            // Emit just the message text, without the timestamp/level prefix.
            builder.format(|buf, record| writeln!(buf, "{}", record.args()));
            // Show every debug-level (and above) message from this library.
            builder.filter_module("synapse_compress_state", LevelFilter::Debug);
            builder.init();
        }
        Ok(_) => {
            // RUST_LOG was provided — honour the user's configuration.
            env_logger::Builder::from_env("RUST_LOG").init();
        }
    }
    comp_state::run(comp_state::Config::parse_arguments());
}