All printing replaced by logging (#67)
Cargo.lock (generated; 2 lines changed)
@@ -1139,8 +1139,10 @@ name = "synapse_compress_state"
 version = "0.1.0"
 dependencies = [
  "clap",
+ "env_logger 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "indicatif",
  "jemallocator",
+ "log",
  "openssl",
  "postgres",
  "postgres-openssl",
Cargo.toml

@@ -18,6 +18,8 @@ postgres-openssl = "0.5.0"
 rand = "0.8.0"
 rayon = "1.3.0"
 string_cache = "0.8.0"
+env_logger = "0.9.0"
+log = "0.4.14"
 
 [dependencies.state-map]
 git = "https://github.com/matrix-org/rust-matrix-state-map"
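Together, the manifest changes above pull in the standard Rust logging stack: the log crate supplies the facade macros (trace!, debug!, info!, warn!) that replace println! throughout this commit, while env_logger is the backend that turns those records into terminal output. A minimal sketch of how the pair behaves (not code from this repository):

    use log::{debug, error, info};

    fn main() {
        // Install the env_logger backend; the filter comes from RUST_LOG.
        // With RUST_LOG unset, env_logger 0.9 only lets error records through.
        env_logger::init();

        error!("shown under the default filter");
        info!("shown when run with RUST_LOG=info or a more verbose setting");
        debug!("shown with RUST_LOG=debug or RUST_LOG=trace");
    }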
@@ -357,9 +357,9 @@ fn functions_are_self_consistent() {
 
 pub fn setup_logger() {
     // setup the logger for the auto_compressor
-    // The default can be overwritten with COMPRESSOR_LOG_LEVEL
+    // The default can be overwritten with RUST_LOG
     // see the README for more information <--- TODO
-    if env::var("COMPRESSOR_LOG_LEVEL").is_err() {
+    if env::var("RUST_LOG").is_err() {
         let mut log_builder = env_logger::builder();
         // set is_test(true) so that the output is hidden by cargo test (unless the test fails)
         log_builder.is_test(true);
@@ -370,8 +370,8 @@ pub fn setup_logger() {
         // use try_init() incase the logger has been setup by some previous test
         let _ = log_builder.try_init();
     } else {
-        // If COMPRESSOR_LOG_LEVEL was set then use that
-        let mut log_builder = env_logger::Builder::from_env("COMPRESSOR_LOG_LEVEL");
+        // If RUST_LOG was set then use that
+        let mut log_builder = env_logger::Builder::from_env("RUST_LOG");
         // set is_test(true) so that the output is hidden by cargo test (unless the test fails)
         log_builder.is_test(true);
         // use try_init() in case the logger has been setup by some previous test
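Read together, the two hunks above give setup_logger() roughly the following shape. The lines between the hunks are not part of this diff, so the default filter below is an assumption (Debug is a guess); everything else follows the context lines:

    use log::LevelFilter;
    use std::env;

    pub fn setup_logger() {
        // The default can be overwritten with RUST_LOG
        if env::var("RUST_LOG").is_err() {
            let mut log_builder = env_logger::builder();
            // set is_test(true) so that the output is hidden by cargo test
            // (unless the test fails)
            log_builder.is_test(true);
            // ASSUMPTION: the elided lines pick a default level; Debug is a guess
            log_builder.filter_level(LevelFilter::Debug);
            // use try_init() in case the logger has been set up by some previous test
            let _ = log_builder.try_init();
        } else {
            // If RUST_LOG was set then use that
            let mut log_builder = env_logger::Builder::from_env("RUST_LOG");
            log_builder.is_test(true);
            let _ = log_builder.try_init();
        }
    }

Because try_init() simply returns an Err when a global logger is already installed, every test below can call setup_logger() unconditionally; only the first call in a test binary takes effect.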
@@ -2,13 +2,14 @@ use auto_compressor::state_saving::{
     connect_to_database, create_tables_if_needed, read_room_compressor_state,
     write_room_compressor_state,
 };
-use compressor_integration_tests::{clear_compressor_state, DB_URL};
+use compressor_integration_tests::{clear_compressor_state, setup_logger, DB_URL};
 use serial_test::serial;
 use synapse_compress_state::Level;
 
 #[test]
 #[serial(db)]
 fn write_then_read_state_gives_correct_results() {
+    setup_logger();
     let mut client = connect_to_database(DB_URL).unwrap();
     create_tables_if_needed(&mut client).unwrap();
     clear_compressor_state();
@@ -7,7 +7,7 @@ use compressor_integration_tests::{
         compressed_3_3_from_0_to_13_with_state, line_segments_with_state, line_with_state,
         structure_from_edges_with_state,
     },
-    DB_URL,
+    setup_logger, DB_URL,
 };
 use serial_test::serial;
 use synapse_compress_state::{run, Config};
@@ -22,6 +22,7 @@ use synapse_compress_state::{run, Config};
 #[test]
 #[serial(db)]
 fn run_succeeds_without_crashing() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2-3-4-5-6-7-8-9-10-11-12-13
@@ -67,6 +68,7 @@ fn run_succeeds_without_crashing() {
 #[test]
 #[serial(db)]
 fn changes_commited_if_no_min_saved_rows() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -132,6 +134,7 @@ fn changes_commited_if_no_min_saved_rows() {
 #[test]
 #[serial(db)]
 fn changes_commited_if_min_saved_rows_exceeded() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -197,6 +200,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
 #[test]
 #[serial(db)]
 fn changes_not_commited_if_fewer_than_min_saved_rows() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -263,6 +267,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
 #[test]
 #[should_panic(expected = "Error connecting to the database:")]
 fn run_panics_if_invalid_db_url() {
+    setup_logger();
     // set up the config options
     let db_url = "thisIsAnInvalidURL".to_string();
     let room_id = "room1".to_string();
@@ -298,6 +303,7 @@ fn run_panics_if_invalid_db_url() {
 #[test]
 #[serial(db)]
 fn run_only_affects_given_room_id() {
+    setup_logger();
     // build room1 stuff up
     // This starts with the following structure
     //
@@ -374,6 +380,7 @@ fn run_only_affects_given_room_id() {
 #[test]
 #[serial(db)]
 fn run_respects_groups_to_compress() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -456,6 +463,7 @@ fn run_respects_groups_to_compress() {
 #[test]
 #[serial(db)]
 fn run_is_idempotent_when_run_on_whole_room() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -2,7 +2,7 @@ use compressor_integration_tests::{
     add_contents_to_database, database_collapsed_states_match_map, database_structure_matches_map,
     empty_database,
     map_builder::{compressed_3_3_from_0_to_13_with_state, line_segments_with_state},
-    DB_URL,
+    setup_logger, DB_URL,
 };
 use serial_test::serial;
 use synapse_compress_state::{continue_run, Level};
@@ -13,6 +13,7 @@ use synapse_compress_state::{continue_run, Level};
 #[test]
 #[serial(db)]
 fn continue_run_called_twice_same_as_run() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
src/database.rs

@@ -13,6 +13,7 @@
 // limitations under the License.
 
 use indicatif::{ProgressBar, ProgressStyle};
+use log::{debug, trace};
 use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
 use postgres::{fallible_iterator::FallibleIterator, types::ToSql, Client};
 use postgres_openssl::MakeTlsConnector;
@@ -40,7 +41,6 @@ use super::StateGroupEntry;
 /// * `max_state_group` - If specified, then only fetch the entries for state
 /// groups lower than or equal to this number.
 /// * 'groups_to_compress' - The number of groups to get from the database before stopping
-
 pub fn get_data_from_db(
     db_url: &str,
     room_id: &str,
@@ -59,7 +59,6 @@ pub fn get_data_from_db(
     // Search for the group id of the groups_to_compress'th group after min_state_group
     // If this is saved, then the compressor can continue by having min_state_group being
     // set to this maximum. If no such group can be found then return None.
-
     let max_group_found = find_max_group(
         &mut client,
         room_id,
@@ -222,7 +221,7 @@ fn load_map_from_db(
         max_group_found,
     ));
 
-    println!("Got initial state from database. Checking for any missing state groups...");
+    debug!("Got initial state from database. Checking for any missing state groups...");
 
     // Due to reasons some of the state groups appear in the edges table, but
     // not in the state_groups_state table.
@@ -251,14 +250,14 @@ fn load_map_from_db(
             .collect();
 
         if missing_sgs.is_empty() {
-            // println!("No missing state groups");
+            trace!("No missing state groups");
             break;
         }
 
         missing_sgs.sort_unstable();
         missing_sgs.dedup();
 
-        // println!("Missing {} state groups", missing_sgs.len());
+        trace!("Missing {} state groups", missing_sgs.len());
 
         // find state groups not picked up already and add them to the map
         let map = get_missing_from_db(client, &missing_sgs, min_state_group, max_group_found);
@@ -535,7 +534,7 @@ pub fn send_changes_to_db(
 
     let mut client = Client::connect(db_url, connector).unwrap();
 
-    println!("Writing changes...");
+    debug!("Writing changes...");
 
     // setup the progress bar
     let pb: ProgressBar;
src/lib.rs (41 lines changed)
@@ -20,6 +20,7 @@
 // of arguments - this hopefully doesn't make the code unclear
 // #[allow(clippy::too_many_arguments)] is therefore used around some functions
 
+use log::{info, warn};
 use pyo3::{exceptions, prelude::*};
 
 use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
@@ -290,7 +291,7 @@ impl Config {
 
 pub fn run(mut config: Config) {
     // First we need to get the current state groups
-    println!("Fetching state from DB for room '{}'...", config.room_id);
+    info!("Fetching state from DB for room '{}'...", config.room_id);
 
     let (state_group_map, max_group_found) = database::get_data_from_db(
         &config.db_url,
@@ -301,19 +302,19 @@ pub fn run(mut config: Config) {
     )
     .unwrap_or_else(|| panic!("No state groups found within this range"));
 
-    println!("Fetched state groups up to {}", max_group_found);
+    info!("Fetched state groups up to {}", max_group_found);
 
-    println!("Number of state groups: {}", state_group_map.len());
+    info!("Number of state groups: {}", state_group_map.len());
 
     let original_summed_size = state_group_map
         .iter()
         .fold(0, |acc, (_, v)| acc + v.state_map.len());
 
-    println!("Number of rows in current table: {}", original_summed_size);
+    info!("Number of rows in current table: {}", original_summed_size);
 
     // Now we actually call the compression algorithm.
 
-    println!("Compressing state...");
+    info!("Compressing state...");
 
     let compressor = Compressor::compress(&state_group_map, &config.level_sizes.0);
 
@@ -327,22 +328,22 @@ pub fn run(mut config: Config) {
 
     let ratio = (compressed_summed_size as f64) / (original_summed_size as f64);
 
-    println!(
+    info!(
         "Number of rows after compression: {} ({:.2}%)",
         compressed_summed_size,
         ratio * 100.
     );
 
-    println!("Compression Statistics:");
-    println!(
+    info!("Compression Statistics:");
+    info!(
         " Number of forced resets due to lacking prev: {}",
         compressor.stats.resets_no_suitable_prev
     );
-    println!(
+    info!(
         " Number of compressed rows caused by the above: {}",
         compressor.stats.resets_no_suitable_prev_size
     );
-    println!(
+    info!(
         " Number of state groups changed: {}",
         compressor.stats.state_groups_changed
     );
@@ -352,14 +353,14 @@ pub fn run(mut config: Config) {
     }
 
     if ratio > 1.0 {
-        println!("This compression would not remove any rows. Exiting.");
+        warn!("This compression would not remove any rows. Exiting.");
         return;
     }
 
     if let Some(min) = config.min_saved_rows {
         let saving = (original_summed_size - compressed_summed_size) as i32;
         if saving < min {
-            println!(
+            warn!(
                 "Only {} rows would be saved by this compression. Skipping output.",
                 saving
             );
@@ -485,7 +486,7 @@ fn output_sql(
         return;
     }
 
-    println!("Writing changes...");
+    info!("Writing changes...");
 
     let pb: ProgressBar;
     if cfg!(feature = "no-progress-bars") {
@@ -559,7 +560,7 @@ pub fn continue_run(
     let ratio = (new_num_rows as f64) / (original_num_rows as f64);
 
     if ratio > 1.0 {
-        println!("This compression would not remove any rows. Aborting.");
+        warn!("This compression would not remove any rows. Aborting.");
         return Some(ChunkStats {
             new_level_info: compressor.get_level_info(),
             last_compressed_group: max_group_found,
@@ -600,7 +601,7 @@ fn check_that_maps_match(
     old_map: &BTreeMap<i64, StateGroupEntry>,
     new_map: &BTreeMap<i64, StateGroupEntry>,
 ) {
-    println!("Checking that state maps match...");
+    info!("Checking that state maps match...");
 
     let pb: ProgressBar;
     if cfg!(feature = "no-progress-bars") {
@@ -625,10 +626,10 @@ fn check_that_maps_match(
             pb.inc(1);
 
             if expected != actual {
-                println!("State Group: {}", sg);
-                println!("Expected: {:#?}", expected);
-                println!("actual: {:#?}", actual);
-                Err(format!("States for group {} do not match", sg))
+                Err(format!(
+                    "States for group {} do not match. Expected {:#?}, found {:#?}",
+                    sg, expected, actual
+                ))
             } else {
                 Ok(())
             }
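The mismatch diagnostics now travel inside the Err value instead of being printed at the point of detection, so they reach whoever logs or propagates the error. A toy illustration of the pattern (check_pair and its types are hypothetical stand-ins, not code from this repository):

    // Stand-in for the per-group comparison above.
    fn check_pair(sg: i64, expected: i32, actual: i32) -> Result<(), String> {
        if expected != actual {
            Err(format!(
                "States for group {} do not match. Expected {:#?}, found {:#?}",
                sg, expected, actual
            ))
        } else {
            Ok(())
        }
    }

    fn main() {
        assert!(check_pair(1, 2, 2).is_ok());
        assert_eq!(
            check_pair(1, 2, 3).unwrap_err(),
            "States for group 1 do not match. Expected 2, found 3"
        );
    }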
@@ -637,7 +638,7 @@ fn check_that_maps_match(
 
     pb.finish();
 
-    println!("New state map matches old one");
+    info!("New state map matches old one");
 }
 
 /// Gets the full state for a given group from the map (of deltas)
src/main.rs (19 lines changed)
@@ -20,8 +20,27 @@
 #[global_allocator]
 static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
 
+use log::LevelFilter;
+use std::env;
+use std::io::Write;
+
 use synapse_compress_state as comp_state;
 
 fn main() {
+    // setup the logger
+    // The default can be overwritten with RUST_LOG
+    // see the README for more information <---- TODO
+    if env::var("RUST_LOG").is_err() {
+        let mut log_builder = env_logger::builder();
+        // Only output the log message (and not the prefixed timestamp etc.)
+        log_builder.format(|buf, record| writeln!(buf, "{}", record.args()));
+        // By default print all of the debugging messages from this library
+        log_builder.filter_module("synapse_compress_state", LevelFilter::Debug);
+        log_builder.init();
+    } else {
+        // If RUST_LOG was set then use that
+        env_logger::Builder::from_env("RUST_LOG").init();
+    }
+
     comp_state::run(comp_state::Config::parse_arguments());
 }
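The format closure above keeps the CLI output as plain as the old println! lines: only the message is written, with none of env_logger's default timestamp/level/module prefix. A standalone sketch of the same builder calls (filter_level stands in for the commit's crate-scoped filter_module, so the sketch works regardless of crate name):

    use log::{info, LevelFilter};
    use std::io::Write;

    fn main() {
        let mut log_builder = env_logger::builder();
        // Write only the message itself, dropping the default
        // "[<timestamp> <LEVEL> <module>]" prefix.
        log_builder.format(|buf, record| writeln!(buf, "{}", record.args()));
        // Let Debug and above through everywhere; the commit instead scopes
        // this to its own crate with filter_module.
        log_builder.filter_level(LevelFilter::Debug);
        log_builder.init();

        info!("looks just like println! output");
    }

Running the binary with RUST_LOG set (for example RUST_LOG=trace) takes the else branch instead, which keeps env_logger's standard formatting.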