Add method that compresses next chunk of room (#64)
This commit is contained in:
@@ -14,6 +14,8 @@ postgres-openssl = "0.5.0"
|
||||
rand = "0.8.0"
|
||||
synapse_compress_state = { path = "../" }
|
||||
auto_compressor = { path = "../auto_compressor/" }
|
||||
env_logger = "0.9.0"
|
||||
log = "0.4.14"
|
||||
|
||||
[dependencies.state-map]
|
||||
git = "https://github.com/matrix-org/rust-matrix-state-map"
|
||||
@@ -1,9 +1,10 @@
|
||||
use log::LevelFilter;
|
||||
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
|
||||
use postgres::{fallible_iterator::FallibleIterator, Client};
|
||||
use postgres_openssl::MakeTlsConnector;
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
use state_map::StateMap;
|
||||
use std::{borrow::Cow, collections::BTreeMap, fmt};
|
||||
use std::{borrow::Cow, collections::BTreeMap, env, fmt};
|
||||
use string_cache::DefaultAtom as Atom;
|
||||
|
||||
use synapse_compress_state::StateGroupEntry;
|
||||
@@ -352,3 +353,27 @@ fn functions_are_self_consistent() {
|
||||
assert!(database_collapsed_states_match_map(&initial));
|
||||
assert!(database_structure_matches_map(&initial));
|
||||
}
|
||||
|
||||
pub fn setup_logger() {
|
||||
// setup the logger for the auto_compressor
|
||||
// The default can be overwritten with COMPRESSOR_LOG_LEVEL
|
||||
// see the README for more information <--- TODO
|
||||
if env::var("COMPRESSOR_LOG_LEVEL").is_err() {
|
||||
let mut log_builder = env_logger::builder();
|
||||
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
|
||||
log_builder.is_test(true);
|
||||
// default to printing the debug information for both packages being tested
|
||||
// (Note that just setting the global level to debug will log every sql transaction)
|
||||
log_builder.filter_module("synapse_compress_state", LevelFilter::Debug);
|
||||
log_builder.filter_module("auto_compressor", LevelFilter::Debug);
|
||||
// use try_init() incase the logger has been setup by some previous test
|
||||
let _ = log_builder.try_init();
|
||||
} else {
|
||||
// If COMPRESSOR_LOG_LEVEL was set then use that
|
||||
let mut log_builder = env_logger::Builder::from_env("COMPRESSOR_LOG_LEVEL");
|
||||
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
|
||||
log_builder.is_test(true);
|
||||
// use try_init() in case the logger has been setup by some previous test
|
||||
let _ = log_builder.try_init();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,65 @@
|
||||
use auto_compressor::{
|
||||
manager::run_compressor_on_room_chunk,
|
||||
state_saving::{connect_to_database, create_tables_if_needed},
|
||||
};
|
||||
use compressor_integration_tests::{
|
||||
add_contents_to_database, clear_compressor_state, database_collapsed_states_match_map,
|
||||
database_structure_matches_map, empty_database,
|
||||
map_builder::{compressed_3_3_from_0_to_13_with_state, line_segments_with_state},
|
||||
setup_logger, DB_URL,
|
||||
};
|
||||
use serial_test::serial;
|
||||
use synapse_compress_state::Level;
|
||||
|
||||
#[test]
#[serial(db)]
fn run_compressor_on_room_chunk_works() {
    setup_logger();

    // Initial database layout:
    //
    // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
    //
    // where each group i has state:
    //   ('node','is', i)
    //   ('group', j, 'seen') - for all j less than i
    let original_state = line_segments_with_state(0, 13);
    empty_database();
    add_contents_to_database("room1", &original_state);

    let mut client = connect_to_database(DB_URL).unwrap();
    create_tables_if_needed(&mut client).unwrap();
    clear_compressor_state();

    // Use the default 3,3 level sizes for compression.
    let level_info = vec![Level::restore(3, 0, None), Level::restore(3, 0, None)];

    // First chunk: compress the first 7 groups in the room. Afterwards the
    // structure (showing only compressed groups) should be:
    //
    // 0  3\
    // 1  4 6
    // 2  5
    run_compressor_on_room_chunk(DB_URL, "room1", 7, &level_info).unwrap();

    // Second chunk: compress the next 7 groups.
    run_compressor_on_room_chunk(DB_URL, "room1", 7, &level_info).unwrap();

    // After both chunks the database should hold the structure below
    // (i.e. groups 6 and 9 have changed from before), saving 11 rows:
    //
    // 0  3\      12
    // 1  4 6\    13
    // 2  5  7  9
    //       8  10
    //          11
    let expected = compressed_3_3_from_0_to_13_with_state();

    // The collapsed state for every group must still be correct...
    assert!(database_collapsed_states_match_map(&original_state));

    // ...and the predecessor structure must match the expected shape.
    assert!(database_structure_matches_map(&expected));
}
|
||||
@@ -35,7 +35,7 @@ fn continue_run_called_twice_same_as_run() {
|
||||
|
||||
// compress in 3,3 level sizes
|
||||
// since the compressor hasn't been run before they are empty
|
||||
let level_info = vec![Level::restore(3, 0, None), Level::restore(3, 0, None)];
|
||||
let level_info = vec![Level::new(3), Level::new(3)];
|
||||
|
||||
// Run the compressor with those settings
|
||||
let chunk_stats_1 = continue_run(start, chunk_size, &db_url, &room_id, &level_info).unwrap();
|
||||
|
||||
Reference in New Issue
Block a user