Add option to commit changes to the database automatically (#53)
src/lib.rs
@@ -107,6 +107,9 @@ pub struct Config {
    // Whether or not to output before and after directed graphs (these can be
    // visualised in something like Gephi)
    graphs: bool,
    // Whether or not to commit changes to the database automatically
    // N.B. currently assumes transactions is true (to be on the safe side)
    commit_changes: bool,
}

impl Config {
@@ -209,7 +212,14 @@ impl Config {
                .help("Output before and after graphs")
                .long_help(concat!("If this flag is set then output the node and edge information for",
                    " the state_group directed graph built up from the predecessor state_group links.",
                    " These can be looked at in something like Gephi (https://gephi.org)"))
                    " These can be looked at in something like Gephi (https://gephi.org)")),
        ).arg(
            Arg::with_name("commit_changes")
                .short("c")
                .help("Commit changes to the database")
                .long_help(concat!("If this flag is set then the changes the compressor makes will",
                    " be committed to the database. This should be safe to use while synapse is running",
                    " as it assumes by default that the transactions flag is set")),
        ).get_matches();

        let db_url = matches
@@ -247,6 +257,8 @@ impl Config {

        let graphs = matches.is_present("graphs");

        let commit_changes = matches.is_present("commit_changes");

        Config {
            db_url: String::from(db_url),
            output_file,
@@ -258,6 +270,7 @@ impl Config {
            level_sizes,
            transactions,
            graphs,
            commit_changes,
        }
    }
}
@@ -358,11 +371,95 @@ pub fn run(mut config: Config) {

    output_sql(&mut config, &state_group_map, new_state_group_map);

    // If commit_changes is set then commit the changes to the database
    if config.commit_changes {
        database::send_changes_to_db(&config, &state_group_map, new_state_group_map);
    }

    if config.graphs {
        graphing::make_graphs(&state_group_map, new_state_group_map);
    }
}

/// Produce SQL code to carry out changes to database.
///
/// It returns an iterator where each call to `next()` will
/// return the SQL to alter a single state group in the database
///
/// # Arguments
///
/// * `old_map` - An iterator through the state group data originally
///   in the database
/// * `new_map` - The state group data generated by the compressor to
///   replace the old contents
/// * `room_id` - The room_id that the compressor was working on
fn generate_sql<'a>(
    old_map: &'a BTreeMap<i64, StateGroupEntry>,
    new_map: &'a BTreeMap<i64, StateGroupEntry>,
    room_id: &'a str,
) -> impl Iterator<Item = String> + 'a {
    old_map.iter().map(move |(sg, old_entry)| {

        let new_entry = &new_map[sg];

        // Check if the new map has a different entry for this state group
        // N.B. also checks if in_range fields agree
        if old_entry != new_entry {
            // the sql commands that will carry out these changes
            let mut sql = String::new();

            // remove the current edge
            sql.push_str(&format!(
                "DELETE FROM state_group_edges WHERE state_group = {};\n",
                sg
            ));

            // if the new entry has a predecessor then put that into state_group_edges
            if let Some(prev_sg) = new_entry.prev_state_group {
                sql.push_str(&format!("INSERT INTO state_group_edges (state_group, prev_state_group) VALUES ({}, {});\n", sg, prev_sg));
            }

            // remove the current deltas for this state group
            sql.push_str(&format!(
                "DELETE FROM state_groups_state WHERE state_group = {};\n",
                sg
            ));

            if !new_entry.state_map.is_empty() {
                // place all the deltas for the state group in the new map into state_groups_state
                sql.push_str("INSERT INTO state_groups_state (state_group, room_id, type, state_key, event_id) VALUES\n");

                let mut first = true;
                for ((t, s), e) in new_entry.state_map.iter() {
                    // Add a comma at the start if not the first row to be inserted
                    if first {
                        sql.push_str(" ");
                        first = false;
                    } else {
                        sql.push_str(" ,");
                    }

                    // write the row to be inserted of the form:
                    // (state_group, room_id, type, state_key, event_id)
                    sql.push_str(&format!(
                        "({}, {}, {}, {}, {})",
                        sg,
                        PGEscape(room_id),
                        PGEscape(t),
                        PGEscape(s),
                        PGEscape(e)
                    ));
                }
                sql.push_str(";\n");
            }

            sql
        } else {
            String::new()
        }
    })
}

/// Produces SQL code to carry out changes and saves it to file
///
/// # Arguments
@@ -393,61 +490,15 @@ fn output_sql(
    pb.enable_steady_tick(100);

    if let Some(output) = &mut config.output_file {
        for (sg, old_entry) in old_map {
            let new_entry = &new_map[sg];

            // N.B. also checks if in_range fields agree
            if old_entry != new_entry {
                if config.transactions {
                    writeln!(output, "BEGIN;").unwrap();
                }
                writeln!(
                    output,
                    "DELETE FROM state_group_edges WHERE state_group = {};",
                    sg
                )
                .unwrap();

                if let Some(prev_sg) = new_entry.prev_state_group {
                    writeln!(output, "INSERT INTO state_group_edges (state_group, prev_state_group) VALUES ({}, {});", sg, prev_sg).unwrap();
                }

                writeln!(
                    output,
                    "DELETE FROM state_groups_state WHERE state_group = {};",
                    sg
                )
                .unwrap();
                if !new_entry.state_map.is_empty() {
                    writeln!(output, "INSERT INTO state_groups_state (state_group, room_id, type, state_key, event_id) VALUES").unwrap();
                    let mut first = true;
                    for ((t, s), e) in new_entry.state_map.iter() {
                        if first {
                            write!(output, " ").unwrap();
                            first = false;
                        } else {
                            write!(output, " ,").unwrap();
                        }
                        writeln!(
                            output,
                            "({}, {}, {}, {}, {})",
                            sg,
                            PGEscape(&config.room_id),
                            PGEscape(t),
                            PGEscape(s),
                            PGEscape(e)
                        )
                        .unwrap();
                    }
                    writeln!(output, ";").unwrap();
                }

                if config.transactions {
                    writeln!(output, "COMMIT;").unwrap();
                }
                writeln!(output).unwrap();
        for mut sql_transaction in generate_sql(old_map, new_map, &config.room_id) {
            if config.transactions {
                sql_transaction.insert_str(0, "BEGIN;\n");
                sql_transaction.push_str("COMMIT;")
            }

            write!(output, "{}", sql_transaction)
                .expect("Something went wrong while writing SQL to file");

            pb.inc(1);
        }
    }
@@ -557,6 +608,7 @@ impl Config {
        level_sizes: String,
        transactions: bool,
        graphs: bool,
        commit_changes: bool,
    ) -> Result<Config, String> {
        let mut output: Option<File> = None;
        if let Some(file) = output_file {
@@ -583,6 +635,7 @@ impl Config {
            level_sizes,
            transactions,
            graphs,
            commit_changes,
        })
    }
}
@@ -605,7 +658,8 @@ impl Config {
    // have this default to true as it is much worse to not have it if you need it
    // than to have it and not need it
    transactions = true,
    graphs = false
    graphs = false,
    commit_changes = false,
)]
fn run_compression(
    db_url: String,
@@ -618,6 +672,7 @@ fn run_compression(
    level_sizes: String,
    transactions: bool,
    graphs: bool,
    commit_changes: bool,
) -> PyResult<()> {
    let config = Config::new(
        db_url,
@@ -630,6 +685,7 @@ fn run_compression(
        level_sizes,
        transactions,
        graphs,
        commit_changes,
    );
    match config {
        Err(e) => Err(PyErr::new::<exceptions::PyException, _>(e)),
@@ -1071,6 +1127,7 @@ mod pyo3_tests {
        let level_sizes = "100,50,25".to_string();
        let transactions = false;
        let graphs = false;
        let commit_changes = false;

        let config = Config::new(
            db_url.clone(),
@@ -1083,6 +1140,7 @@ mod pyo3_tests {
            level_sizes,
            transactions,
            graphs,
            commit_changes,
        )
        .unwrap();

@@ -1099,6 +1157,7 @@ mod pyo3_tests {
        );
        assert_eq!(config.transactions, transactions);
        assert_eq!(config.graphs, graphs);
        assert_eq!(config.commit_changes, commit_changes);
    }

    #[test]
@@ -1114,6 +1173,7 @@ mod pyo3_tests {
        let level_sizes = "128,64,32".to_string();
        let transactions = true;
        let graphs = true;
        let commit_changes = true;

        let config = Config::new(
            db_url.clone(),
@@ -1126,6 +1186,7 @@ mod pyo3_tests {
            level_sizes,
            transactions,
            graphs,
            commit_changes,
        )
        .unwrap();

@@ -1142,5 +1203,6 @@ mod pyo3_tests {
        );
        assert_eq!(config.transactions, transactions);
        assert_eq!(config.graphs, graphs);
        assert_eq!(config.commit_changes, commit_changes);
    }
}
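
Note: the diff above calls `database::send_changes_to_db`, whose body lives in the `database` module and is not shown here. As a rough, illustrative sketch only (the function name `apply_changes`, its signature, and the use of the `postgres` crate are assumptions, not the code from this commit), applying the per-state-group SQL produced by `generate_sql` when `commit_changes` is set could look something like this:

// Illustrative sketch only -- not the actual database::send_changes_to_db
// from this commit. Assumes the blocking `postgres` crate.
use postgres::{Client, NoTls};

// Apply each state group's SQL in its own transaction, mirroring the
// per-group BEGIN/COMMIT wrapping used for the SQL file output when the
// `transactions` option is set.
fn apply_changes(
    db_url: &str,
    sql_for_each_group: impl Iterator<Item = String>,
) -> Result<(), postgres::Error> {
    let mut client = Client::connect(db_url, NoTls)?;

    for sql in sql_for_each_group {
        // generate_sql yields an empty string for unchanged state groups
        if sql.is_empty() {
            continue;
        }

        let mut txn = client.transaction()?;
        txn.batch_execute(&sql)?;
        txn.commit()?;
    }

    Ok(())
}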