diff --git a/src/compressor.rs b/src/compressor.rs
index 353ec80..73491be 100644
--- a/src/compressor.rs
+++ b/src/compressor.rs
@@ -156,7 +156,7 @@ impl<'a> Compressor<'a> {
     ) -> Compressor<'a> {
         let levels = level_info
             .iter()
-            .map(|l| Level::restore((*l).max_length, (*l).current_chain_length, (*l).head))
+            .map(|l| Level::restore(l.max_length, l.current_chain_length, l.head))
             .collect();
 
         let mut compressor = Compressor {
diff --git a/src/database.rs b/src/database.rs
index 9c5fdb4..72da347 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -237,15 +237,9 @@ fn load_map_from_db(
     let mut missing_sgs: Vec<_> = state_group_map
         .iter()
         .filter_map(|(_sg, entry)| {
-            if let Some(prev_sg) = entry.prev_state_group {
-                if state_group_map.contains_key(&prev_sg) {
-                    None
-                } else {
-                    Some(prev_sg)
-                }
-            } else {
-                None
-            }
+            entry
+                .prev_state_group
+                .filter(|&prev_sg| !state_group_map.contains_key(&prev_sg))
         })
         .collect();
 
diff --git a/src/lib.rs b/src/lib.rs
index 4d97041..f0ccb09 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -563,7 +563,7 @@ pub fn continue_run(
     let (state_group_map, max_group_found) =
         database::reload_data_from_db(db_url, room_id, start, Some(chunk_size), level_info)?;
 
-    let original_num_rows = state_group_map.iter().map(|(_, v)| v.state_map.len()).sum();
+    let original_num_rows = state_group_map.values().map(|v| v.state_map.len()).sum();
 
     // Now we actually call the compression algorithm.
     let compressor = Compressor::compress_from_save(&state_group_map, level_info);