9 Commits

Author SHA1 Message Date
David Robertson
dd62afb3d5 Update lockfile; drop Python 3.6 support
Python 3.6 EOLed at the end of 2021, see https://endoflife.date/python.
(pyO3 was refusing to build against 3.6).
2022-07-07 19:23:33 +01:00
David Robertson
65ffce2362 Tag v0.1.3 2022-07-07 19:13:47 +01:00
Jan Alexander Steffens
b4f3d8adbd Fix clippy warnings, update dependencies (#91) 2022-06-06 10:34:07 +01:00
reivilibre
04ac0529e1 Merge pull request #86 from saces/saces/fixlogfile 2022-03-16 13:58:37 +00:00
saces
d6df1ac5a4 remove log_file leftovers
PR #74 (Log to stderr not to a file) did not remove all references to
log_file, the file was still created, but remained empty.

The synapse_auto_compressor failed if run in a read only environment.

Signed-off-by: saces <saces@c-base.org>
2022-03-13 00:13:33 +01:00
saces
32e43da3e8 Add Docker files (#83)
Signed-off-by: saces <saces@c-base.org>
2022-02-17 10:28:43 +00:00
Jan Alexander Steffens
4c7316311b Update dependencies, use tikv-jemallocator (#73)
Signed-off-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
2021-10-26 08:54:56 +01:00
Jan Alexander Steffens
83e8dedfa9 lib: New argument -N to suppress verification (#26)
Signed-off-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
2021-10-26 08:50:34 +01:00
Sean Quah
38d800a775 Configure @matrix-org/synapse-core to be the code owner for the repo (#76)
Signed-off-by: Sean Quah <seanq@element.io>
2021-10-25 14:32:07 +01:00
17 changed files with 544 additions and 459 deletions

3
.dockerignore Normal file
View File

@@ -0,0 +1,3 @@
.git
.github
/target

2
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1,2 @@
# Automatically request reviews from the synapse-core team when a pull request comes in.
* @matrix-org/synapse-core

726
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,9 +9,7 @@ version = "0.1.0"
edition = "2018"
[dependencies]
clap = "2.33.0"
indicatif = "0.16.0"
jemallocator = "0.3.2"
openssl = "0.10.32"
postgres = "0.19.0"
postgres-openssl = "0.5.0"
@@ -20,7 +18,7 @@ rayon = "1.3.0"
string_cache = "0.8.0"
env_logger = "0.9.0"
log = "0.4.14"
pyo3-log = "0.4.0"
pyo3-log = "0.6.0"
log-panics = "2.0.0"
[dependencies.state-map]
@@ -30,11 +28,19 @@ git = "https://github.com/matrix-org/rust-matrix-state-map"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies.clap]
version = "3.1.14"
features = ["cargo"]
[dependencies.pyo3]
version = "0.14.1"
features = ["extension-module","abi3-py36"]
version = "0.16.4"
features = ["extension-module"]
[dependencies.tikv-jemallocator]
version = "0.5.0"
optional = true
[features]
default = ["jemalloc"]
jemalloc = []
jemalloc = ["tikv-jemallocator"]
no-progress-bars = []

22
Dockerfile Normal file
View File

@@ -0,0 +1,22 @@
FROM rust:alpine AS builder
RUN apk add python3 musl-dev pkgconfig openssl-dev make
ENV RUSTFLAGS="-C target-feature=-crt-static"
WORKDIR /opt/synapse-compressor/
COPY . .
RUN cargo build
WORKDIR /opt/synapse-compressor/synapse_auto_compressor/
RUN cargo build
FROM alpine
RUN apk add --no-cache libgcc
COPY --from=builder /opt/synapse-compressor/target/debug/synapse_compress_state /usr/local/bin/synapse_compress_state
COPY --from=builder /opt/synapse-compressor/target/debug/synapse_auto_compressor /usr/local/bin/synapse_auto_compressor

View File

@@ -197,7 +197,7 @@ $ docker-compose down
# Using the synapse_compress_state library
If you want to use the compressor in another project, it is recommended that you
use jemalloc `https://github.com/gnzlbg/jemallocator`.
use jemalloc `https://github.com/tikv/jemallocator`.
To prevent the progress bars from being shown, use the `no-progress-bars` feature.
(See `synapse_auto_compressor/Cargo.toml` for an example)

View File

@@ -46,10 +46,11 @@ fn run_succeeds_without_crashing() {
let transactions = true;
let graphs = false;
let commit_changes = false;
let verify = true;
let config = Config::new(
db_url.clone(),
room_id.clone(),
db_url,
room_id,
output_file,
min_state_group,
groups_to_compress,
@@ -59,6 +60,7 @@ fn run_succeeds_without_crashing() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -94,6 +96,7 @@ fn changes_commited_if_no_min_saved_rows() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -107,6 +110,7 @@ fn changes_commited_if_no_min_saved_rows() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -160,6 +164,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -173,6 +178,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -227,6 +233,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -240,6 +247,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -280,6 +288,7 @@ fn run_panics_if_invalid_db_url() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -293,6 +302,7 @@ fn run_panics_if_invalid_db_url() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -336,6 +346,7 @@ fn run_only_affects_given_room_id() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -349,6 +360,7 @@ fn run_only_affects_given_room_id() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -406,6 +418,7 @@ fn run_respects_groups_to_compress() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url,
@@ -419,6 +432,7 @@ fn run_respects_groups_to_compress() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -492,6 +506,7 @@ fn run_is_idempotent_when_run_on_whole_room() {
let transactions = true;
let graphs = false;
let commit_changes = true;
let verify = true;
let config1 = Config::new(
db_url.clone(),
@@ -505,21 +520,23 @@ fn run_is_idempotent_when_run_on_whole_room() {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
let config2 = Config::new(
db_url.clone(),
room_id.clone(),
db_url,
room_id,
output_file2,
min_state_group,
groups_to_compress,
min_saved_rows,
max_state_group,
level_sizes.clone(),
level_sizes,
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();

View File

@@ -56,7 +56,7 @@ fn continue_run_called_twice_same_as_run() {
let start = Some(6);
let chunk_size = 7;
let level_info = chunk_stats_1.new_level_info.clone();
let level_info = chunk_stats_1.new_level_info;
// Run the compressor with those settings
let chunk_stats_2 = continue_run(start, chunk_size, &db_url, &room_id, &level_info).unwrap();

View File

@@ -181,12 +181,11 @@ impl<'a> Compressor<'a> {
panic!("Can only call `create_new_tree` once");
}
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
pb = ProgressBar::hidden();
let pb = if cfg!(feature = "no-progress-bars") {
ProgressBar::hidden()
} else {
pb = ProgressBar::new(self.original_state_map.len() as u64);
}
ProgressBar::new(self.original_state_map.len() as u64)
};
pb.set_style(
ProgressStyle::default_bar().template("[{elapsed_precise}] {bar} {pos}/{len} {msg}"),
);

View File

@@ -96,11 +96,7 @@ fn create_new_tree_does_nothing_if_already_compressed() {
let pred_group = initial_edges.get(&i);
// Need Option<i64> not Option<&i64>
let prev;
match pred_group {
Some(i) => prev = Some(*i),
None => prev = None,
}
let prev = pred_group.copied();
// insert that edge into the initial map
initial.insert(

View File

@@ -54,7 +54,7 @@ fn get_head_returns_head() {
#[test]
fn has_space_returns_true_if_empty() {
let l = Level::new(15);
assert_eq!(l.has_space(), true);
assert!(l.has_space());
}
#[test]
@@ -65,7 +65,7 @@ fn has_space_returns_true_if_part_full() {
l.update(1, true);
l.update(143, true);
l.update(15, true);
assert_eq!(l.has_space(), true);
assert!(l.has_space());
}
#[test]
@@ -76,5 +76,5 @@ fn has_space_returns_false_if_full() {
l.update(3, true);
l.update(4, true);
l.update(5, true);
assert_eq!(l.has_space(), false);
assert!(!l.has_space());
}

View File

@@ -145,11 +145,7 @@ fn stats_correct_if_no_changes() {
let pred_group = initial_edges.get(&i);
// Need Option<i64> not Option<&i64>
let prev;
match pred_group {
Some(i) => prev = Some(*i),
None => prev = None,
}
let prev = pred_group.copied();
// insert that edge into the initial map
initial.insert(

View File

@@ -372,12 +372,11 @@ fn get_initial_data_from_db(
// Copy the data from the database into a map
let mut state_group_map: BTreeMap<i64, StateGroupEntry> = BTreeMap::new();
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
pb = ProgressBar::hidden();
let pb = if cfg!(feature = "no-progress-bars") {
ProgressBar::hidden()
} else {
pb = ProgressBar::new_spinner();
}
ProgressBar::new_spinner()
};
pb.set_style(
ProgressStyle::default_spinner().template("{spinner} [{elapsed}] {pos} rows retrieved"),
);
@@ -537,12 +536,11 @@ pub fn send_changes_to_db(
debug!("Writing changes...");
// setup the progress bar
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
pb = ProgressBar::hidden();
let pb = if cfg!(feature = "no-progress-bars") {
ProgressBar::hidden()
} else {
pb = ProgressBar::new(old_map.len() as u64);
}
ProgressBar::new(old_map.len() as u64)
};
pb.set_style(
ProgressStyle::default_bar().template("[{elapsed_precise}] {bar} {pos}/{len} {msg}"),
);

View File

@@ -23,7 +23,7 @@
use log::{info, warn, LevelFilter};
use pyo3::{exceptions, prelude::*};
use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
use clap::{crate_authors, crate_description, crate_name, crate_version, Arg, Command};
use indicatif::{ProgressBar, ProgressStyle};
use rayon::prelude::*;
use state_map::StateMap;
@@ -109,18 +109,21 @@ pub struct Config {
// Whether or not to commit changes to the database automatically
// N.B. currently assumes transactions is true (to be on the safe side)
commit_changes: bool,
// Whether to verify the correctness of the compressed state groups by
// comparing them to the original groups
verify: bool,
}
impl Config {
/// Build up config from command line arguments
pub fn parse_arguments() -> Config {
let matches = App::new(crate_name!())
let matches = Command::new(crate_name!())
.version(crate_version!())
.author(crate_authors!("\n"))
.about(crate_description!())
.arg(
Arg::with_name("postgres-url")
.short("p")
Arg::new("postgres-url")
.short('p')
.value_name("POSTGRES_LOCATION")
.help("The configuration for connecting to the postgres database.")
.long_help(concat!(
@@ -133,8 +136,8 @@ impl Config {
.takes_value(true)
.required(true),
).arg(
Arg::with_name("room_id")
.short("r")
Arg::new("room_id")
.short('r')
.value_name("ROOM_ID")
.help("The room to process")
.long_help(concat!(
@@ -144,23 +147,23 @@ impl Config {
.takes_value(true)
.required(true),
).arg(
Arg::with_name("min_state_group")
.short("b")
Arg::new("min_state_group")
.short('b')
.value_name("MIN_STATE_GROUP")
.help("The state group to start processing from (non inclusive)")
.takes_value(true)
.required(false),
).arg(
Arg::with_name("min_saved_rows")
.short("m")
Arg::new("min_saved_rows")
.short('m')
.value_name("COUNT")
.help("Abort if fewer than COUNT rows would be saved")
.long_help("If the compressor cannot save this many rows from the database then it will stop early")
.takes_value(true)
.required(false),
).arg(
Arg::with_name("groups_to_compress")
.short("n")
Arg::new("groups_to_compress")
.short('n')
.value_name("GROUPS_TO_COMPRESS")
.help("How many groups to load into memory to compress")
.long_help(concat!(
@@ -169,14 +172,14 @@ impl Config {
.takes_value(true)
.required(false),
).arg(
Arg::with_name("output_file")
.short("o")
Arg::new("output_file")
.short('o')
.value_name("FILE")
.help("File to output the changes to in SQL")
.takes_value(true),
).arg(
Arg::with_name("max_state_group")
.short("s")
Arg::new("max_state_group")
.short('s')
.value_name("MAX_STATE_GROUP")
.help("The maximum state group to process up to")
.long_help(concat!(
@@ -185,8 +188,8 @@ impl Config {
.takes_value(true)
.required(false),
).arg(
Arg::with_name("level_sizes")
.short("l")
Arg::new("level_sizes")
.short('l')
.value_name("LEVELS")
.help("Sizes of each new level in the compression algorithm, as a comma separated list.")
.long_help(concat!(
@@ -202,27 +205,34 @@ impl Config {
.default_value("100,50,25")
.takes_value(true),
).arg(
Arg::with_name("transactions")
.short("t")
Arg::new("transactions")
.short('t')
.help("Whether to wrap each state group change in a transaction")
.long_help(concat!("If this flag is set then then each change to a particular",
" state group is wrapped in a transaction. This should be done if you wish to",
" apply the changes while synapse is still running."))
.requires("output_file"),
).arg(
Arg::with_name("graphs")
.short("g")
Arg::new("graphs")
.short('g')
.help("Output before and after graphs")
.long_help(concat!("If this flag is set then output the node and edge information for",
" the state_group directed graph built up from the predecessor state_group links.",
" These can be looked at in something like Gephi (https://gephi.org)")),
).arg(
Arg::with_name("commit_changes")
.short("c")
Arg::new("commit_changes")
.short('c')
.help("Commit changes to the database")
.long_help(concat!("If this flag is set then the changes the compressor makes will",
" be committed to the database. This should be safe to use while synapse is running",
" as it assumes by default that the transactions flag is set")),
).arg(
Arg::new("no_verify")
.short('N')
.help("Do not double-check that the compression was performed correctly")
.long_help(concat!("If this flag is set then the verification of the compressed",
" state groups, which compares them to the original groups, is skipped. This",
" saves time at the cost of potentially generating mismatched state.")),
).get_matches();
let db_url = matches
@@ -253,7 +263,8 @@ impl Config {
.value_of("max_state_group")
.map(|s| s.parse().expect("max_state_group must be an integer"));
let level_sizes = value_t!(matches, "level_sizes", LevelSizes)
let level_sizes = matches
.value_of_t::<LevelSizes>("level_sizes")
.unwrap_or_else(|e| panic!("Unable to parse level_sizes: {}", e));
let transactions = matches.is_present("transactions");
@@ -262,6 +273,8 @@ impl Config {
let commit_changes = matches.is_present("commit_changes");
let verify = !matches.is_present("no_verify");
Config {
db_url: String::from(db_url),
output_file,
@@ -274,6 +287,7 @@ impl Config {
transactions,
graphs,
commit_changes,
verify,
}
}
}
@@ -372,7 +386,9 @@ pub fn run(mut config: Config) {
}
}
if config.verify {
check_that_maps_match(&state_group_map, new_state_group_map);
}
// If we are given an output file, we output the changes as SQL. If the
// `transactions` argument is set we wrap each change to a state group in a
@@ -492,12 +508,11 @@ fn output_sql(
info!("Writing changes...");
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
pb = ProgressBar::hidden();
let pb = if cfg!(feature = "no-progress-bars") {
ProgressBar::hidden()
} else {
pb = ProgressBar::new(old_map.len() as u64);
}
ProgressBar::new(old_map.len() as u64)
};
pb.set_style(
ProgressStyle::default_bar().template("[{elapsed_precise}] {bar} {pos}/{len} {msg}"),
);
@@ -607,12 +622,11 @@ fn check_that_maps_match(
) {
info!("Checking that state maps match...");
let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
pb = ProgressBar::hidden();
let pb = if cfg!(feature = "no-progress-bars") {
ProgressBar::hidden()
} else {
pb = ProgressBar::new(old_map.len() as u64);
}
ProgressBar::new(old_map.len() as u64)
};
pb.set_style(
ProgressStyle::default_bar().template("[{elapsed_precise}] {bar} {pos}/{len} {msg}"),
);
@@ -695,6 +709,7 @@ impl Config {
transactions: bool,
graphs: bool,
commit_changes: bool,
verify: bool,
) -> Result<Config, String> {
let mut output: Option<File> = None;
if let Some(file) = output_file {
@@ -722,6 +737,7 @@ impl Config {
transactions,
graphs,
commit_changes,
verify,
})
}
}
@@ -746,6 +762,7 @@ impl Config {
transactions = true,
graphs = false,
commit_changes = false,
verify = true,
)]
fn run_compression(
db_url: String,
@@ -759,6 +776,7 @@ fn run_compression(
transactions: bool,
graphs: bool,
commit_changes: bool,
verify: bool,
) -> PyResult<()> {
let config = Config::new(
db_url,
@@ -772,6 +790,7 @@ fn run_compression(
transactions,
graphs,
commit_changes,
verify,
);
match config {
Err(e) => Err(PyErr::new::<exceptions::PyException, _>(e)),
@@ -955,7 +974,6 @@ mod lib_tests {
#[test]
fn check_that_maps_match_returns_if_both_empty() {
check_that_maps_match(&BTreeMap::new(), &BTreeMap::new());
assert!(true);
}
#[test]
@@ -988,7 +1006,6 @@ mod lib_tests {
}
check_that_maps_match(&old_map, &BTreeMap::new());
assert!(true);
}
#[test]
@@ -1024,7 +1041,6 @@ mod lib_tests {
}
check_that_maps_match(&BTreeMap::new(), &new_map);
assert!(true);
}
#[test]
@@ -1056,7 +1072,6 @@ mod lib_tests {
}
check_that_maps_match(&BTreeMap::new(), &old_map.clone());
assert!(true);
}
#[test]
@@ -1119,7 +1134,6 @@ mod lib_tests {
}
check_that_maps_match(&old_map, &new_map);
assert!(true);
}
#[test]
@@ -1201,7 +1215,6 @@ mod lib_tests {
);
check_that_maps_match(&old_map, &new_map);
assert!(true);
}
//TODO: tests for correct SQL code produced by output_sql
@@ -1224,6 +1237,7 @@ mod pyo3_tests {
let transactions = false;
let graphs = false;
let commit_changes = false;
let verify = true;
let config = Config::new(
db_url.clone(),
@@ -1237,6 +1251,7 @@ mod pyo3_tests {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
@@ -1270,6 +1285,7 @@ mod pyo3_tests {
let transactions = true;
let graphs = true;
let commit_changes = true;
let verify = true;
let config = Config::new(
db_url.clone(),
@@ -1283,11 +1299,12 @@ mod pyo3_tests {
transactions,
graphs,
commit_changes,
verify,
)
.unwrap();
assert_eq!(config.db_url, db_url);
assert!(!config.output_file.is_none());
assert!(config.output_file.is_some());
assert_eq!(config.room_id, room_id);
assert_eq!(config.min_state_group, Some(3225));
assert_eq!(config.groups_to_compress, Some(970));

View File

@@ -18,7 +18,7 @@
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
use log::LevelFilter;
use std::env;

View File

@@ -1,11 +1,11 @@
[package]
name = "synapse_auto_compressor"
authors = ["William Ashton"]
version = "0.1.2"
version = "0.1.3"
edition = "2018"
[package.metadata.maturin]
requires-python = ">=3.6"
requires-python = ">=3.7"
project-url = {Source = "https://github.com/matrix-org/rust-synapse-compress-state"}
classifier = [
"Development Status :: 4 - Beta",
@@ -13,11 +13,10 @@ classifier = [
]
[dependencies]
clap = "2.33.0"
openssl = "0.10.32"
postgres = "0.19.0"
postgres-openssl = "0.5.0"
jemallocator = "0.3.2"
tikv-jemallocator = "0.5.0"
rand = "0.8.0"
serial_test = "0.5.1"
synapse_compress_state = { path = "../", features = ["no-progress-bars"] }
@@ -25,12 +24,16 @@ env_logger = "0.9.0"
log = "0.4.14"
log-panics = "2.0.0"
anyhow = "1.0.42"
pyo3-log = "0.4.0"
pyo3-log = "0.6.0"
# Needed for pyo3 support
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies.clap]
version = "3.1.14"
features = ["cargo"]
[dependencies.pyo3]
version = "0.14.1"
features = ["extension-module","abi3-py36"]
version = "0.16.4"
features = ["extension-module"]

View File

@@ -17,11 +17,11 @@
//! continue from where it left off.
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
use clap::{crate_authors, crate_description, crate_name, crate_version, Arg, Command};
use log::LevelFilter;
use std::{env, fs::OpenOptions};
use std::env;
use synapse_auto_compressor::{manager, state_saving, LevelInfo};
/// Execution starts here
@@ -29,12 +29,6 @@ fn main() {
// setup the logger for the synapse_auto_compressor
// The default can be overwritten with RUST_LOG
// see the README for more information
let log_file = OpenOptions::new()
.append(true)
.create(true)
.open("synapse_auto_compressor.log")
.unwrap_or_else(|e| panic!("Error occured while opening the log file: {}", e));
if env::var("RUST_LOG").is_err() {
let mut log_builder = env_logger::builder();
// Ensure panics still come through
@@ -47,7 +41,6 @@ fn main() {
} else {
// If RUST_LOG was set then use that
let mut log_builder = env_logger::Builder::from_env("RUST_LOG");
log_builder.target(env_logger::Target::Pipe(Box::new(log_file)));
// Ensure panics still come through
log_builder.filter_module("panic", LevelFilter::Error);
log_builder.init();
@@ -57,13 +50,13 @@ fn main() {
log::info!("synapse_auto_compressor started");
// parse the command line arguments using the clap crate
let arguments = App::new(crate_name!())
let arguments = Command::new(crate_name!())
.version(crate_version!())
.author(crate_authors!("\n"))
.about(crate_description!())
.arg(
Arg::with_name("postgres-url")
.short("p")
Arg::new("postgres-url")
.short('p')
.value_name("POSTGRES_LOCATION")
.help("The configuration for connecting to the postgres database.")
.long_help(concat!(
@@ -76,8 +69,8 @@ fn main() {
.takes_value(true)
.required(true),
).arg(
Arg::with_name("chunk_size")
.short("c")
Arg::new("chunk_size")
.short('c')
.value_name("COUNT")
.help("The maximum number of state groups to load into memory at once")
.long_help(concat!(
@@ -92,8 +85,8 @@ fn main() {
.takes_value(true)
.required(true),
).arg(
Arg::with_name("default_levels")
.short("l")
Arg::new("default_levels")
.short('l')
.value_name("LEVELS")
.help("Sizes of each new level in the compression algorithm, as a comma separated list.")
.long_help(concat!(
@@ -110,8 +103,8 @@ fn main() {
.takes_value(true)
.required(false),
).arg(
Arg::with_name("number_of_chunks")
.short("n")
Arg::new("number_of_chunks")
.short('n')
.value_name("CHUNKS_TO_COMPRESS")
.help("The number of chunks to compress")
.long_help(concat!(
@@ -134,7 +127,8 @@ fn main() {
.expect("A chunk size is required");
// The default structure to use when compressing
let default_levels = value_t!(arguments, "default_levels", LevelInfo)
let default_levels = arguments
.value_of_t::<LevelInfo>("default_levels")
.unwrap_or_else(|e| panic!("Unable to parse default levels: {}", e));
// The number of rooms to compress with this tool