diff --git a/benches/cache.rs b/benches/cache.rs
index 6e68291..cc3a81b 100644
--- a/benches/cache.rs
+++ b/benches/cache.rs
@@ -1,7 +1,10 @@
 use std::cell::Cell;
 
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use redu::{cache::tests::*, restic::Snapshot};
+use redu::{
+    cache::{tests::*, Migrator},
+    restic::Snapshot,
+};
 
 pub fn criterion_benchmark(c: &mut Criterion) {
     c.bench_function("merge sizetree", |b| {
@@ -12,10 +15,47 @@ pub fn criterion_benchmark(c: &mut Criterion) {
         b.iter(move || sizetree0.take().merge(black_box(sizetree1.take())));
     });
 
-    c.bench_function("create and save snapshot", |b| {
-        with_cache_open(|mut cache| {
-            let foo = Snapshot {
-                id: "foo".to_string(),
+    c.bench_function("save snapshot", |b| {
+        let foo = Snapshot {
+            id: "foo".to_string(),
+            time: mk_datetime(2024, 4, 12, 12, 00, 00),
+            parent: Some("bar".to_string()),
+            tree: "sometree".to_string(),
+            paths: vec![
+                "/home/user".to_string(),
+                "/etc".to_string(),
+                "/var".to_string(),
+            ],
+            hostname: Some("foo.com".to_string()),
+            username: Some("user".to_string()),
+            uid: Some(123),
+            gid: Some(456),
+            excludes: vec![
+                ".cache".to_string(),
+                "Cache".to_string(),
+                "/home/user/Downloads".to_string(),
+            ],
+            tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
+            original_id: Some("fefwfwew".to_string()),
+            program_version: Some("restic 0.16.0".to_string()),
+        };
+        b.iter_with_setup(
+            || {
+                let tempfile = Tempfile::new();
+                let cache =
+                    Migrator::open(&tempfile.0).unwrap().migrate().unwrap();
+                (tempfile, cache, generate_sizetree(6, 12))
+            },
+            |(_tempfile, mut cache, tree)| {
+                cache.save_snapshot(&foo, tree).unwrap()
+            },
+        );
+    });
+
+    c.bench_function("save lots of small snapshots", |b| {
+        fn mk_snapshot(id: String) -> Snapshot {
+            Snapshot {
+                id,
                 time: mk_datetime(2024, 4, 12, 12, 00, 00),
                 parent: Some("bar".to_string()),
                 tree: "sometree".to_string(),
@@ -36,16 +76,27 @@ pub fn criterion_benchmark(c: &mut Criterion) {
                 tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
                 original_id: Some("fefwfwew".to_string()),
                 program_version: Some("restic 0.16.0".to_string()),
-            };
-            b.iter(move || {
-                cache
-                    .save_snapshot(
-                        &foo,
-                        generate_sizetree(black_box(6), black_box(12)),
-                    )
-                    .unwrap();
-            });
-        })
+            }
+        }
+
+        b.iter_with_setup(
+            || {
+                let tempfile = Tempfile::new();
+                let cache =
+                    Migrator::open(&tempfile.0).unwrap().migrate().unwrap();
+                (tempfile, cache, generate_sizetree(1, 0))
+            },
+            |(_tempfile, mut cache, tree)| {
+                for i in 0..10_000 {
+                    cache
+                        .save_snapshot(
+                            &mk_snapshot(i.to_string()),
+                            tree.clone(),
+                        )
+                        .unwrap();
+                }
+            },
+        );
     });
 }
 
diff --git a/src/cache/mod.rs b/src/cache/mod.rs
index ce96bd1..9aad2b2 100644
--- a/src/cache/mod.rs
+++ b/src/cache/mod.rs
@@ -1,11 +1,15 @@
-use std::{collections::HashSet, path::Path};
+use std::{
+    cmp::{max, Reverse},
+    collections::{HashMap, HashSet},
+    path::Path,
+};
 
 use camino::{Utf8Path, Utf8PathBuf};
 use chrono::{DateTime, Utc};
 use log::trace;
 use rusqlite::{
     functions::FunctionFlags, params, types::FromSqlError, Connection,
-    OptionalExtension, Row,
+    OptionalExtension,
 };
 use thiserror::Error;
 
@@ -117,6 +121,14 @@ impl Cache {
         Ok(path_id)
     }
 
+    fn entries_tables(
+        &self,
+    ) -> Result<impl Iterator<Item = String>, rusqlite::Error> {
+        Ok(get_tables(&self.conn)?
+            .into_iter()
+            .filter(|name| name.starts_with("entries_")))
+    }
+
     /// This returns the children files/directories of the given path.
     /// Each entry's size is the largest size of that file/directory across
     /// all snapshots.
@@ -124,112 +136,109 @@ impl Cache {
         &self,
         path_id: Option<PathId>,
     ) -> Result<Vec<Entry>, rusqlite::Error> {
-        let aux = |row: &Row| {
-            Ok(Entry {
-                path_id: PathId(row.get("path_id")?),
-                component: row.get("component")?,
-                size: row.get("size")?,
-                is_dir: row.get("is_dir")?,
-            })
-        };
         let raw_path_id = o_path_id_to_raw_u64(path_id);
-        let cte_stmt_string = get_tables(&self.conn)?
-            .into_iter()
-            .filter(|name| name.starts_with("entries_"))
-            .map(|table| {
-                format!(
-                    "SELECT \
-                         path_id, \
-                         component, \
-                         size, \
-                         is_dir \
-                     FROM \"{table}\" JOIN paths ON path_id = paths.id \
-                     WHERE parent_id = {raw_path_id}\n"
-                )
-            })
-            .intersperse(String::from(" UNION ALL "))
-            .collect::<String>();
-        if cte_stmt_string.is_empty() {
-            return Ok(vec![]);
+        let mut entries: Vec<Entry> = Vec::new();
+        let mut index: HashMap<PathId, usize> = HashMap::new();
+        for table in self.entries_tables()? {
+            let stmt_str = format!(
+                "SELECT \
+                     path_id, \
+                     component, \
+                     size, \
+                     is_dir \
+                 FROM \"{table}\" JOIN paths ON path_id = paths.id \
+                 WHERE parent_id = {raw_path_id}\n",
+            );
+            let mut stmt = self.conn.prepare(&stmt_str)?;
+            let rows = stmt.query_map([], |row| {
+                Ok(Entry {
+                    path_id: PathId(row.get("path_id")?),
+                    component: row.get("component")?,
+                    size: row.get("size")?,
+                    is_dir: row.get("is_dir")?,
+                })
+            })?;
+            for row in rows {
+                let row = row?;
+                let path_id = row.path_id;
+                match index.get(&path_id) {
+                    None => {
+                        entries.push(row);
+                        index.insert(path_id, entries.len() - 1);
+                    }
+                    Some(i) => {
+                        let entry = &mut entries[*i];
+                        entry.size = max(entry.size, row.size);
+                        entry.is_dir = entry.is_dir || row.is_dir;
+                    }
+                }
+            }
         }
-        let mut stmt = self.conn.prepare(&format!(
-            "WITH rich_entries AS ({cte_stmt_string}) \
-             SELECT \
-                 path_id, \
-                 component, \
-                 max(size) as size, \
-                 max(is_dir) as is_dir \
-             FROM rich_entries \
-             GROUP BY path_id \
-             ORDER BY size DESC",
-        ))?;
-        let rows = stmt.query_map([], aux)?;
-        rows.collect()
+        entries.sort_by_key(|e| Reverse(e.size));
+        Ok(entries)
     }
 
     pub fn get_entry_details(
         &self,
         path_id: PathId,
-    ) -> Result<EntryDetails, Error> {
-        let aux = |row: &Row| -> Result<EntryDetails, Error> {
-            Ok(EntryDetails {
-                max_size: row.get("max_size")?,
-                max_size_snapshot_hash: row.get("max_size_snapshot_hash")?,
-                first_seen: timestamp_to_datetime(row.get("first_seen")?)?,
-                first_seen_snapshot_hash: row
-                    .get("first_seen_snapshot_hash")?,
-                last_seen: timestamp_to_datetime(row.get("last_seen")?)?,
-                last_seen_snapshot_hash: row.get("last_seen_snapshot_hash")?,
-            })
-        };
+    ) -> Result<Option<EntryDetails>, Error> {
         let raw_path_id = path_id.0;
-        let rich_entries_cte = get_tables(&self.conn)?
-            .iter()
-            .filter_map(|name| name.strip_prefix("entries_"))
-            .map(|snapshot_hash| {
-                format!(
-                    "SELECT \
-                         hash, \
-                         size, \
-                         time \
-                     FROM \"entries_{snapshot_hash}\" \
-                     JOIN paths ON path_id = paths.id \
-                     JOIN snapshots ON hash = '{snapshot_hash}' \
-                     WHERE path_id = {raw_path_id}\n"
-                )
-            })
-            .intersperse(String::from(" UNION ALL "))
-            .collect::<String>();
-        let query = format!(
-            "WITH \
-                 rich_entries AS ({rich_entries_cte}), \
-                 first_seen AS (
-                     SELECT hash, time
-                     FROM rich_entries
-                     ORDER BY time ASC
-                     LIMIT 1), \
-                 last_seen AS (
-                     SELECT hash, time
-                     FROM rich_entries
-                     ORDER BY time DESC
-                     LIMIT 1), \
-                 max_size AS (
-                     SELECT hash, size
-                     FROM rich_entries
-                     ORDER BY size DESC, time DESC
-                     LIMIT 1) \
-             SELECT \
-                 max_size.size AS max_size, \
-                 max_size.hash AS max_size_snapshot_hash, \
-                 first_seen.time AS first_seen, \
-                 first_seen.hash as first_seen_snapshot_hash, \
-                 last_seen.time AS last_seen, \
-                 last_seen.hash as last_seen_snapshot_hash \
-             FROM max_size
-             JOIN first_seen ON 1=1
-             JOIN last_seen ON 1=1"
-        );
-        self.conn.query_row_and_then(&query, [], aux)
+        let run_query =
+            |table: &str| -> Result<(String, usize, DateTime<Utc>), Error> {
+                let snapshot_hash = table.strip_prefix("entries_").unwrap();
+                let stmt_str = format!(
+                    "SELECT \
+                         hash, \
+                         size, \
+                         time \
+                     FROM \"{table}\" \
+                     JOIN paths ON path_id = paths.id \
+                     JOIN snapshots ON hash = '{snapshot_hash}' \
+                     WHERE path_id = {raw_path_id}\n"
+                );
+                let mut stmt = self.conn.prepare(&stmt_str)?;
+                let (hash, size, timestamp) = stmt.query_row([], |row| {
+                    Ok((row.get("hash")?, row.get("size")?, row.get("time")?))
+                })?;
+                let time = timestamp_to_datetime(timestamp)?;
+                Ok((hash, size, time))
+            };
+
+        let mut entries_tables = self.entries_tables()?;
+        let mut details = match entries_tables.next() {
+            None => return Ok(None),
+            Some(table) => {
+                let (hash, size, time) = run_query(&table)?;
+                EntryDetails {
+                    max_size: size,
+                    max_size_snapshot_hash: hash.clone(),
+                    first_seen: time,
+                    first_seen_snapshot_hash: hash.clone(),
+                    last_seen: time,
+                    last_seen_snapshot_hash: hash,
+                }
+            }
+        };
+        let mut max_size_time = details.first_seen; // Time of the max_size snapshot
+        for table in entries_tables {
+            let (hash, size, time) = run_query(&table)?;
+            if size > details.max_size
+                || (size == details.max_size && time > max_size_time)
+            {
+                details.max_size = size;
+                details.max_size_snapshot_hash = hash.clone();
+                max_size_time = time;
+            }
+            if time < details.first_seen {
+                details.first_seen = time;
+                details.first_seen_snapshot_hash = hash.clone();
+            }
+            if time > details.last_seen {
+                details.last_seen = time;
+                details.last_seen_snapshot_hash = hash;
+            }
+        }
+        Ok(Some(details))
     }
 
     pub fn save_snapshot(
@@ -383,7 +392,7 @@ impl Cache {
 
 // A PathId should never be 0.
 // This is reserved for the absolute root and should match None
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
 #[repr(transparent)]
 pub struct PathId(u64);
 
@@ -407,7 +416,7 @@ pub struct Entry {
     pub is_dir: bool,
 }
 
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Eq, PartialEq)]
 pub struct EntryDetails {
     pub max_size: usize,
     pub max_size_snapshot_hash: String,
diff --git a/src/cache/tests.rs b/src/cache/tests.rs
index 3d53fe9..0c0cc3d 100644
--- a/src/cache/tests.rs
+++ b/src/cache/tests.rs
@@ -1,11 +1,21 @@
-use std::{cmp::Reverse, convert::Infallible, fs, iter, path::PathBuf};
+use std::{cmp::Reverse, convert::Infallible, fs, iter, mem, path::PathBuf};
 
 use camino::{Utf8Path, Utf8PathBuf};
 use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc};
 use rusqlite::Connection;
-use scopeguard::defer;
 use uuid::Uuid;
 
+use super::LATEST_VERSION;
+use crate::{
+    cache::{
+        determine_version,
+        filetree::{InsertError, SizeTree},
+        get_tables, timestamp_to_datetime, Cache, EntryDetails, Migrator,
+        VersionId,
+    },
+    restic::Snapshot,
+};
+
 pub fn mk_datetime(
     year: i32,
     month: u32,
@@ -21,33 +31,20 @@
     .and_utc()
 }
 
-use super::{determine_version, get_tables, LATEST_VERSION};
-use crate::{
-    cache::{
-        filetree::{InsertError, SizeTree},
-        Cache, Migrator, VersionId,
-    },
-    restic::Snapshot,
-};
-
-pub fn tempfile() -> PathBuf {
-    let mut file = std::env::temp_dir();
-    file.push(Uuid::new_v4().to_string());
-    file
-}
+pub struct Tempfile(pub PathBuf);
 
-pub fn with_cache_open_with_target(
-    target: VersionId,
-    body: impl FnOnce(Cache),
-) {
-    let file = tempfile();
-    defer! { fs::remove_file(&file).unwrap(); }
-    let migrator = Migrator::open_with_target(&file, target).unwrap();
-    body(migrator.migrate().unwrap());
+impl Drop for Tempfile {
+    fn drop(&mut self) {
+        fs::remove_file(mem::take(&mut self.0)).unwrap();
+    }
 }
 
-pub fn with_cache_open(body: impl FnOnce(Cache)) {
-    with_cache_open_with_target(LATEST_VERSION, body);
+impl Tempfile {
+    pub fn new() -> Self {
+        let mut path = std::env::temp_dir();
+        path.push(Uuid::new_v4().to_string());
+        Tempfile(path)
+    }
 }
 
 pub fn path_parent(path: &Utf8Path) -> Option<Utf8PathBuf> {
@@ -114,6 +111,48 @@ fn to_sorted_entries(tree: &SizeTree) -> Vec<(Vec<&str>, usize, bool)> {
     entries
 }
 
+fn assert_get_entries_correct_at_path<P: AsRef<Utf8Path>>(
+    cache: &Cache,
+    tree: &SizeTree,
+    path: P,
+) {
+    let mut db_entries = {
+        let path_id = if path.as_ref().as_str().is_empty() {
+            None
+        } else {
+            cache.get_path_id_by_path(path.as_ref()).unwrap()
+        };
+        if path_id.is_none() && !path.as_ref().as_str().is_empty() {
+            // path was not found
+            vec![]
+        } else {
+            cache
+                .get_entries(path_id)
+                .unwrap()
+                .into_iter()
+                .map(|e| (e.component, e.size, e.is_dir))
+                .collect::<Vec<_>>()
+        }
+    };
+    db_entries.sort_by_key(|(component, _, _)| component.clone());
+    let mut entries = to_sorted_entries(&tree)
+        .iter()
+        .filter_map(|(components, size, is_dir)| {
+            // keep only the ones with parent == loc
+            let (last, parent_cs) = components.split_last()?;
+            let parent = parent_cs.iter().collect::<Utf8PathBuf>();
+            if parent == path.as_ref() {
+                Some((last.to_string(), *size, *is_dir))
+            } else {
+                None
+            }
+        })
+        .collect::<Vec<_>>();
+    entries.sort_by_key(|(_, size, _)| Reverse(*size));
+    entries.sort_by_key(|(component, _, _)| component.clone());
+    assert_eq!(db_entries, entries);
+}
+
 fn example_tree_0() -> SizeTree {
     let mut sizetree = SizeTree::new();
     assert_eq!(sizetree.insert(["a", "0", "x"], 1), Ok(()));
@@ -268,134 +307,159 @@ fn merge_commutativity() {
 
 #[test]
 fn cache_snapshots_entries() {
-    with_cache_open(|mut cache| {
-        fn test_snapshots(cache: &Cache, mut snapshots: Vec<&Snapshot>) {
-            let mut db_snapshots = cache.get_snapshots().unwrap();
-            db_snapshots.sort_unstable_by(|s0, s1| s0.id.cmp(&s1.id));
-            snapshots.sort_unstable_by(|s0, s1| s0.id.cmp(&s1.id));
-            for (s0, s1) in iter::zip(db_snapshots.iter(), snapshots.iter()) {
-                assert_eq!(s0.id, s1.id);
-                assert_eq!(s0.time, s1.time);
-                assert_eq!(s0.parent, s1.parent);
-                assert_eq!(s0.tree, s1.tree);
-                assert_eq!(s0.hostname, s1.hostname);
-                assert_eq!(s0.username, s1.username);
-                assert_eq!(s0.uid, s1.uid);
-                assert_eq!(s0.gid, s1.gid);
-                assert_eq!(s0.original_id, s1.original_id);
-                assert_eq!(s0.program_version, s1.program_version);
-
-                let mut s0_paths: Vec<String> = s0.paths.to_vec();
-                s0_paths.sort();
-                let mut s1_paths: Vec<String> = s1.paths.to_vec();
-                s1_paths.sort();
-                assert_eq!(s0_paths, s1_paths);
-
-                let mut s0_excludes: Vec<String> = s0.excludes.to_vec();
-                s0_excludes.sort();
-                let mut s1_excludes: Vec<String> = s1.excludes.to_vec();
-                s1_excludes.sort();
-                assert_eq!(s0_excludes, s1_excludes);
-
-                let mut s0_tags: Vec<String> = s0.tags.to_vec();
-                s0_tags.sort();
-                let mut s1_tags: Vec<String> = s1.tags.to_vec();
-                s1_tags.sort();
-                assert_eq!(s0_tags, s1_tags);
-            }
+    fn test_snapshots(cache: &Cache, mut snapshots: Vec<&Snapshot>) {
+        let mut db_snapshots = cache.get_snapshots().unwrap();
+        db_snapshots.sort_unstable_by(|s0, s1| s0.id.cmp(&s1.id));
+        snapshots.sort_unstable_by(|s0, s1| s0.id.cmp(&s1.id));
+        for (s0, s1) in iter::zip(db_snapshots.iter(), snapshots.iter()) {
+            assert_eq!(s0.id, s1.id);
+            assert_eq!(s0.time, s1.time);
+            assert_eq!(s0.parent, s1.parent);
+            assert_eq!(s0.tree, s1.tree);
+            assert_eq!(s0.hostname, s1.hostname);
+            assert_eq!(s0.username, s1.username);
+            assert_eq!(s0.uid, s1.uid);
+            assert_eq!(s0.gid, s1.gid);
+            assert_eq!(s0.original_id, s1.original_id);
+            assert_eq!(s0.program_version, s1.program_version);
+
+            let mut s0_paths: Vec<String> = s0.paths.to_vec();
+            s0_paths.sort();
+            let mut s1_paths: Vec<String> = s1.paths.to_vec();
+            s1_paths.sort();
+            assert_eq!(s0_paths, s1_paths);
+
+            let mut s0_excludes: Vec<String> = s0.excludes.to_vec();
+            s0_excludes.sort();
+            let mut s1_excludes: Vec<String> = s1.excludes.to_vec();
+            s1_excludes.sort();
+            assert_eq!(s0_excludes, s1_excludes);
+
+            let mut s0_tags: Vec<String> = s0.tags.to_vec();
+            s0_tags.sort();
+            let mut s1_tags: Vec<String> = s1.tags.to_vec();
+            s1_tags.sort();
+            assert_eq!(s0_tags, s1_tags);
         }
+    }
 
-        fn test_get_max_file_sizes<P: AsRef<Utf8Path>>(
-            cache: &Cache,
-            tree: &SizeTree,
-            path: P,
-        ) {
-            let mut db_entries = {
-                let path_id = if path.as_ref().as_str().is_empty() {
-                    None
-                } else {
-                    cache.get_path_id_by_path(path.as_ref()).unwrap()
-                };
-                if path_id.is_none() && !path.as_ref().as_str().is_empty() {
-                    // path was not found
-                    vec![]
-                } else {
-                    cache
-                        .get_entries(path_id)
-                        .unwrap()
-                        .into_iter()
-                        .map(|e| (e.component, e.size, e.is_dir))
-                        .collect::<Vec<_>>()
-                }
-            };
-            db_entries.sort_by_key(|(component, _, _)| component.clone());
-            let mut entries = to_sorted_entries(&tree)
-                .iter()
-                .filter_map(|(components, size, is_dir)| {
-                    // keep only the ones with parent == loc
-                    let (last, parent_cs) = components.split_last()?;
-                    let parent = parent_cs.iter().collect::<Utf8PathBuf>();
-                    if parent == path.as_ref() {
-                        Some((last.to_string(), *size, *is_dir))
-                    } else {
-                        None
-                    }
-                })
-                .collect::<Vec<_>>();
-            entries.sort_by_key(|(_, size, _)| Reverse(*size));
-            entries.sort_by_key(|(component, _, _)| component.clone());
-            assert_eq!(db_entries, entries);
-        }
+    let tempfile = Tempfile::new();
+    let mut cache = Migrator::open(&tempfile.0).unwrap().migrate().unwrap();
+
+    let foo = Snapshot {
+        id: "foo".to_string(),
+        time: mk_datetime(2024, 4, 12, 12, 00, 00),
+        parent: Some("bar".to_string()),
+        tree: "sometree".to_string(),
+        paths: vec![
+            "/home/user".to_string(),
+            "/etc".to_string(),
+            "/var".to_string(),
+        ],
+        hostname: Some("foo.com".to_string()),
+        username: Some("user".to_string()),
+        uid: Some(123),
+        gid: Some(456),
+        excludes: vec![
+            ".cache".to_string(),
+            "Cache".to_string(),
+            "/home/user/Downloads".to_string(),
+        ],
+        tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
+        original_id: Some("fefwfwew".to_string()),
+        program_version: Some("restic 0.16.0".to_string()),
+    };
+
+    let bar = Snapshot {
+        id: "bar".to_string(),
+        time: mk_datetime(2025, 5, 12, 17, 00, 00),
+        parent: Some("wat".to_string()),
+        tree: "anothertree".to_string(),
+        paths: vec!["/home/user".to_string()],
+        hostname: Some("foo.com".to_string()),
+        username: Some("user".to_string()),
+        uid: Some(123),
+        gid: Some(456),
+        excludes: vec![
+            ".cache".to_string(),
+            "Cache".to_string(),
+            "/home/user/Downloads".to_string(),
+        ],
+        tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
+        original_id: Some("fefwfwew".to_string()),
+        program_version: Some("restic 0.16.0".to_string()),
+    };
+
+    let wat = Snapshot {
+        id: "wat".to_string(),
+        time: mk_datetime(2023, 5, 12, 17, 00, 00),
+        parent: None,
+        tree: "fwefwfwwefwefwe".to_string(),
+        paths: vec![],
+        hostname: None,
+        username: None,
+        uid: None,
+        gid: None,
+        excludes: vec![],
+        tags: vec![],
+        original_id: None,
+        program_version: None,
+    };
+
+    cache.save_snapshot(&foo, example_tree_0()).unwrap();
+    cache.save_snapshot(&bar, example_tree_1()).unwrap();
+    cache.save_snapshot(&wat, example_tree_2()).unwrap();
+
+    test_snapshots(&cache, vec![&foo, &bar, &wat]);
+
+    fn test_entries(cache: &Cache, sizetree: SizeTree) {
+        assert_get_entries_correct_at_path(cache, &sizetree, "");
+        assert_get_entries_correct_at_path(cache, &sizetree, "a");
+        assert_get_entries_correct_at_path(cache, &sizetree, "b");
+        assert_get_entries_correct_at_path(cache, &sizetree, "a/0");
+        assert_get_entries_correct_at_path(cache, &sizetree, "a/1");
+        assert_get_entries_correct_at_path(cache, &sizetree, "a/2");
+        assert_get_entries_correct_at_path(cache, &sizetree, "b/0");
+        assert_get_entries_correct_at_path(cache, &sizetree, "b/1");
+        assert_get_entries_correct_at_path(cache, &sizetree, "b/2");
+        assert_get_entries_correct_at_path(cache, &sizetree, "something");
+        assert_get_entries_correct_at_path(cache, &sizetree, "a/something");
+    }
 
-        let foo = Snapshot {
-            id: "foo".to_string(),
-            time: mk_datetime(2024, 4, 12, 12, 00, 00),
-            parent: Some("bar".to_string()),
-            tree: "sometree".to_string(),
-            paths: vec![
-                "/home/user".to_string(),
-                "/etc".to_string(),
-                "/var".to_string(),
-            ],
-            hostname: Some("foo.com".to_string()),
-            username: Some("user".to_string()),
-            uid: Some(123),
-            gid: Some(456),
-            excludes: vec![
-                ".cache".to_string(),
-                "Cache".to_string(),
-                "/home/user/Downloads".to_string(),
-            ],
-            tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
-            original_id: Some("fefwfwew".to_string()),
-            program_version: Some("restic 0.16.0".to_string()),
-        };
+    test_entries(
+        &cache,
+        example_tree_0().merge(example_tree_1()).merge(example_tree_2()),
+    );
 
-        let bar = Snapshot {
-            id: "bar".to_string(),
-            time: mk_datetime(2025, 5, 12, 17, 00, 00),
-            parent: Some("wat".to_string()),
-            tree: "anothertree".to_string(),
-            paths: vec!["/home/user".to_string()],
-            hostname: Some("foo.com".to_string()),
-            username: Some("user".to_string()),
-            uid: Some(123),
-            gid: Some(456),
-            excludes: vec![
-                ".cache".to_string(),
-                "Cache".to_string(),
-                "/home/user/Downloads".to_string(),
-            ],
-            tags: vec!["foo_machine".to_string(), "rewrite".to_string()],
-            original_id: Some("fefwfwew".to_string()),
-            program_version: Some("restic 0.16.0".to_string()),
-        };
+    // Deleting a non-existent snapshot does nothing
+    cache.delete_snapshot("non-existent").unwrap();
+    test_snapshots(&cache, vec![&foo, &bar, &wat]);
+    test_entries(
+        &cache,
+        example_tree_0().merge(example_tree_1()).merge(example_tree_2()),
+    );
+
+    // Remove bar
+    cache.delete_snapshot("bar").unwrap();
+    test_snapshots(&cache, vec![&foo, &wat]);
+    test_entries(&cache, example_tree_0().merge(example_tree_2()));
+}
+
+// TODO: Ideally we would run this with more than 10_000 snapshots, but at
+// the moment that is too slow.
+#[test]
+fn lots_of_snapshots() {
+    let tempfile = Tempfile::new();
+    let mut cache = Migrator::open(&tempfile.0).unwrap().migrate().unwrap();
 
-        let wat = Snapshot {
-            id: "wat".to_string(),
-            time: mk_datetime(2023, 5, 12, 17, 00, 00),
+    const NUM_SNAPSHOTS: usize = 10_000;
+
+    // Insert lots of snapshots
+    for i in 0..NUM_SNAPSHOTS {
+        let snapshot = Snapshot {
+            id: i.to_string(),
+            time: timestamp_to_datetime(i as i64).unwrap(),
             parent: None,
-            tree: "fwefwfwwefwefwe".to_string(),
+            tree: i.to_string(),
             paths: vec![],
             hostname: None,
             username: None,
@@ -406,44 +470,25 @@ fn cache_snapshots_entries() {
             original_id: None,
             program_version: None,
         };
+        cache.save_snapshot(&snapshot, example_tree_0()).unwrap();
+    }
 
-        cache.save_snapshot(&foo, example_tree_0()).unwrap();
-        cache.save_snapshot(&bar, example_tree_1()).unwrap();
-        cache.save_snapshot(&wat, example_tree_2()).unwrap();
-
-        test_snapshots(&cache, vec![&foo, &bar, &wat]);
-
-        fn test_entries(cache: &Cache, sizetree: SizeTree) {
-            test_get_max_file_sizes(cache, &sizetree, "");
-            test_get_max_file_sizes(cache, &sizetree, "a");
-            test_get_max_file_sizes(cache, &sizetree, "b");
-            test_get_max_file_sizes(cache, &sizetree, "a/0");
-            test_get_max_file_sizes(cache, &sizetree, "a/1");
-            test_get_max_file_sizes(cache, &sizetree, "a/2");
-            test_get_max_file_sizes(cache, &sizetree, "b/0");
-            test_get_max_file_sizes(cache, &sizetree, "b/1");
-            test_get_max_file_sizes(cache, &sizetree, "b/2");
-            test_get_max_file_sizes(cache, &sizetree, "something");
-            test_get_max_file_sizes(cache, &sizetree, "a/something");
-        }
+    // get_entries
+    let tree = example_tree_0();
+    for path in ["", "a", "a/0", "a/1", "a/1/x", "a/something"] {
+        assert_get_entries_correct_at_path(&cache, &tree, path);
+    }
 
-        test_entries(
-            &cache,
-            example_tree_0().merge(example_tree_1()).merge(example_tree_2()),
-        );
-
-        // Deleting a non-existent snapshot does nothing
-        cache.delete_snapshot("non-existent").unwrap();
-        test_snapshots(&cache, vec![&foo, &bar, &wat]);
-        test_entries(
-            &cache,
-            example_tree_0().merge(example_tree_1()).merge(example_tree_2()),
-        );
-
-        // Remove bar
-        cache.delete_snapshot("bar").unwrap();
-        test_snapshots(&cache, vec![&foo, &wat]);
-        test_entries(&cache, example_tree_0().merge(example_tree_2()));
+    // get_entry_details
+    let path_id = cache.get_path_id_by_path("a/0".into()).unwrap().unwrap();
+    let details = cache.get_entry_details(path_id).unwrap().unwrap();
+    assert_eq!(details, EntryDetails {
+        max_size: 4,
+        max_size_snapshot_hash: (NUM_SNAPSHOTS - 1).to_string(),
+        first_seen: timestamp_to_datetime(0).unwrap(),
+        first_seen_snapshot_hash: 0.to_string(),
+        last_seen: timestamp_to_datetime((NUM_SNAPSHOTS - 1) as i64).unwrap(),
+        last_seen_snapshot_hash: (NUM_SNAPSHOTS - 1).to_string(),
+    });
 }
 
@@ -469,9 +514,9 @@ fn assert_marks(cache: &Cache, marks: &[&str]) {
 
 fn populate_v0<'a>(
     marks: impl IntoIterator<Item = &'a str>,
-) -> Result<PathBuf, rusqlite::Error> {
-    let file = tempfile();
-    let mut cache = Migrator::open_with_target(&file, 0)?.migrate()?;
+) -> Result<Tempfile, rusqlite::Error> {
+    let file = Tempfile::new();
+    let mut cache = Migrator::open_with_target(&file.0, 0)?.migrate()?;
     let tx = cache.conn.transaction()?;
     {
         let mut marks_stmt =
@@ -490,7 +535,7 @@ fn test_migrate_v0_to_v1() {
     let file = populate_v0(marks).unwrap();
 
     let cache =
-        Migrator::open_with_target(&file, 1).unwrap().migrate().unwrap();
+        Migrator::open_with_target(&file.0, 1).unwrap().migrate().unwrap();
 
     assert_tables(&cache.conn, &[
         "metadata_integer",
diff --git a/src/main.rs b/src/main.rs
index e6e08e4..e547127 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -191,7 +191,8 @@ fn main() -> anyhow::Result<()> {
                     Some(Event::Entries { path_id, entries })
                 }
                 Action::GetEntryDetails(path_id) =>
-                    Some(Event::EntryDetails(cache.get_entry_details(path_id)?)),
+                    Some(Event::EntryDetails(cache.get_entry_details(path_id)?
+                        .expect("The UI requested a GetEntryDetails with a path_id that does not exist"))),
                 Action::UpsertMark(path) => {
                     cache.upsert_mark(&path)?;
                     Some(Event::Marks(cache.get_marks()?))
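
Note on the core change above: Cache::get_entries used to build one big
UNION ALL CTE over every entries_* table and let SQLite do the GROUP BY /
max() aggregation; it now queries each per-snapshot table separately and
merges the rows in memory. As a standalone illustration of that merge-by-key
aggregation, here is a minimal sketch (not part of the patch: merge_batches
and the bare-u64 path ids are hypothetical stand-ins for redu's
PathId/Entry types):

use std::{
    cmp::{max, Reverse},
    collections::HashMap,
};

// Simplified stand-in: the real Entry also carries a component string,
// and path ids are a PathId newtype rather than a bare u64.
#[derive(Debug, PartialEq)]
struct Entry {
    path_id: u64,
    size: usize,
    is_dir: bool,
}

// One result row per path_id across all batches: keep the maximum size,
// OR together the is_dir flags, and return the rows largest-first, which
// is the same aggregation get_entries performs across the entries_* tables.
fn merge_batches(batches: Vec<Vec<Entry>>) -> Vec<Entry> {
    let mut entries: Vec<Entry> = Vec::new();
    // Maps a path_id to its position in `entries`, so each incoming row
    // is either appended or folded into the entry already seen.
    let mut index: HashMap<u64, usize> = HashMap::new();
    for batch in batches {
        for row in batch {
            let path_id = row.path_id;
            match index.get(&path_id) {
                None => {
                    entries.push(row);
                    index.insert(path_id, entries.len() - 1);
                }
                Some(i) => {
                    let entry = &mut entries[*i];
                    entry.size = max(entry.size, row.size);
                    entry.is_dir = entry.is_dir || row.is_dir;
                }
            }
        }
    }
    entries.sort_by_key(|e| Reverse(e.size));
    entries
}

fn main() {
    let merged = merge_batches(vec![
        vec![Entry { path_id: 1, size: 10, is_dir: false }],
        vec![
            Entry { path_id: 1, size: 25, is_dir: true },
            Entry { path_id: 2, size: 5, is_dir: false },
        ],
    ]);
    // path 1 keeps its largest size and stays marked as a directory
    assert_eq!(merged[0], Entry { path_id: 1, size: 25, is_dir: true });
    assert_eq!(merged[1].path_id, 2);
}

This trades the single grouped SQL statement for one small query per
snapshot table plus an O(rows) in-memory fold, which also makes the
empty-repository case (no entries_* tables at all) fall out naturally as an
empty result instead of needing the old is_empty() early return.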