Skip to content

Commit

Permalink
Merge branch 'main' into jan/time-panel-chunkification
Browse files Browse the repository at this point in the history
  • Loading branch information
jprochazk committed Jul 22, 2024
2 parents 473dc59 + ddf60c4 commit 1bfbfeb
Show file tree
Hide file tree
Showing 6 changed files with 64 additions and 11 deletions.
4 changes: 4 additions & 0 deletions crates/store/re_chunk/src/shuffle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,10 @@ impl Chunk {
return chunk;
};

if time_chunk.is_sorted() {
return chunk;
}

#[cfg(not(target_arch = "wasm32"))]
let now = std::time::Instant::now();

Expand Down
31 changes: 27 additions & 4 deletions crates/store/re_chunk_store/src/events.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::sync::Arc;
use std::{collections::BTreeSet, sync::Arc};

use re_chunk::Chunk;
use re_log_types::StoreId;
Expand Down Expand Up @@ -106,29 +106,51 @@ pub struct ChunkStoreDiff {
pub kind: ChunkStoreDiffKind,

/// The chunk that was added or removed.
///
/// If the addition of a chunk to the store triggered a compaction, that chunk _pre-compaction_ is
/// what will be exposed here.
/// This allows subscribers to only process data that is new, as opposed to having to reprocess
/// old rows that appear to have been removed and then reinserted due to compaction.
///
/// To keep track of what chunks were merged with what chunks, use the [`ChunkStoreDiff::compacted`]
/// field below.
//
// NOTE: We purposefully use an `Arc` instead of a `ChunkId` here because we want to make sure that all
// downstream subscribers get a chance to inspect the data in the chunk before it gets permanently
// deallocated.
pub chunk: Arc<Chunk>,

/// Reports which [`ChunkId`]s were merged into a new [`ChunkId`] (srcs, dst) during a compaction.
///
/// This is only specified if an addition to the store triggered a compaction.
/// When that happens, it is guaranteed that [`ChunkStoreDiff::chunk`] will be present in the
/// set of source chunks below, since it was compacted on arrival.
///
/// A corollary to that is that the destination [`ChunkId`] must have never been seen before.
pub compacted: Option<(BTreeSet<ChunkId>, ChunkId)>,
}

impl PartialEq for ChunkStoreDiff {
#[inline]
fn eq(&self, rhs: &Self) -> bool {
let Self { kind, chunk } = self;
*kind == rhs.kind && chunk.id() == rhs.chunk.id()
let Self {
kind,
chunk,
compacted,
} = self;
*kind == rhs.kind && chunk.id() == rhs.chunk.id() && compacted == &rhs.compacted
}
}

impl Eq for ChunkStoreDiff {}

impl ChunkStoreDiff {
#[inline]
pub fn addition(chunk: Arc<Chunk>) -> Self {
pub fn addition(chunk: Arc<Chunk>, compacted: Option<(BTreeSet<ChunkId>, ChunkId)>) -> Self {
Self {
kind: ChunkStoreDiffKind::Addition,
chunk,
compacted,
}
}

Expand All @@ -137,6 +159,7 @@ impl ChunkStoreDiff {
Self {
kind: ChunkStoreDiffKind::Deletion,
chunk,
compacted: None,
}
}

Expand Down
35 changes: 31 additions & 4 deletions crates/store/re_chunk_store/src/writes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ impl ChunkStore {

self.insert_id += 1;

let non_compacted_chunk = Arc::clone(chunk); // we'll need it to create the store event

let (chunk, diffs) = if chunk.is_static() {
// Static data: make sure to keep the most recent chunk available for each component column.
re_tracing::profile_scope!("static");
Expand Down Expand Up @@ -97,7 +99,10 @@ impl ChunkStore {

(
Arc::clone(chunk),
vec![ChunkStoreDiff::addition(Arc::clone(chunk))],
vec![ChunkStoreDiff::addition(
non_compacted_chunk, /* added */
None, /* compacted */
)],
)
} else {
// Temporal data: just index the chunk on every dimension of interest.
Expand Down Expand Up @@ -222,12 +227,34 @@ impl ChunkStore {

self.temporal_chunks_stats += ChunkStoreChunkStats::from_chunk(&chunk_or_compacted);

let mut diffs = vec![ChunkStoreDiff::addition(Arc::clone(&chunk_or_compacted))];
let mut diff = ChunkStoreDiff::addition(
// NOTE: We are advertising only the non-compacted chunk as "added", i.e. only the new data.
//
// This makes sure that downstream subscribers only have to process what is new,
// instead of needlessly reprocessing old rows that would appear to have been
// removed and reinserted due to compaction.
//
// Subscribers will still be capable of tracking which chunks have been merged with which
// by using the compaction report that we fill below.
Arc::clone(&non_compacted_chunk), /* added */
None, /* compacted */
);
if let Some(elected_chunk) = &elected_chunk {
diffs.extend(self.remove_chunk(elected_chunk.id()));
// NOTE: The chunk that we've just added has been compacted already!
let srcs = std::iter::once(non_compacted_chunk.id())
.chain(
self.remove_chunk(elected_chunk.id())
.into_iter()
.filter(|diff| diff.kind == crate::ChunkStoreDiffKind::Deletion)
.map(|diff| diff.chunk.id()),
)
.collect();
let dst = chunk_or_compacted.id();

diff.compacted = Some((srcs, dst));
}

(chunk_or_compacted, diffs)
(chunk_or_compacted, vec![diff])
};

self.chunks_per_chunk_id.insert(chunk.id(), chunk.clone());
Expand Down
2 changes: 0 additions & 2 deletions crates/store/re_entity_db/src/entity_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -372,8 +372,6 @@ impl EntityDb {
self.time_histogram_per_timeline.on_events(&store_events);
self.query_caches.on_events(&store_events);
self.tree.on_store_additions(&store_events);
// Tree deletions depend on data store, so data store must have been notified of deletions already.
self.tree.on_store_deletions(&self.data_store);

// We inform the stats last, since it measures e2e latency.
self.stats.on_events(&store_events);
Expand Down
1 change: 1 addition & 0 deletions crates/store/re_query/src/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,7 @@ impl ChunkStoreSubscriber for Caches {
let ChunkStoreDiff {
kind: _, // Don't care: both additions and deletions invalidate query results.
chunk,
compacted: _,
} = diff;

{
Expand Down
2 changes: 1 addition & 1 deletion docs/content/concepts/spaces-and-transforms.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ Note that none of the names in the paths are special.

You can use [`rr.ViewCoordinates`](https://ref.rerun.io/docs/python/stable/common/archetypes/#rerun.archetypes.ViewCoordinates) to set your preferred view coordinate systems, giving semantic meaning to the XYZ axes of the space.

For 3D spaces it can be used to log what the up-axis is in your coordinate system. This will help Rerun set a good default view of your 3D scene, as well as make the virtual eye interactions more natural. This can be done with `rr.log("world", rr.ViewCoordinates(up="+Z"), static=True)`.
For 3D spaces it can be used to log what the up-axis is in your coordinate system. This will help Rerun set a good default view of your 3D scene, as well as make the virtual eye interactions more natural. This can be done with `rr.log("world", rr.ViewCoordinates.RIGHT_HAND_Z_UP, static=True)`.

You can also use this `log_view_coordinates` for pinhole entities, but it is encouraged that you instead use [`rr.log(…, rr.Pinhole(camera_xyz=…))`](https://ref.rerun.io/docs/python/stable/common/archetypes/#rerun.archetypes.Pinhole) for this. The default coordinate system for pinhole entities is `RDF` (X=Right, Y=Down, Z=Forward).

Expand Down

0 comments on commit 1bfbfeb

Please sign in to comment.