feat: Limit the size of aggregated WebXDC update to 100 KiB (#4825)
Previously, update sending could be delayed by rate limits, and the pending updates were later
merged into large messages. This is undesirable for apps that send large files over WebXDC updates:
the message carrying the aggregated update may become too large to actually send, hitting the
provider's size limit or requiring multiple attempts over a flaky SMTP connection.

So, stop aggregating updates once the aggregated update would exceed a limit of 100 KiB. This is a
soft limit: it may be exceeded if a single update is larger, and it only bounds the size of the
update JSON, so the message with all its envelopes may still be bigger. The limit may also be
exceeded when updates are sent together with the WebXDC instance on resending, because the instance
size isn't accounted for, to keep the code simple. At worst this matches the previous behaviour,
where all updates were attached.
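
As a minimal sketch of that rule (the constant and function names below are illustrative only,
not the actual identifiers in src/webxdc.rs):

// Illustrative soft cap on the serialized size of one aggregated update.
const STATUS_UPDATE_SIZE_SOFT_LIMIT: usize = 100 * 1024; // 100 KiB

/// Collects pending update JSON items into one batch, stopping before the
/// batch would grow past the soft limit. A single oversized item is still
/// taken on its own, which is why the limit can be exceeded.
fn aggregate_updates(pending: &[String]) -> Vec<String> {
    let mut batch = Vec::new();
    let mut size = 0;
    for item in pending {
        if !batch.is_empty() && size + item.len() > STATUS_UPDATE_SIZE_SOFT_LIMIT {
            break;
        }
        size += item.len();
        batch.push(item.clone());
    }
    batch
}

Items that don't fit go out in later messages, which is why the changed call site in
src/mimefactory.rs below works with an explicit serial range.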
iequidoo committed Jul 13, 2024
1 parent 0f26da4 commit 9996c2d
Showing 3 changed files with 245 additions and 67 deletions.
35 changes: 33 additions & 2 deletions src/chat.rs
@@ -49,6 +49,7 @@ use crate::tools::{
create_smeared_timestamps, get_abs_path, gm2local_offset, smeared_time, time, IsNoneOrEmpty,
SystemTime,
};
use crate::webxdc::StatusUpdateSerial;

/// A chat item, such as a message or a marker.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -4272,9 +4273,39 @@ pub async fn resend_msgs(context: &Context, msg_ids: &[MsgId]) -> Result<()> {
msg.timestamp_sort = create_smeared_timestamp(context);
// note(treefit): only matters if it is the last message in chat (but probably too expensive to check, debounce also solves it)
chatlist_events::emit_chatlist_item_changed(context, msg.chat_id);
if !create_send_msg_jobs(context, &mut msg).await?.is_empty() {
context.scheduler.interrupt_smtp().await;
if create_send_msg_jobs(context, &mut msg).await?.is_empty() {
continue;
}
if msg.viewtype == Viewtype::Webxdc {
let conn_fn = |conn: &mut rusqlite::Connection| {
let range = conn.query_row(
"SELECT IFNULL(min(id), 1), IFNULL(max(id), 0) \
FROM msgs_status_updates WHERE msg_id=?",
(msg.id,),
|row| {
let min_id: StatusUpdateSerial = row.get(0)?;
let max_id: StatusUpdateSerial = row.get(1)?;
Ok((min_id, max_id))
},
)?;
if range.0 > range.1 {
return Ok(());
};
// `first_serial` must be decreased, otherwise if `Context::flush_status_updates()`
// runs in parallel, it would miss the race and instead of resending just remove the
// updates thinking that they have been already sent.
conn.execute(
"INSERT INTO smtp_status_updates (msg_id, first_serial, last_serial, descr) \
VALUES(?, ?, ?, '') \
ON CONFLICT(msg_id) \
DO UPDATE SET first_serial=min(first_serial - 1, excluded.first_serial)",
(msg.id, range.0, range.1),
)?;
Ok(())
};
context.sql.call_write(conn_fn).await?;
}
context.scheduler.interrupt_smtp().await;
}
Ok(())
}
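The ON CONFLICT clause above is what makes a resend safe against a concurrent
Context::flush_status_updates(): if a row for the message already exists, first_serial is pushed at
least one below its current value, signalling that the range is not fully sent yet. A standalone
sketch of the same upsert semantics (the table layout is reduced to the relevant columns and the
values are hypothetical; the real smtp_status_updates schema may differ):

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE smtp_status_updates (
             msg_id INTEGER PRIMARY KEY,
             first_serial INTEGER,
             last_serial INTEGER,
             descr TEXT
         );",
    )?;
    // First send attempt schedules the update range 5..=9 for msg_id 42.
    conn.execute(
        "INSERT INTO smtp_status_updates (msg_id, first_serial, last_serial, descr) \
         VALUES (?, ?, ?, '')",
        (42, 5, 9),
    )?;
    // Resend: the same upsert as in resend_msgs(). On conflict, first_serial is
    // pushed at least one below its current value so a concurrent flush cannot
    // mistake the range for already sent.
    conn.execute(
        "INSERT INTO smtp_status_updates (msg_id, first_serial, last_serial, descr) \
         VALUES (?, ?, ?, '') \
         ON CONFLICT(msg_id) \
         DO UPDATE SET first_serial=min(first_serial - 1, excluded.first_serial)",
        (42, 5, 9),
    )?;
    let first_serial: i64 = conn.query_row(
        "SELECT first_serial FROM smtp_status_updates WHERE msg_id=42",
        [],
        |row| row.get(0),
    )?;
    assert_eq!(first_serial, 4); // min(5 - 1, 5)
    Ok(())
}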
10 changes: 8 additions & 2 deletions src/mimefactory.rs
@@ -31,6 +31,7 @@ use crate::tools::IsNoneOrEmpty;
use crate::tools::{
create_outgoing_rfc724_mid, create_smeared_timestamp, remove_subject_prefix, time,
};
use crate::webxdc::StatusUpdateSerial;
use crate::{location, peer_channels};

// attachments of 25 mb gross should work on the majority of providers
@@ -1369,8 +1370,13 @@ impl MimeFactory {
} else if msg.viewtype == Viewtype::Webxdc {
let topic = peer_channels::create_random_topic();
headers.push(create_iroh_header(context, topic, msg.id).await?);
if let Some(json) = context
.render_webxdc_status_update_object(msg.id, None)
if let (Some(json), _) = context
.render_webxdc_status_update_object(
msg.id,
StatusUpdateSerial::MIN,
StatusUpdateSerial::MAX,
None,
)
.await?
{
parts.push(context.build_status_update_part(&json));
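The third changed file is not shown on this page (presumably src/webxdc.rs, given the new
StatusUpdateSerial arguments); that is where the aggregation limit itself lives. The call site
above suggests that render_webxdc_status_update_object() now takes a serial range and returns,
besides the optional JSON, the last serial it managed to include, so a sender can page through
updates in size-capped chunks. A hedged sketch of such a paging loop, with a hypothetical
render_range closure standing in for the real renderer:

// Hypothetical stand-in for illustration; the real serial type is StatusUpdateSerial.
type Serial = u32;

/// Drains updates in the range `first..=last` in size-capped chunks.
/// `render_range` returns the aggregated JSON for as many updates as fit
/// under the soft limit, plus the last serial it included; `send` ships one
/// chunk as a message.
fn send_in_chunks<R, S>(mut first: Serial, last: Serial, mut render_range: R, mut send: S)
where
    R: FnMut(Serial, Serial) -> (Option<String>, Serial),
    S: FnMut(&str),
{
    while first <= last {
        let (json, last_included) = render_range(first, last);
        let Some(json) = json else {
            break; // nothing (more) to send in this range
        };
        send(&json);
        // Continue right after the last update that fit into the previous chunk.
        first = match last_included.checked_add(1) {
            Some(next) => next,
            None => break,
        };
    }
}

In the MimeFactory change above, the full range StatusUpdateSerial::MIN..=StatusUpdateSerial::MAX
is requested, apparently so a (re)sent instance carries the first chunk of its updates, and the
second element of the returned tuple is ignored there (`_`).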
