make the new calendar code live
This has the bot actually start running all of the new calendar_check
code. At the moment, events are being sent into nothingness. The idea is
to have the `discord` module handle receiving the events/etc.
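
Not part of this commit, but for context on where the events are headed: below is a minimal sketch of the receiver loop the `discord` module could run against the `broadcast::Sender<CommunityEvent>` wired up in this change. The function name and log messages are illustrative only.

use calendar_check::CommunityEvent;
use log::{error, info};
use tokio::sync::broadcast;

// Hypothetical consumer of the calendar-event broadcast channel; the real
// handler is intended to live in the `discord` module in a later change.
async fn receive_community_events(mut events: broadcast::Receiver<CommunityEvent>) {
    loop {
        match events.recv().await {
            Ok(event) => {
                // This is where a Discord ping would actually be posted.
                info!("Received calendar event {:?}", event.id);
            }
            // The sender outpaced the channel's capacity and the oldest
            // messages were dropped; log it and keep receiving.
            Err(broadcast::error::RecvError::Lagged(skipped)) => {
                error!("Calendar event receiver lagged; skipped {skipped} events");
            }
            // Every sender has been dropped, so nothing more will arrive.
            Err(broadcast::error::RecvError::Closed) => break,
        }
    }
}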
gburgessiv committed Jan 4, 2025
1 parent ae662df · commit 10b279d
Showing 4 changed files with 57 additions and 29 deletions.
calendar_check/src/lib.rs (6 changes: 3 additions & 3 deletions)
@@ -16,22 +16,22 @@ fn utc_time_to_api_time(time: &UtcTime) -> String {
time.to_rfc3339_opts(chrono::SecondsFormat::Secs, use_z)
}

#[derive(Debug, Default)]
#[derive(Copy, Clone, Debug, Default)]
pub enum CommunityEventType {
#[default]
OfficeHours,
SyncUp,
}

#[derive(Debug, Default)]
#[derive(Clone, Debug, Default)]
pub struct CommunityEventDescriptionData {
pub event_type: CommunityEventType,
pub mention_users: Vec<Box<str>>,
pub mention_channels: Vec<Box<str>>,
pub ping_duration_before_start_mins: Option<u32>,
}

#[derive(Debug, Default)]
#[derive(Clone, Debug, Default)]
pub struct CommunityEvent {
// Unix millis.
pub start_time: UtcTime,
docker/run_buildbot_monitor.sh (2 changes: 1 addition & 1 deletion)
@@ -6,7 +6,7 @@ if [[ -z "${DISCORD_TOKEN:-}" ]]; then
fi

exec_mon() {
./llvm_buildbot_monitor \
./llvm_buildbot_monitor/llvm_buildbot_monitor \
--discord-token="${DISCORD_TOKEN}" \
--database=/db/db.sqlite3
}
llvm_buildbot_monitor/src/calendar_events.rs (60 changes: 37 additions & 23 deletions)
@@ -4,9 +4,8 @@ use std::{collections::HashSet, time::Duration};
use anyhow::{Context, Result};
use calendar_check::CommunityEvent;
use chrono::{DateTime, Utc};
use log::{error, info};
use serenity::model::event;
use tokio::sync::watch;
use log::{error, info, warn};
use tokio::sync::{broadcast, watch};

use crate::storage::Storage;

@@ -111,33 +110,41 @@ fn load_and_gc_previous_calendar_pings(
result
}

async fn run_discord_ping_impl(event: &CommunityEvent) -> Result<()> {
todo!();
}

async fn run_discord_pings(
state: Arc<State>,
ping_indices: Vec<usize>,
storage: Arc<Mutex<Storage>>,
fn run_discord_pings(
state: &Arc<State>,
ping_indices: &[usize],
storage: &Arc<Mutex<Storage>>,
discord_messages: &broadcast::Sender<CommunityEvent>,
) {
let events_to_ping = ping_indices.into_iter().map(|i| (i, &state.events[i]));
let events_to_ping = ping_indices.into_iter().map(|&i| (i, &state.events[i]));
let mut successful_pings = Vec::new();
for (i, event) in events_to_ping {
if let Err(x) = run_discord_ping_impl(event).await {
error!("Failed sending calendar ping for event {:?}: {x}", event.id);
if discord_messages.send(event.clone()).is_err() {
warn!(
"Can't send calendar ping for event {:?}; no receivers of the message exist",
event.id
);
continue;
}
successful_pings.push(i);
}

let state = state.clone();
let storage = storage.clone();
// Just drop this on the floor, under the expectation that it'll
// complete soon enough anyway.
let _ = tokio::task::spawn_blocking(move || {
if successful_pings.is_empty() {
return;
}

let state = state.clone();
let storage = storage.clone();
// Just drop this on the floor, under the expectation that it'll
// complete soon enough anyway.
let _ = tokio::task::spawn_blocking(move || {
for i in successful_pings {
let id = state.events[i].id.as_ref();
if let Err(x) = storage.lock().unwrap().add_sent_calendar_ping(id) {
error!("Failed adding sent calendar ping to storage: {x}");
}
});
}
}
});
}

fn calculate_event_ping_data(
@@ -178,6 +185,7 @@ fn calculate_event_ping_data(
async fn run_discord_integration_forever(
storage: Arc<Mutex<Storage>>,
mut state_receiver: watch::Receiver<Arc<State>>,
discord_messages: broadcast::Sender<CommunityEvent>,
) {
let mut already_pinged = {
let storage = storage.clone();
@@ -192,6 +200,7 @@ async fn run_discord_integration_forever(
let now = Utc::now();
let (ping_now_indices, nearest_unfired_ping) =
calculate_event_ping_data(&events, &now, &already_pinged);

if !ping_now_indices.is_empty() {
// While this hasn't been pinged _yet_, it will be very shortly
// by the following `spawn`. Note this can't be done in the loop
@@ -200,7 +209,7 @@
for &i in &ping_now_indices {
already_pinged.insert(events.events[i].id.clone());
}
tokio::spawn(run_discord_pings(events, ping_now_indices, storage.clone()));
run_discord_pings(&events, &ping_now_indices, &storage, &discord_messages);
}

match nearest_unfired_ping {
@@ -252,7 +261,10 @@ fn calculate_next_refresh_time(state: &State) -> DateTime<Utc> {
}
}

pub(crate) async fn run_calendar_forever(storage: Arc<Mutex<Storage>>) {
pub(crate) async fn run_calendar_forever(
storage: Arc<Mutex<Storage>>,
discord_messages: broadcast::Sender<CommunityEvent>,
) {
// N.B., this is Arc<> because `watch` has internal sync locking. The
// discord integration may want to hold a borrow() for a while, which
// could get spicy. Skip the spice by using a cheaply-cloneable type.
@@ -261,6 +273,7 @@ pub(crate) async fn run_calendar_forever(storage: Arc<Mutex<Storage>>) {
tokio::spawn(run_discord_integration_forever(
storage.clone(),
state_receiver,
discord_messages,
));
loop {
// N.B., This is the only sender, so a simple `.borrow()` here instead of
@@ -277,6 +290,7 @@ pub(crate) async fn run_calendar_forever(storage: Arc<Mutex<Storage>>) {
}

let new_state = load_state_with_retry().await;
info!("Calendar refresh successfully loaded {:?} events", new_state.events.len());
// The receiver is meant to also run forever. Something really bad
// happened if it died. Probably best to just crash the program at that
// point, honestly.
llvm_buildbot_monitor/src/discord.rs (18 changes: 16 additions & 2 deletions)
@@ -10,6 +10,7 @@ use std::sync::{Arc, Mutex};
use std::time::Duration;

use anyhow::Result;
use calendar_check::CommunityEvent;
use futures::StreamExt;
use log::{error, info, warn};
use serenity::async_trait;
@@ -19,7 +20,7 @@ use serenity::http::Http;
use serenity::model::prelude::*;
use serenity::prelude::*;
use tokio::runtime::Runtime;
use tokio::sync::watch;
use tokio::sync::{broadcast, watch};

// TODO:
// ## Include broken stage names in #buildbot-updates (?)
@@ -601,6 +602,7 @@ struct MessageHandler {
servers: Arc<Mutex<GuildServerState>>,
bot_version: &'static str,
storage: Arc<Mutex<Storage>>,
community_event_sender: broadcast::Sender<CommunityEvent>,
}

fn append_discord_safe_email(targ: &mut String, email: &Email) {
@@ -884,7 +886,10 @@ impl serenity::client::EventHandler for MessageHandler {
Some("list-emails") => Some(self.handle_list_emails(from_uid)),
Some("rm-email") => Some(self.handle_remove_email(from_uid, content_fields.next())),
Some(_) | None => {
info!("Received a DM-ish message; not sure what to do with it: {:?}", content);
info!(
"Received a DM-ish message; not sure what to do with it: {:?}",
content
);
None
}
};
@@ -1368,13 +1373,22 @@ pub(crate) fn run(
let storage = Arc::new(Mutex::new(storage));
runtime.spawn(draw_ui(snapshots, ui_broadcaster.clone()));
let intents = GatewayIntents::DIRECT_MESSAGES | GatewayIntents::GUILDS;

// 25 is probably a way overestimate here, but who cares.
let (community_event_sender, _) = broadcast::channel(25);
runtime.spawn(super::calendar_events::run_calendar_forever(
storage.clone(),
community_event_sender.clone(),
));

runtime.block_on(async move {
serenity::Client::builder(token, intents)
.event_handler(MessageHandler {
ui_broadcaster,
servers: Default::default(),
bot_version,
storage,
community_event_sender,
})
.await?
.start()
