feat: add enrichment parameter to gather_search_context(), replacing the weak metadata-only query

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Cameron
2026-03-18 17:17:21 -04:00
parent c0d27d0b9e
commit e58b8fe743

View File

@@ -512,22 +512,29 @@ impl InsightGenerator {
timestamp: i64,
location: Option<&str>,
contact: Option<&str>,
enrichment: Option<&str>,
) -> Result<Option<String>> {
let tracer = global_tracer();
let span = tracer.start_with_context("ai.context.search", parent_cx);
let search_cx = parent_cx.with_span(span);
// Build semantic query from metadata
let query_text = format!(
"searches about {} {} {}",
DateTime::from_timestamp(timestamp, 0)
.map(|dt| dt.format("%B %Y").to_string())
.unwrap_or_default(),
location.unwrap_or(""),
contact
.map(|c| format!("involving {}", c))
.unwrap_or_default()
);
// Use enrichment (topics + photo description + tags) if available;
// fall back to generic temporal query.
let query_text = if let Some(enriched) = enrichment {
enriched.to_string()
} else {
// Fallback: generic temporal query
format!(
"searches about {} {} {}",
DateTime::from_timestamp(timestamp, 0)
.map(|dt| dt.format("%B %Y").to_string())
.unwrap_or_default(),
location.unwrap_or(""),
contact
.map(|c| format!("involving {}", c))
.unwrap_or_default()
)
};
let query_embedding = match self.ollama.generate_embedding(&query_text).await {
Ok(emb) => emb,
@@ -948,6 +955,7 @@ impl InsightGenerator {
timestamp,
location.as_deref(),
contact.as_deref(),
None, // enrichment — wired up in Task 5
)
.await
.ok()