Add Insights Model Discovery and Fallback Handling

Cameron committed 2026-01-03 20:27:34 -05:00
parent 1171f19845
commit cf52d4ab76
10 changed files with 419 additions and 80 deletions

View File

@@ -1,13 +1,16 @@
use actix_web::{HttpResponse, Responder, delete, get, post, web};
use serde::{Deserialize, Serialize};
use crate::ai::InsightGenerator;
use crate::ai::{InsightGenerator, OllamaClient};
use crate::data::Claims;
use crate::database::InsightDao;
use crate::utils::normalize_path;
#[derive(Debug, Deserialize)]
pub struct GeneratePhotoInsightRequest {
pub file_path: String,
#[serde(default)]
pub model: Option<String>,
}
#[derive(Debug, Deserialize)]
@@ -25,6 +28,20 @@ pub struct PhotoInsightResponse {
pub model_version: String,
}
#[derive(Debug, Serialize)]
pub struct AvailableModelsResponse {
pub primary: ServerModels,
#[serde(skip_serializing_if = "Option::is_none")]
pub fallback: Option<ServerModels>,
}
#[derive(Debug, Serialize)]
pub struct ServerModels {
pub url: String,
pub models: Vec<String>,
pub default_model: String,
}
/// POST /insights/generate - Generate insight for a specific photo
#[post("/insights/generate")]
pub async fn generate_insight_handler(
@@ -32,14 +49,17 @@ pub async fn generate_insight_handler(
request: web::Json<GeneratePhotoInsightRequest>,
insight_generator: web::Data<InsightGenerator>,
) -> impl Responder {
let normalized_path = normalize_path(&request.file_path);
log::info!(
"Manual insight generation triggered for photo: {}",
request.file_path
"Manual insight generation triggered for photo: {} with model: {:?}",
normalized_path,
request.model
);
// Generate insight
// Generate insight with optional custom model
match insight_generator
.generate_insight_for_photo(&request.file_path)
.generate_insight_for_photo_with_model(&normalized_path, request.model.clone())
.await
{
Ok(()) => HttpResponse::Ok().json(serde_json::json!({
@@ -62,12 +82,13 @@ pub async fn get_insight_handler(
query: web::Query<GetPhotoInsightQuery>,
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
) -> impl Responder {
log::debug!("Fetching insight for {}", query.path);
let normalized_path = normalize_path(&query.path);
log::debug!("Fetching insight for {}", normalized_path);
let otel_context = opentelemetry::Context::new();
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
match dao.get_insight(&otel_context, &query.path) {
match dao.get_insight(&otel_context, &normalized_path) {
Ok(Some(insight)) => {
let response = PhotoInsightResponse {
id: insight.id,
@@ -98,12 +119,13 @@ pub async fn delete_insight_handler(
query: web::Query<GetPhotoInsightQuery>,
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
) -> impl Responder {
log::info!("Deleting insight for {}", query.path);
let normalized_path = normalize_path(&query.path);
log::info!("Deleting insight for {}", normalized_path);
let otel_context = opentelemetry::Context::new();
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
match dao.delete_insight(&otel_context, &query.path) {
match dao.delete_insight(&otel_context, &normalized_path) {
Ok(()) => HttpResponse::Ok().json(serde_json::json!({
"success": true,
"message": "Insight deleted successfully"
@@ -152,3 +174,53 @@ pub async fn get_all_insights_handler(
}
}
}
/// GET /insights/models - List available models from both servers
#[get("/insights/models")]
pub async fn get_available_models_handler(
_claims: Claims,
app_state: web::Data<crate::state::AppState>,
) -> impl Responder {
log::debug!("Fetching available models");
let ollama_client = &app_state.ollama;
// Fetch models from primary server
let primary_models = match OllamaClient::list_models(&ollama_client.primary_url).await {
Ok(models) => models,
Err(e) => {
log::warn!("Failed to fetch models from primary server: {:?}", e);
vec![]
}
};
let primary = ServerModels {
url: ollama_client.primary_url.clone(),
models: primary_models,
default_model: ollama_client.primary_model.clone(),
};
// Fetch models from fallback server if configured
let fallback = if let Some(fallback_url) = &ollama_client.fallback_url {
match OllamaClient::list_models(fallback_url).await {
Ok(models) => Some(ServerModels {
url: fallback_url.clone(),
models,
default_model: ollama_client
.fallback_model
.clone()
.unwrap_or_else(|| ollama_client.primary_model.clone()),
}),
Err(e) => {
log::warn!("Failed to fetch models from fallback server: {:?}", e);
None
}
}
} else {
None
};
let response = AvailableModelsResponse { primary, fallback };
HttpResponse::Ok().json(response)
}
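
For context, a consumer of the new endpoint might deserialize the payload along these lines. This is a hypothetical client-side sketch, not part of this commit: the base URL, bearer token, and the mirrored structs are assumptions.

use serde::Deserialize;

// Client-side mirrors of the response types added above.
#[derive(Debug, Deserialize)]
struct ServerModels {
    url: String,
    models: Vec<String>,
    default_model: String,
}

#[derive(Debug, Deserialize)]
struct AvailableModelsResponse {
    primary: ServerModels,
    fallback: Option<ServerModels>, // omitted when no fallback server is configured
}

// Fetch the model list and fall back to the primary server's default model.
async fn pick_model(base: &str, token: &str) -> anyhow::Result<String> {
    let resp: AvailableModelsResponse = reqwest::Client::new()
        .get(format!("{base}/insights/models"))
        .bearer_auth(token)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    Ok(resp
        .primary
        .models
        .first()
        .cloned()
        .unwrap_or(resp.primary.default_model))
}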

View File

@@ -1,6 +1,7 @@
use anyhow::Result;
use chrono::Utc;
use chrono::{DateTime, Utc};
use serde::Deserialize;
use std::fs::File;
use std::sync::{Arc, Mutex};
use crate::ai::ollama::OllamaClient;
@@ -8,6 +9,7 @@ use crate::ai::sms_client::SmsApiClient;
use crate::database::models::InsertPhotoInsight;
use crate::database::{ExifDao, InsightDao};
use crate::memories::extract_date_from_filename;
use crate::utils::normalize_path;
#[derive(Deserialize)]
struct NominatimResponse {
@@ -20,9 +22,7 @@ struct NominatimAddress {
city: Option<String>,
town: Option<String>,
village: Option<String>,
county: Option<String>,
state: Option<String>,
country: Option<String>,
}
#[derive(Clone)]
@@ -31,6 +31,7 @@ pub struct InsightGenerator {
sms_client: SmsApiClient,
insight_dao: Arc<Mutex<Box<dyn InsightDao>>>,
exif_dao: Arc<Mutex<Box<dyn ExifDao>>>,
base_path: String,
}
impl InsightGenerator {
@@ -39,12 +40,14 @@ impl InsightGenerator {
sms_client: SmsApiClient,
insight_dao: Arc<Mutex<Box<dyn InsightDao>>>,
exif_dao: Arc<Mutex<Box<dyn ExifDao>>>,
base_path: String,
) -> Self {
Self {
ollama,
sms_client,
insight_dao,
exif_dao,
base_path,
}
}
@@ -69,16 +72,35 @@ impl InsightGenerator {
None
}
/// Generate AI insight for a single photo
pub async fn generate_insight_for_photo(&self, file_path: &str) -> Result<()> {
/// Generate AI insight for a single photo with optional custom model
pub async fn generate_insight_for_photo_with_model(
&self,
file_path: &str,
custom_model: Option<String>,
) -> Result<()> {
// Normalize path to ensure consistent forward slashes in database
let file_path = normalize_path(file_path);
log::info!("Generating insight for photo: {}", file_path);
// Create custom Ollama client if model is specified
let ollama_client = if let Some(model) = custom_model {
log::info!("Using custom model: {}", model);
OllamaClient::new(
self.ollama.primary_url.clone(),
self.ollama.fallback_url.clone(),
model.clone(),
Some(model), // Use the same custom model for fallback server
)
} else {
self.ollama.clone()
};
// 1. Get EXIF data for the photo
let otel_context = opentelemetry::Context::new();
let exif = {
let mut exif_dao = self.exif_dao.lock().expect("Unable to lock ExifDao");
exif_dao
.get_exif(&otel_context, file_path)
.get_exif(&otel_context, &file_path)
.map_err(|e| anyhow::anyhow!("Failed to get EXIF: {:?}", e))?
};
@@ -88,17 +110,33 @@ impl InsightGenerator {
} else {
log::warn!("No date_taken in EXIF for {}, trying filename", file_path);
extract_date_from_filename(file_path)
extract_date_from_filename(&file_path)
.map(|dt| dt.timestamp())
.or_else(|| {
// Combine base_path with file_path to get full path
let full_path = std::path::Path::new(&self.base_path).join(&file_path);
File::open(&full_path)
.and_then(|f| f.metadata())
.and_then(|m| m.created().or(m.modified()))
.map(|t| DateTime::<Utc>::from(t).timestamp())
.inspect_err(|e| {
log::warn!(
"Failed to get file timestamp for insight {}: {}",
file_path,
e
)
})
.ok()
})
.unwrap_or_else(|| Utc::now().timestamp())
};
let date_taken = chrono::DateTime::from_timestamp(timestamp, 0)
let date_taken = DateTime::from_timestamp(timestamp, 0)
.map(|dt| dt.date_naive())
.unwrap_or_else(|| Utc::now().date_naive());
// 3. Extract contact name from file path
let contact = Self::extract_contact_from_path(file_path);
let contact = Self::extract_contact_from_path(&file_path);
log::info!("Extracted contact from path: {:?}", contact);
// 4. Fetch SMS messages for the contact (±1 day)
@@ -124,7 +162,7 @@ impl InsightGenerator {
let sms_summary = if !sms_messages.is_empty() {
match self
.sms_client
.summarize_context(&sms_messages, &self.ollama)
.summarize_context(&sms_messages, &ollama_client)
.await
{
Ok(summary) => Some(summary),
@@ -157,13 +195,11 @@ impl InsightGenerator {
);
// 7. Generate title and summary with Ollama
let title = self
.ollama
let title = ollama_client
.generate_photo_title(date_taken, location.as_deref(), sms_summary.as_deref())
.await?;
let summary = self
.ollama
let summary = ollama_client
.generate_photo_summary(date_taken, location.as_deref(), sms_summary.as_deref())
.await?;
@@ -176,7 +212,7 @@ impl InsightGenerator {
title,
summary,
generated_at: Utc::now().timestamp(),
model_version: self.ollama.model.clone(),
model_version: ollama_client.primary_model.clone(),
};
let mut dao = self.insight_dao.lock().expect("Unable to lock InsightDao");

View File

@@ -4,7 +4,8 @@ pub mod ollama;
pub mod sms_client;
pub use handlers::{
delete_insight_handler, generate_insight_handler, get_all_insights_handler, get_insight_handler,
delete_insight_handler, generate_insight_handler, get_all_insights_handler,
get_available_models_handler, get_insight_handler,
};
pub use insight_generator::InsightGenerator;
pub use ollama::OllamaClient;

View File

@@ -2,25 +2,60 @@ use anyhow::Result;
use chrono::NaiveDate;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use crate::memories::MemoryItem;
use std::time::Duration;
#[derive(Clone)]
pub struct OllamaClient {
client: Client,
pub base_url: String,
pub model: String,
pub primary_url: String,
pub fallback_url: Option<String>,
pub primary_model: String,
pub fallback_model: Option<String>,
}
impl OllamaClient {
pub fn new(base_url: String, model: String) -> Self {
pub fn new(
primary_url: String,
fallback_url: Option<String>,
primary_model: String,
fallback_model: Option<String>,
) -> Self {
Self {
client: Client::new(),
base_url,
model,
client: Client::builder()
.connect_timeout(Duration::from_secs(5)) // Quick connection timeout
.timeout(Duration::from_secs(120)) // Total request timeout for generation
.build()
.unwrap_or_else(|_| Client::new()),
primary_url,
fallback_url,
primary_model,
fallback_model,
}
}
/// List available models on an Ollama server
pub async fn list_models(url: &str) -> Result<Vec<String>> {
let client = Client::builder()
.connect_timeout(Duration::from_secs(5))
.timeout(Duration::from_secs(10))
.build()?;
let response = client.get(&format!("{}/api/tags", url)).send().await?;
if !response.status().is_success() {
return Err(anyhow::anyhow!("Failed to list models from {}", url));
}
let tags_response: OllamaTagsResponse = response.json().await?;
Ok(tags_response.models.into_iter().map(|m| m.name).collect())
}
/// Check if a model is available on a server
pub async fn is_model_available(url: &str, model_name: &str) -> Result<bool> {
let models = Self::list_models(url).await?;
Ok(models.iter().any(|m| m == model_name))
}
/// Extract final answer from thinking model output
/// Handles <think>...</think> tags and takes everything after
fn extract_final_answer(&self, response: &str) -> String {
@@ -38,17 +73,15 @@ impl OllamaClient {
response.to_string()
}
pub async fn generate(&self, prompt: &str, system: Option<&str>) -> Result<String> {
log::debug!("=== Ollama Request ===");
log::debug!("Model: {}", self.model);
if let Some(sys) = system {
log::debug!("System: {}", sys);
}
log::debug!("Prompt:\n{}", prompt);
log::debug!("=====================");
async fn try_generate(
&self,
url: &str,
model: &str,
prompt: &str,
system: Option<&str>,
) -> Result<String> {
let request = OllamaRequest {
model: self.model.clone(),
model: model.to_string(),
prompt: prompt.to_string(),
stream: false,
system: system.map(|s| s.to_string()),
@@ -56,7 +89,7 @@ impl OllamaClient {
let response = self
.client
.post(&format!("{}/api/generate", self.base_url))
.post(&format!("{}/api/generate", url))
.json(&request)
.send()
.await?;
@@ -64,7 +97,6 @@ impl OllamaClient {
if !response.status().is_success() {
let status = response.status();
let error_body = response.text().await.unwrap_or_default();
log::error!("Ollama request failed: {} - {}", status, error_body);
return Err(anyhow::anyhow!(
"Ollama request failed: {} - {}",
status,
@@ -73,13 +105,77 @@ impl OllamaClient {
}
let result: OllamaResponse = response.json().await?;
Ok(result.response)
}
pub async fn generate(&self, prompt: &str, system: Option<&str>) -> Result<String> {
log::debug!("=== Ollama Request ===");
log::debug!("Primary model: {}", self.primary_model);
if let Some(sys) = system {
log::debug!("System: {}", sys);
}
log::debug!("Prompt:\n{}", prompt);
log::debug!("=====================");
// Try primary server first with primary model
log::info!(
"Attempting to generate with primary server: {} (model: {})",
self.primary_url,
self.primary_model
);
let primary_result = self
.try_generate(&self.primary_url, &self.primary_model, prompt, system)
.await;
let raw_response = match primary_result {
Ok(response) => {
log::info!("Successfully generated response from primary server");
response
}
Err(e) => {
log::warn!("Primary server failed: {}", e);
// Try fallback server if available
if let Some(fallback_url) = &self.fallback_url {
// Use fallback model if specified, otherwise use primary model
let fallback_model =
self.fallback_model.as_ref().unwrap_or(&self.primary_model);
log::info!(
"Attempting to generate with fallback server: {} (model: {})",
fallback_url,
fallback_model
);
match self
.try_generate(fallback_url, fallback_model, prompt, system)
.await
{
Ok(response) => {
log::info!("Successfully generated response from fallback server");
response
}
Err(fallback_e) => {
log::error!("Fallback server also failed: {}", fallback_e);
return Err(anyhow::anyhow!(
"Both primary and fallback servers failed. Primary: {}, Fallback: {}",
e,
fallback_e
));
}
}
} else {
log::error!("No fallback server configured");
return Err(e);
}
}
};
log::debug!("=== Ollama Response ===");
log::debug!("Raw response: {}", result.response.trim());
log::debug!("Raw response: {}", raw_response.trim());
log::debug!("=======================");
// Extract final answer from thinking model output
let cleaned = self.extract_final_answer(&result.response);
let cleaned = self.extract_final_answer(&raw_response);
log::debug!("=== Cleaned Response ===");
log::debug!("Final answer: {}", cleaned);
@@ -99,7 +195,7 @@ impl OllamaClient {
let sms_str = sms_summary.unwrap_or("No messages");
let prompt = format!(
r#"Create a short title (maximum 8 words) for this photo:
r#"Create a short title (maximum 8 words) about this moment:
Date: {}
Location: {}
@@ -113,8 +209,7 @@ Return ONLY the title, nothing else."#,
sms_str
);
let system =
"You are a memory assistant. Use only the information provided. Do not invent details.";
let system = "You are my long term memory assistant. Use only the information provided. Do not invent details.";
let title = self.generate(&prompt, Some(system)).await?;
Ok(title.trim().trim_matches('"').to_string())
@@ -127,7 +222,7 @@ Return ONLY the title, nothing else."#,
location: Option<&str>,
sms_summary: Option<&str>,
) -> Result<String> {
let location_str = location.unwrap_or("somewhere");
let location_str = location.unwrap_or("Unknown");
let sms_str = sms_summary.unwrap_or("No messages");
let prompt = format!(
@@ -137,7 +232,7 @@ Date: {}
Location: {}
Messages: {}
Use only the specific details provided above. Mention people's names, places, or activities if they appear in the context. Write in first person as Cam in a casual but fluent tone. If limited information is available, keep it simple and factual. If the location is unknown omit it"#,
Use only the specific details provided above. Mention people's names, places, or activities if they appear in the context. Write in first person as Cameron with the tone of a journal entry. If limited information is available, keep it simple and factual. If the location is unknown omit it"#,
date.format("%B %d, %Y"),
location_str,
sms_str
@@ -147,15 +242,6 @@ Use only the specific details provided above. Mention people's names, places, or
self.generate(&prompt, Some(system)).await
}
}
pub struct MemoryContext {
pub date: NaiveDate,
pub photos: Vec<MemoryItem>,
pub sms_summary: Option<String>,
pub locations: Vec<String>,
pub cameras: Vec<String>,
}
#[derive(Serialize)]
@@ -171,3 +257,13 @@ struct OllamaRequest {
struct OllamaResponse {
response: String,
}
#[derive(Deserialize)]
struct OllamaTagsResponse {
models: Vec<OllamaModel>,
}
#[derive(Deserialize)]
struct OllamaModel {
name: String,
}