Create Insight Generation Feature
Adds integration with the Messages API and Ollama for generating photo titles and summaries.
src/ai/ollama.rs | 173 (new file)
@@ -0,0 +1,173 @@
use anyhow::Result;
use chrono::NaiveDate;
use reqwest::Client;
use serde::{Deserialize, Serialize};

use crate::memories::MemoryItem;

#[derive(Clone)]
pub struct OllamaClient {
    client: Client,
    pub base_url: String,
    pub model: String,
}

impl OllamaClient {
    pub fn new(base_url: String, model: String) -> Self {
        Self {
            client: Client::new(),
            base_url,
            model,
        }
    }

    /// Extract the final answer from thinking-model output.
    /// Handles `<think>...</think>` tags and keeps everything after them.
    fn extract_final_answer(&self, response: &str) -> String {
        let response = response.trim();

        // Look for the closing </think> tag and take everything after it.
        if let Some(pos) = response.find("</think>") {
            let answer = response[pos + "</think>".len()..].trim();
            if !answer.is_empty() {
                return answer.to_string();
            }
        }

        // Fallback: return the whole response, trimmed.
        response.to_string()
    }
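
    // Example (editor's note, hypothetical strings): for the raw output
    // "<think>picking a title</think>\nBeach Day", this returns "Beach Day";
    // output with no </think> tag comes back unchanged apart from trimming.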

    pub async fn generate(&self, prompt: &str, system: Option<&str>) -> Result<String> {
        log::debug!("=== Ollama Request ===");
        log::debug!("Model: {}", self.model);
        if let Some(sys) = system {
            log::debug!("System: {}", sys);
        }
        log::debug!("Prompt:\n{}", prompt);
        log::debug!("=====================");

        let request = OllamaRequest {
            model: self.model.clone(),
            prompt: prompt.to_string(),
            stream: false,
            system: system.map(|s| s.to_string()),
        };

        let response = self
            .client
            .post(format!("{}/api/generate", self.base_url))
            .json(&request)
            .send()
            .await?;

        if !response.status().is_success() {
            let status = response.status();
            let error_body = response.text().await.unwrap_or_default();
            log::error!("Ollama request failed: {} - {}", status, error_body);
            return Err(anyhow::anyhow!(
                "Ollama request failed: {} - {}",
                status,
                error_body
            ));
        }

        let result: OllamaResponse = response.json().await?;

        log::debug!("=== Ollama Response ===");
        log::debug!("Raw response: {}", result.response.trim());
        log::debug!("=======================");

        // Extract the final answer from thinking-model output.
        let cleaned = self.extract_final_answer(&result.response);

        log::debug!("=== Cleaned Response ===");
        log::debug!("Final answer: {}", cleaned);
        log::debug!("========================");

        Ok(cleaned)
    }
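
    // Usage sketch (editor's note; the URL and model name are placeholders,
    // and an async runtime such as Tokio is assumed):
    //
    //     let ollama = OllamaClient::new("http://localhost:11434".into(), "llama3.2".into());
    //     let text = ollama.generate("Say hello", None).await?;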

    /// Generate a title for a single photo based on its context.
    pub async fn generate_photo_title(
        &self,
        date: NaiveDate,
        location: Option<&str>,
        sms_summary: Option<&str>,
    ) -> Result<String> {
        let location_str = location.unwrap_or("Unknown location");
        let sms_str = sms_summary.unwrap_or("No messages");

        let prompt = format!(
            r#"Create a short title (maximum 8 words) for this photo:

Date: {}
Location: {}
Messages: {}

Use specific details from the context above. If no specific details are available, use a simple descriptive title.

Return ONLY the title, nothing else."#,
            date.format("%B %d, %Y"),
            location_str,
            sms_str
        );

        let system =
            "You are a memory assistant. Use only the information provided. Do not invent details.";

        let title = self.generate(&prompt, Some(system)).await?;
        // Strip any surrounding quotation marks the model adds around the title.
        Ok(title.trim().trim_matches('"').to_string())
    }

    /// Generate a summary for a single photo based on its context.
    pub async fn generate_photo_summary(
        &self,
        date: NaiveDate,
        location: Option<&str>,
        sms_summary: Option<&str>,
    ) -> Result<String> {
        let location_str = location.unwrap_or("somewhere");
        let sms_str = sms_summary.unwrap_or("No messages");

        let prompt = format!(
            r#"Write a brief 1-2 paragraph description of this moment based on the available information:

Date: {}
Location: {}
Messages: {}

Use only the specific details provided above. Mention people's names, places, or activities if they appear in the context. Write in first person as Cam in a casual but fluent tone. If limited information is available, keep it simple and factual. If the location is unknown, omit it."#,
            date.format("%B %d, %Y"),
            location_str,
            sms_str
        );

        let system = "You are a memory refreshing assistant. Use only the information provided. Do not invent details. Help me remember this day.";

        self.generate(&prompt, Some(system)).await
    }
}
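
// Usage sketch for the photo helpers (editor's note; all values hypothetical):
//
//     let date = chrono::NaiveDate::from_ymd_opt(2024, 7, 14).unwrap();
//     let title = ollama
//         .generate_photo_title(date, Some("Lisbon"), Some("Planning a beach day"))
//         .await?;
//     let summary = ollama
//         .generate_photo_summary(date, Some("Lisbon"), Some("Planning a beach day"))
//         .await?;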

pub struct MemoryContext {
    pub date: NaiveDate,
    pub photos: Vec<MemoryItem>,
    pub sms_summary: Option<String>,
    pub locations: Vec<String>,
    pub cameras: Vec<String>,
}

#[derive(Serialize)]
struct OllamaRequest {
    model: String,
    prompt: String,
    stream: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    system: Option<String>,
}
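
// Wire-format note (editor's sketch; field values are placeholders): with
// stream set to false this serializes to a single /api/generate payload like
//     {"model":"llama3.2","prompt":"...","system":"...","stream":false}
// and the system field is omitted entirely when it is None.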

#[derive(Deserialize)]
struct OllamaResponse {
    response: String,
}
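
#[cfg(test)]
mod tests {
    use super::*;

    // Editor's sketch: a minimal check of the <think>-stripping logic, using
    // a hypothetical thinking-model transcript. No network access is needed.
    #[test]
    fn extracts_answer_after_think_block() {
        let client = OllamaClient::new("http://localhost:11434".into(), "test-model".into());
        let raw = "<think>picking a title</think>\nBeach Day";
        assert_eq!(client.extract_final_answer(raw), "Beach Day");
        // Output without a </think> tag passes through, trimmed.
        assert_eq!(client.extract_final_answer("  Beach Day  "), "Beach Day");
    }
}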