//! HTTP handlers for photo insight endpoints: generate, fetch, delete,
//! list all, and enumerate available Ollama models.
use actix_web::{HttpRequest, HttpResponse, Responder, delete, get, post, web};
use opentelemetry::KeyValue;
use opentelemetry::trace::{Span, Status, Tracer};
use serde::{Deserialize, Serialize};

use crate::ai::{InsightGenerator, ModelCapabilities, OllamaClient};
use crate::data::Claims;
use crate::database::InsightDao;
use crate::otel::{extract_context_from_request, global_tracer};
use crate::utils::normalize_path;
/// Request body for `POST /insights/generate`.
#[derive(Debug, Deserialize)]
pub struct GeneratePhotoInsightRequest {
    /// Path of the photo to generate an insight for; the handler runs it
    /// through `normalize_path` before use.
    pub file_path: String,
    /// Optional model-name override forwarded to the insight generator.
    #[serde(default)]
    pub model: Option<String>,
    /// Optional custom system prompt forwarded to the insight generator.
    #[serde(default)]
    pub system_prompt: Option<String>,
    /// Optional context-window size (`num_ctx`) forwarded to the generator.
    #[serde(default)]
    pub num_ctx: Option<i32>,
}
/// Query parameters shared by `GET /insights` and `DELETE /insights`.
#[derive(Debug, Deserialize)]
pub struct GetPhotoInsightQuery {
    /// Photo path to look up; the handlers normalize it via `normalize_path`.
    pub path: String,
}
/// JSON representation of a stored photo insight, as returned to clients.
#[derive(Debug, Serialize)]
pub struct PhotoInsightResponse {
    /// Identifier of the stored insight record.
    pub id: i32,
    /// Path of the photo the insight belongs to.
    pub file_path: String,
    /// Generated title for the photo.
    pub title: String,
    /// Generated summary text for the photo.
    pub summary: String,
    /// Generation timestamp as stored (presumably epoch time — confirm units
    /// against the DAO/schema).
    pub generated_at: i64,
    /// Version/name of the model that produced the insight.
    pub model_version: String,
}
/// Response for `GET /insights/models`: models grouped per Ollama server.
#[derive(Debug, Serialize)]
pub struct AvailableModelsResponse {
    /// Models reported by the primary server.
    pub primary: ServerModels,
    /// Models from the fallback server; the field is omitted from the JSON
    /// when no fallback is configured or its listing failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fallback: Option<ServerModels>,
}
/// Models advertised by a single Ollama server.
#[derive(Debug, Serialize)]
pub struct ServerModels {
    /// URL of the server the models were listed from.
    pub url: String,
    /// Models with their capabilities, as reported by the server.
    pub models: Vec<ModelCapabilities>,
    /// Configured default model for this server.
    pub default_model: String,
}
/// POST /insights/generate - Generate insight for a specific photo
|
|
#[post("/insights/generate")]
|
|
pub async fn generate_insight_handler(
|
|
http_request: HttpRequest,
|
|
_claims: Claims,
|
|
request: web::Json<GeneratePhotoInsightRequest>,
|
|
insight_generator: web::Data<InsightGenerator>,
|
|
) -> impl Responder {
|
|
let parent_context = extract_context_from_request(&http_request);
|
|
let tracer = global_tracer();
|
|
let mut span = tracer.start_with_context("http.insights.generate", &parent_context);
|
|
|
|
let normalized_path = normalize_path(&request.file_path);
|
|
|
|
span.set_attribute(KeyValue::new("file_path", normalized_path.clone()));
|
|
if let Some(ref model) = request.model {
|
|
span.set_attribute(KeyValue::new("model", model.clone()));
|
|
}
|
|
if let Some(ref prompt) = request.system_prompt {
|
|
span.set_attribute(KeyValue::new("has_custom_prompt", true));
|
|
span.set_attribute(KeyValue::new("prompt_length", prompt.len() as i64));
|
|
}
|
|
if let Some(ctx) = request.num_ctx {
|
|
span.set_attribute(KeyValue::new("num_ctx", ctx as i64));
|
|
}
|
|
|
|
log::info!(
|
|
"Manual insight generation triggered for photo: {} with model: {:?}, custom_prompt: {}, num_ctx: {:?}",
|
|
normalized_path,
|
|
request.model,
|
|
request.system_prompt.is_some(),
|
|
request.num_ctx
|
|
);
|
|
|
|
// Generate insight with optional custom model, system prompt, and context size
|
|
let result = insight_generator
|
|
.generate_insight_for_photo_with_config(
|
|
&normalized_path,
|
|
request.model.clone(),
|
|
request.system_prompt.clone(),
|
|
request.num_ctx,
|
|
)
|
|
.await;
|
|
|
|
match result {
|
|
Ok(()) => {
|
|
span.set_status(Status::Ok);
|
|
HttpResponse::Ok().json(serde_json::json!({
|
|
"success": true,
|
|
"message": "Insight generated successfully"
|
|
}))
|
|
}
|
|
Err(e) => {
|
|
log::error!("Failed to generate insight: {:?}", e);
|
|
span.set_status(Status::error(e.to_string()));
|
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
|
"error": format!("Failed to generate insight: {:?}", e)
|
|
}))
|
|
}
|
|
}
|
|
}
|
|
|
|
/// GET /insights?path=/path/to/photo.jpg - Fetch insight for specific photo
|
|
#[get("/insights")]
|
|
pub async fn get_insight_handler(
|
|
_claims: Claims,
|
|
query: web::Query<GetPhotoInsightQuery>,
|
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
|
) -> impl Responder {
|
|
let normalized_path = normalize_path(&query.path);
|
|
log::debug!("Fetching insight for {}", normalized_path);
|
|
|
|
let otel_context = opentelemetry::Context::new();
|
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
|
|
|
match dao.get_insight(&otel_context, &normalized_path) {
|
|
Ok(Some(insight)) => {
|
|
let response = PhotoInsightResponse {
|
|
id: insight.id,
|
|
file_path: insight.file_path,
|
|
title: insight.title,
|
|
summary: insight.summary,
|
|
generated_at: insight.generated_at,
|
|
model_version: insight.model_version,
|
|
};
|
|
HttpResponse::Ok().json(response)
|
|
}
|
|
Ok(None) => HttpResponse::NotFound().json(serde_json::json!({
|
|
"error": "Insight not found"
|
|
})),
|
|
Err(e) => {
|
|
log::error!("Failed to fetch insight ({}): {:?}", &query.path, e);
|
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
|
"error": format!("Failed to fetch insight: {:?}", e)
|
|
}))
|
|
}
|
|
}
|
|
}
|
|
|
|
/// DELETE /insights?path=/path/to/photo.jpg - Remove insight (will regenerate on next request)
|
|
#[delete("/insights")]
|
|
pub async fn delete_insight_handler(
|
|
_claims: Claims,
|
|
query: web::Query<GetPhotoInsightQuery>,
|
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
|
) -> impl Responder {
|
|
let normalized_path = normalize_path(&query.path);
|
|
log::info!("Deleting insight for {}", normalized_path);
|
|
|
|
let otel_context = opentelemetry::Context::new();
|
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
|
|
|
match dao.delete_insight(&otel_context, &normalized_path) {
|
|
Ok(()) => HttpResponse::Ok().json(serde_json::json!({
|
|
"success": true,
|
|
"message": "Insight deleted successfully"
|
|
})),
|
|
Err(e) => {
|
|
log::error!("Failed to delete insight: {:?}", e);
|
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
|
"error": format!("Failed to delete insight: {:?}", e)
|
|
}))
|
|
}
|
|
}
|
|
}
|
|
|
|
/// GET /insights/all - Get all insights
|
|
#[get("/insights/all")]
|
|
pub async fn get_all_insights_handler(
|
|
_claims: Claims,
|
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
|
) -> impl Responder {
|
|
log::debug!("Fetching all insights");
|
|
|
|
let otel_context = opentelemetry::Context::new();
|
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
|
|
|
match dao.get_all_insights(&otel_context) {
|
|
Ok(insights) => {
|
|
let responses: Vec<PhotoInsightResponse> = insights
|
|
.into_iter()
|
|
.map(|insight| PhotoInsightResponse {
|
|
id: insight.id,
|
|
file_path: insight.file_path,
|
|
title: insight.title,
|
|
summary: insight.summary,
|
|
generated_at: insight.generated_at,
|
|
model_version: insight.model_version,
|
|
})
|
|
.collect();
|
|
|
|
HttpResponse::Ok().json(responses)
|
|
}
|
|
Err(e) => {
|
|
log::error!("Failed to fetch all insights: {:?}", e);
|
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
|
"error": format!("Failed to fetch insights: {:?}", e)
|
|
}))
|
|
}
|
|
}
|
|
}
|
|
|
|
/// GET /insights/models - List available models from both servers with capabilities
|
|
#[get("/insights/models")]
|
|
pub async fn get_available_models_handler(
|
|
_claims: Claims,
|
|
app_state: web::Data<crate::state::AppState>,
|
|
) -> impl Responder {
|
|
log::debug!("Fetching available models with capabilities");
|
|
|
|
let ollama_client = &app_state.ollama;
|
|
|
|
// Fetch models with capabilities from primary server
|
|
let primary_models =
|
|
match OllamaClient::list_models_with_capabilities(&ollama_client.primary_url).await {
|
|
Ok(models) => models,
|
|
Err(e) => {
|
|
log::warn!("Failed to fetch models from primary server: {:?}", e);
|
|
vec![]
|
|
}
|
|
};
|
|
|
|
let primary = ServerModels {
|
|
url: ollama_client.primary_url.clone(),
|
|
models: primary_models,
|
|
default_model: ollama_client.primary_model.clone(),
|
|
};
|
|
|
|
// Fetch models with capabilities from fallback server if configured
|
|
let fallback = if let Some(fallback_url) = &ollama_client.fallback_url {
|
|
match OllamaClient::list_models_with_capabilities(fallback_url).await {
|
|
Ok(models) => Some(ServerModels {
|
|
url: fallback_url.clone(),
|
|
models,
|
|
default_model: ollama_client
|
|
.fallback_model
|
|
.clone()
|
|
.unwrap_or_else(|| ollama_client.primary_model.clone()),
|
|
}),
|
|
Err(e) => {
|
|
log::warn!("Failed to fetch models from fallback server: {:?}", e);
|
|
None
|
|
}
|
|
}
|
|
} else {
|
|
None
|
|
};
|
|
|
|
let response = AvailableModelsResponse { primary, fallback };
|
|
|
|
HttpResponse::Ok().json(response)
|
|
}
|