Batch EXIF endpoint: GET /photos/exif
Adds a single round-trip projection of `image_exif` for every photo whose
`date_taken` falls in `[date_from, date_to]`. Wraps the existing
`ExifDao::query_by_exif` DAO method which already handles the SQL filter
in one query against the covering index — the only missing piece was
HTTP plumbing.
Designed for window-scoped consumers like Apollo's photo-to-track
matcher, which currently does N+1 (one `/photos` listing + one
`/image/metadata` per photo). Because `/image/metadata` serializes on
`Data<Mutex<dyn ExifDao>>`, that pattern can take 10s+ for windows with
hundreds of photos. The new endpoint takes one mutex acquisition for
the whole batch.
Response shape:
{ photos: [
{ file_path, library_id, library_name,
camera_model, width, height,
gps_latitude, gps_longitude, date_taken } ],
total: N }
Two notes on scope:
- Photos with NULL `date_taken` are excluded by `query_by_exif`'s
semantics. Filename-extracted dates are not synthesized here; rare
callers that need that fallback can still hit `/image/metadata`.
- GPS columns are stored as f32 in image_exif to keep row size small;
the JSON shape widens to f64 so clients don't have to know about the
on-disk precision.
Library names are pre-mapped from `app_state.libraries` once and
stamped on each row, avoiding an O(rows × libraries) linear scan.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in: src/files.rs (+87 lines)
@@ -10,7 +10,10 @@ use std::path::{Path, PathBuf};
|
||||
use std::sync::Mutex;
|
||||
use std::time::SystemTime;
|
||||
|
||||
use crate::data::{Claims, FilesRequest, FilterMode, MediaType, PhotosResponse, SortType};
|
||||
use crate::data::{
|
||||
Claims, ExifBatchRequest, ExifBatchResponse, ExifSummary, FilesRequest, FilterMode, MediaType,
|
||||
PhotosResponse, SortType,
|
||||
};
|
||||
use crate::database::ExifDao;
|
||||
use crate::file_types;
|
||||
use crate::geo::{gps_bounding_box, haversine_distance};
|
||||
@@ -1180,6 +1183,88 @@ pub async fn get_gps_summary(
|
||||
}
|
||||
}
|
||||
|
||||
/// Handler for the batch EXIF endpoint at `GET /photos/exif`.
|
||||
///
|
||||
/// Returns a single-row projection of `image_exif` for every photo whose
|
||||
/// `date_taken` falls in `[date_from, date_to]`, across all libraries.
|
||||
/// Designed to replace the N+1 pattern of `/photos` + per-file
|
||||
/// `/image/metadata` for window-scoped consumers like Apollo's photo-to-
|
||||
/// track matcher: one DB query, one HTTP round-trip, one mutex acquisition.
|
||||
///
|
||||
/// Photos with no `date_taken` are excluded by construction (the underlying
|
||||
/// `query_by_exif` filter requires a non-null timestamp once a range is
|
||||
/// supplied). Filename-extracted dates are not synthesized here; if a
|
||||
/// caller needs that fallback, fetch the row separately via
|
||||
/// `/image/metadata` (rare path).
|
||||
pub async fn list_exif_summary(
|
||||
_: Claims,
|
||||
request: HttpRequest,
|
||||
req: Query<ExifBatchRequest>,
|
||||
exif_dao: Data<Mutex<Box<dyn ExifDao>>>,
|
||||
app_state: Data<AppState>,
|
||||
) -> Result<HttpResponse, actix_web::Error> {
|
||||
let parent_cx = extract_context_from_request(&request);
|
||||
let tracer = global_tracer();
|
||||
let mut span = tracer
|
||||
.span_builder("list_exif_summary")
|
||||
.start_with_context(&tracer, &parent_cx);
|
||||
|
||||
span.set_attribute(KeyValue::new(
|
||||
"date_from",
|
||||
req.date_from.map(|v| v.to_string()).unwrap_or_default(),
|
||||
));
|
||||
span.set_attribute(KeyValue::new(
|
||||
"date_to",
|
||||
req.date_to.map(|v| v.to_string()).unwrap_or_default(),
|
||||
));
|
||||
let cx = opentelemetry::Context::current_with_span(span);
|
||||
|
||||
// Pre-build an id → name map so we don't linear-scan libraries per row.
|
||||
let library_names: std::collections::HashMap<i32, String> = app_state
|
||||
.libraries
|
||||
.iter()
|
||||
.map(|lib| (lib.id, lib.name.clone()))
|
||||
.collect();
|
||||
|
||||
let mut exif_dao_guard = exif_dao.lock().expect("Unable to get ExifDao");
|
||||
match exif_dao_guard.query_by_exif(&cx, None, None, None, None, req.date_from, req.date_to) {
|
||||
Ok(rows) => {
|
||||
let photos: Vec<ExifSummary> = rows
|
||||
.into_iter()
|
||||
.map(|r| ExifSummary {
|
||||
library_name: library_names.get(&r.library_id).cloned(),
|
||||
file_path: r.file_path,
|
||||
library_id: r.library_id,
|
||||
camera_model: r.camera_model,
|
||||
width: r.width,
|
||||
height: r.height,
|
||||
// image_exif stores GPS as f32 to keep row size small;
|
||||
// widen for the JSON shape so clients don't need to
|
||||
// know about the on-disk precision.
|
||||
gps_latitude: r.gps_latitude.map(f64::from),
|
||||
gps_longitude: r.gps_longitude.map(f64::from),
|
||||
date_taken: r.date_taken,
|
||||
})
|
||||
.collect();
|
||||
|
||||
let total = photos.len();
|
||||
cx.span()
|
||||
.set_attribute(KeyValue::new("result_count", total as i64));
|
||||
cx.span().set_status(Status::Ok);
|
||||
|
||||
Ok(HttpResponse::Ok().json(ExifBatchResponse { photos, total }))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error querying EXIF batch: {:?}", e);
|
||||
cx.span()
|
||||
.set_status(Status::error(format!("Database error: {:?}", e)));
|
||||
Ok(HttpResponse::InternalServerError().json(serde_json::json!({
|
||||
"error": "Failed to query EXIF data"
|
||||
})))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn move_file<FS: FileSystemAccess>(
|
||||
_: Claims,
|
||||
file_system: Data<FS>,
|
||||
|
||||
Reference in New Issue
Block a user