Face Recognition / People Integration #61

Merged
cameron merged 23 commits from feature/face-recog-phase3-file-watch into master 2026-04-30 17:22:09 +00:00
Showing only changes of commit c2c1fe5b8b - Show all commits

View File

@@ -20,6 +20,7 @@
use crate::Claims;
use crate::ai::face_client::{DetectMeta, FaceClient, FaceDetectError};
use crate::exif;
use crate::database::schema::{face_detections, image_exif, persons};
use crate::error::IntoHttpError;
use crate::libraries::{self, Library};
@@ -2508,7 +2509,18 @@ fn crop_image_to_bbox(
if nw <= 0.0 || nh <= 0.0 || nx + nw > 1.001 || ny + nh > 1.001 {
return Err(anyhow!("bbox wh out of bounds or zero"));
}
let img = image::open(abs_path).with_context(|| format!("open {:?}", abs_path))?;
let raw = image::open(abs_path).with_context(|| format!("open {:?}", abs_path))?;
// EXIF rotation: the bbox arrives in display space (the carousel /
// overlay are rendered post-rotation by the browser), but the
// `image` crate hands us raw pre-rotation pixels. For any phone
// photo with Orientation 6/8/etc., applying the bbox without
// rotating first lands the crop on a completely different region
// of the image — which is why manually-drawn bboxes basically
// never resolved a face on re-detection. Apply the orientation
// first, then index into the canonical-oriented dims. Photos with
// no EXIF rotation tag pay nothing (apply_orientation is a no-op).
let orientation = exif::read_orientation(abs_path).unwrap_or(1);
let img = exif::apply_orientation(raw, orientation);
let (w, h) = img.dimensions();
let px = (nx * w as f32).round().clamp(0.0, w as f32 - 1.0) as u32;
let py = (ny * h as f32).round().clamp(0.0, h as f32 - 1.0) as u32;
@@ -2517,11 +2529,17 @@ fn crop_image_to_bbox(
if pw == 0 || ph == 0 {
return Err(anyhow!("crop produced zero-dim image"));
}
// Pad the crop a bit so the detector has context — a tightly-drawn
// bbox often clips ears/jaw which hurts the embedding. 10% on each
// side is a reasonable default.
let pad_x = (pw / 10).max(1);
let pad_y = (ph / 10).max(1);
// Generous padding so RetinaFace has anchor-friendly context.
// Insightface internally resizes to det_size=640 (square). A
// tightly-drawn 200×250 face bbox + 10% padding becomes ~240×300,
// which after resize fills ~95% of the input — near the upper
// edge of RetinaFace's anchor scales, where it routinely returns
// zero detections. Padding to 50% on each side makes the crop
// 2× the bbox dims (face occupies ~50% of the input), where
// anchors hit cleanly. Bbox is clamped to image bounds, so
// edge-of-image bboxes just get less padding on the clipped side.
let pad_x = (pw / 2).max(1);
let pad_y = (ph / 2).max(1);
let cx = px.saturating_sub(pad_x);
let cy = py.saturating_sub(pad_y);
let cw = (pw + 2 * pad_x).min(w - cx);