Two follow-ups to the PATCH endpoint:
1. GET /libraries now returns ``global_excluded_dirs`` alongside the
library list — the union-with-globals semantics is invisible from
the per-library row alone, and the admin UI needs to show what's
already being skipped before the operator adds entries that would
duplicate.
2. PATCH /libraries/{id} canonicalises the excluded_dirs string on
write via the new ``normalize_excluded_dirs_input``: trims per
entry, drops empties, dedupes preserving first-occurrence order,
comma-joins without inner whitespace. Empty / whitespace-only →
NULL. Round-trip stable so re-saving an entry produces an
identical row.
Five new tests cover the empty / whitespace, trim, dedup, round-trip,
and overlap-with-globals cases. effective_excluded_dirs continues to
keep overlapping entries between globals and per-library on purpose —
PathExcluder accepts repeats and there's no behavioural reason to
dedupe at merge time.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
844 lines · 31 KiB · Rust
use actix_web::{HttpResponse, Responder, get, patch, web, web::Data};
|
|
use chrono::Utc;
|
|
use diesel::prelude::*;
|
|
use diesel::sqlite::SqliteConnection;
|
|
use log::{info, warn};
|
|
use serde::Deserialize;
|
|
use std::collections::HashMap;
|
|
use std::path::{Path, PathBuf};
|
|
use std::sync::{Arc, RwLock};
|
|
|
|
use crate::data::Claims;
|
|
use crate::database::models::{InsertLibrary, LibraryRow};
|
|
use crate::database::schema::libraries;
|
|
use crate::state::AppState;
|
|
|
|
/// Id of the primary library row seeded by the multi-library migration.
/// Used as the default `library_id` during the Phase 2 transitional shim,
/// before handlers/callers are library-aware.
pub const PRIMARY_LIBRARY_ID: i32 = 1;

/// Placeholder value written into `libraries.root_path` by the migration.
/// Replaced on startup with the live `BASE_PATH` env var — see
/// `seed_or_patch_from_env` for the startup patch.
pub const ROOT_PATH_PLACEHOLDER: &str = "BASE_PATH_PLACEHOLDER";
|
|
|
|
/// A media library mount point: its numeric id, logical name, and absolute
|
|
/// root on disk. `rel_path` values stored in the DB are relative to this root.
|
|
/// A media library mount point: its numeric id, logical name, and absolute
/// root on disk. `rel_path` values stored in the DB are relative to this root.
#[derive(Clone, Debug, serde::Serialize)]
pub struct Library {
    /// Primary key of the `libraries` row.
    pub id: i32,
    /// Operator-facing logical name (the migration seeds "main").
    pub name: String,
    /// Absolute filesystem root; DB `rel_path` values hang off this.
    pub root_path: String,
    /// Operator kill switch (mirrors `libraries.enabled`). When `false`
    /// the watcher skips this library entirely — before the probe,
    /// before ingest, before maintenance. Reads / serving still work
    /// (a request whose path resolves to a disabled library's root
    /// will succeed if the file is on disk; nothing prevents that
    /// today and there's no obvious reason to). Toggle via SQL or
    /// `PATCH /libraries/{id}`.
    pub enabled: bool,
    /// Per-library excluded paths/patterns, parsed from the
    /// comma-separated DB column. The walker applies these
    /// **in union** with the global `EXCLUDED_DIRS` env var; either
    /// list matching a path is enough to exclude. Empty = no
    /// library-specific excludes (only the global env var applies).
    pub excluded_dirs: Vec<String>,
}
|
|
|
|
impl Library {
|
|
/// Resolve a library-relative path into an absolute `PathBuf` under the
|
|
/// library root. Does not validate traversal — use `is_valid_full_path`
|
|
/// for untrusted input.
|
|
#[allow(dead_code)]
|
|
pub fn resolve(&self, rel_path: &str) -> PathBuf {
|
|
Path::new(&self.root_path).join(rel_path)
|
|
}
|
|
|
|
/// Inverse of `resolve`: given an absolute path under this library's
|
|
/// root, return the root-relative portion. Returns `None` if the path
|
|
/// is not under the library.
|
|
#[allow(dead_code)]
|
|
pub fn strip_root(&self, abs_path: &Path) -> Option<String> {
|
|
abs_path
|
|
.strip_prefix(&self.root_path)
|
|
.ok()
|
|
.map(|p| p.to_string_lossy().replace('\\', "/"))
|
|
}
|
|
|
|
/// Effective excluded directories for a walk of this library:
|
|
/// the union of the global env-var excludes (passed in by the
|
|
/// caller as `globals`) and this library's per-row excludes.
|
|
/// Order doesn't matter; `PathExcluder` accepts repeats.
|
|
pub fn effective_excluded_dirs(&self, globals: &[String]) -> Vec<String> {
|
|
if self.excluded_dirs.is_empty() {
|
|
return globals.to_vec();
|
|
}
|
|
let mut combined: Vec<String> =
|
|
Vec::with_capacity(globals.len() + self.excluded_dirs.len());
|
|
combined.extend_from_slice(globals);
|
|
combined.extend(self.excluded_dirs.iter().cloned());
|
|
combined
|
|
}
|
|
}
|
|
|
|
/// Split a comma-separated excluded_dirs column into its entries,
/// discarding blanks (mirrors `AppState::parse_excluded_dirs` for the
/// env var). A NULL column yields an empty Vec. Repeats are kept —
/// `PathExcluder` accepts them, and deduplication belongs to the
/// storage-side normaliser.
pub fn parse_excluded_dirs_column(raw: Option<&str>) -> Vec<String> {
    let Some(s) = raw else {
        return Vec::new();
    };
    s.split(',')
        .filter_map(|entry| {
            let trimmed = entry.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        })
        .collect()
}
|
|
|
|
/// Canonicalise an excluded_dirs string for storage: parse → trim →
|
|
/// dedupe (preserving insertion order) → comma-join with no inner
|
|
/// whitespace. Empty / whitespace-only input → `None` (writes NULL).
|
|
///
|
|
/// Used by `PATCH /libraries/{id}` so two users typing the same entries
|
|
/// in different orders / casings / whitespace land on the same stored
|
|
/// form, and a typo'd duplicate (`@eaDir, @eaDir`) collapses on save.
|
|
/// Round-trip stable: writing the output back through this function
|
|
/// yields the same string.
|
|
pub fn normalize_excluded_dirs_input(raw: &str) -> Option<String> {
|
|
let parsed = parse_excluded_dirs_column(Some(raw));
|
|
if parsed.is_empty() {
|
|
return None;
|
|
}
|
|
let mut seen = std::collections::HashSet::new();
|
|
let deduped: Vec<String> = parsed
|
|
.into_iter()
|
|
.filter(|s| seen.insert(s.clone()))
|
|
.collect();
|
|
if deduped.is_empty() {
|
|
None
|
|
} else {
|
|
Some(deduped.join(","))
|
|
}
|
|
}
|
|
|
|
impl From<LibraryRow> for Library {
    /// Map a raw `libraries` DB row into the domain type, parsing the
    /// comma-separated `excluded_dirs` column (NULL → empty Vec).
    fn from(row: LibraryRow) -> Self {
        Library {
            id: row.id,
            name: row.name,
            root_path: row.root_path,
            enabled: row.enabled,
            excluded_dirs: parse_excluded_dirs_column(row.excluded_dirs.as_deref()),
        }
    }
}
|
|
|
|
/// Load all library rows from the database into `Library` values.
|
|
pub fn load_all(conn: &mut SqliteConnection) -> Vec<Library> {
|
|
libraries::table
|
|
.order(libraries::id.asc())
|
|
.load::<LibraryRow>(conn)
|
|
.unwrap_or_else(|e| {
|
|
warn!("Failed to load libraries table: {:?}", e);
|
|
Vec::new()
|
|
})
|
|
.into_iter()
|
|
.map(Library::from)
|
|
.collect()
|
|
}
|
|
|
|
/// Ensure at least one library exists and that the seeded placeholder row is
|
|
/// patched with the live `BASE_PATH`. Safe to call on every startup; it only
|
|
/// writes when the placeholder is still present.
|
|
pub fn seed_or_patch_from_env(conn: &mut SqliteConnection, base_path: &str) {
|
|
// Check whether the primary row still carries the placeholder from the
|
|
// migration. If so, replace it with the live BASE_PATH.
|
|
let placeholder_count: i64 = libraries::table
|
|
.filter(libraries::root_path.eq(ROOT_PATH_PLACEHOLDER))
|
|
.count()
|
|
.get_result(conn)
|
|
.unwrap_or(0);
|
|
|
|
if placeholder_count > 0 {
|
|
diesel::update(libraries::table.filter(libraries::root_path.eq(ROOT_PATH_PLACEHOLDER)))
|
|
.set(libraries::root_path.eq(base_path))
|
|
.execute(conn)
|
|
.map(|rows| {
|
|
info!(
|
|
"Patched {} library row(s) with BASE_PATH='{}'",
|
|
rows, base_path
|
|
);
|
|
})
|
|
.unwrap_or_else(|e| warn!("Failed to patch library root_path: {:?}", e));
|
|
return;
|
|
}
|
|
|
|
// If no rows exist at all (e.g. table created outside the seeded migration),
|
|
// insert a primary library pointing at BASE_PATH.
|
|
let total: i64 = libraries::table.count().get_result(conn).unwrap_or(0);
|
|
if total == 0 {
|
|
let now = Utc::now().timestamp();
|
|
let result = diesel::insert_into(libraries::table)
|
|
.values(InsertLibrary {
|
|
name: "main",
|
|
root_path: base_path,
|
|
created_at: now,
|
|
enabled: true,
|
|
excluded_dirs: None,
|
|
})
|
|
.execute(conn);
|
|
match result {
|
|
Ok(_) => info!(
|
|
"Seeded primary library 'main' with BASE_PATH='{}'",
|
|
base_path
|
|
),
|
|
Err(e) => warn!("Failed to seed primary library: {:?}", e),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Resolve a library request parameter (accepts numeric id as string or name)
|
|
/// against the configured libraries. Returns `Ok(None)` when the param is
|
|
/// absent, meaning "span all libraries". Returns `Err` when a value is
|
|
/// provided but does not match any library.
|
|
pub fn resolve_library_param<'a>(
|
|
state: &'a AppState,
|
|
param: Option<&str>,
|
|
) -> Result<Option<&'a Library>, String> {
|
|
let Some(raw) = param.map(str::trim).filter(|s| !s.is_empty()) else {
|
|
return Ok(None);
|
|
};
|
|
|
|
if let Ok(id) = raw.parse::<i32>() {
|
|
return state
|
|
.library_by_id(id)
|
|
.map(Some)
|
|
.ok_or_else(|| format!("unknown library id: {}", id));
|
|
}
|
|
|
|
state
|
|
.library_by_name(raw)
|
|
.map(Some)
|
|
.ok_or_else(|| format!("unknown library name: {}", raw))
|
|
}
|
|
|
|
/// Health of a library at a point in time. Probed at the top of each
/// file-watcher tick. The `Stale` state is the "be conservative" signal:
/// destructive paths (ingest writes, future move-handoff and orphan GC in
/// branches B/C) skip a stale library, but reads/serving stay unaffected.
///
/// Serialises with a `state` tag (`"online"` / `"stale"`) for `/libraries`.
///
/// See `CLAUDE.md` → "Library availability and safety" for the policy.
#[derive(Clone, Debug, serde::Serialize, PartialEq, Eq)]
#[serde(tag = "state", rename_all = "snake_case")]
pub enum LibraryHealth {
    Online,
    Stale {
        // Human-readable cause, e.g. "root_path stat failed: …".
        reason: String,
        /// Unix timestamp (seconds) of the most recent transition into
        /// Stale. Held for telemetry / `/libraries` surfacing only —
        /// gating logic doesn't read it.
        since: i64,
    },
}
|
|
|
|
impl LibraryHealth {
|
|
pub fn is_online(&self) -> bool {
|
|
matches!(self, LibraryHealth::Online)
|
|
}
|
|
}
|
|
|
|
/// Shared snapshot of every configured library's health, keyed by
/// `library_id`. The watcher writes; HTTP handlers read. RwLock because
/// reads vastly outnumber writes (one tick vs. every status request).
pub type LibraryHealthMap = Arc<RwLock<HashMap<i32, LibraryHealth>>>;
|
|
|
|
/// Construct an initial health map. Libraries start `Online`; the first
|
|
/// probe will downgrade any that fail. Starting `Stale` would block ingest
|
|
/// for the watcher's first tick on a healthy mount, which is the wrong
|
|
/// default for a server that's just been restarted.
|
|
pub fn new_health_map(libs: &[Library]) -> LibraryHealthMap {
|
|
let mut m = HashMap::with_capacity(libs.len());
|
|
for lib in libs {
|
|
m.insert(lib.id, LibraryHealth::Online);
|
|
}
|
|
Arc::new(RwLock::new(m))
|
|
}
|
|
|
|
/// Probe a library's mount point. Cheap: stat + open dir + peek one entry.
|
|
///
|
|
/// `had_data` is the caller's prior knowledge that this library has been
|
|
/// non-empty before — typically `image_exif` row count > 0. When true, an
|
|
/// empty directory is suspicious (it's how an unmounted NFS share looks);
|
|
/// when false, it's accepted as a fresh mount that simply hasn't been
|
|
/// indexed yet.
|
|
///
|
|
/// Note: stat / read_dir on a hard-mounted, unreachable NFS share can
|
|
/// block. The watcher accepts that risk for now — the worst case is that
|
|
/// the tick stalls until the mount returns, which is no more destructive
|
|
/// than the pre-probe behavior. A future enhancement can wrap this in a
|
|
/// thread + timeout if it becomes an operational issue.
|
|
pub fn probe_online(lib: &Library, had_data: bool) -> LibraryHealth {
|
|
let now = Utc::now().timestamp();
|
|
let path = Path::new(&lib.root_path);
|
|
|
|
let metadata = match std::fs::metadata(path) {
|
|
Ok(m) => m,
|
|
Err(e) => {
|
|
return LibraryHealth::Stale {
|
|
reason: format!("root_path stat failed: {}", e),
|
|
since: now,
|
|
};
|
|
}
|
|
};
|
|
if !metadata.is_dir() {
|
|
return LibraryHealth::Stale {
|
|
reason: format!("root_path is not a directory: {}", lib.root_path),
|
|
since: now,
|
|
};
|
|
}
|
|
|
|
let mut entries = match std::fs::read_dir(path) {
|
|
Ok(it) => it,
|
|
Err(e) => {
|
|
return LibraryHealth::Stale {
|
|
reason: format!("read_dir failed: {}", e),
|
|
since: now,
|
|
};
|
|
}
|
|
};
|
|
|
|
// Empty directory only counts as Stale when we have prior evidence
|
|
// this library used to have content. A genuinely fresh mount is
|
|
// legitimately empty, and degrading it would block first-time ingest.
|
|
if had_data && entries.next().is_none() {
|
|
return LibraryHealth::Stale {
|
|
reason: "library is empty but image_exif has rows for it".to_string(),
|
|
since: now,
|
|
};
|
|
}
|
|
|
|
LibraryHealth::Online
|
|
}
|
|
|
|
/// Probe `lib`, update `map`, and return the new state. Logs only on a
|
|
/// state transition (Online↔Stale) so a long outage doesn't spam at every
|
|
/// tick — operators get one warn on the way down and one info on the way
|
|
/// up.
|
|
pub fn refresh_health(map: &LibraryHealthMap, lib: &Library, had_data: bool) -> LibraryHealth {
|
|
let new_state = probe_online(lib, had_data);
|
|
let mut guard = map.write().unwrap_or_else(|e| e.into_inner());
|
|
let prev = guard.get(&lib.id).cloned();
|
|
let transitioned = matches!(
|
|
(&prev, &new_state),
|
|
(None, LibraryHealth::Stale { .. })
|
|
| (Some(LibraryHealth::Online), LibraryHealth::Stale { .. })
|
|
| (Some(LibraryHealth::Stale { .. }), LibraryHealth::Online)
|
|
);
|
|
if transitioned {
|
|
match &new_state {
|
|
LibraryHealth::Online => info!(
|
|
"Library '{}' (id={}) recovered: {} is online",
|
|
lib.name, lib.id, lib.root_path
|
|
),
|
|
LibraryHealth::Stale { reason, .. } => warn!(
|
|
"Library '{}' (id={}) is STALE — pausing writes. Reason: {}. Path: {}",
|
|
lib.name, lib.id, reason, lib.root_path
|
|
),
|
|
}
|
|
}
|
|
guard.insert(lib.id, new_state.clone());
|
|
new_state
|
|
}
|
|
|
|
/// Snapshot of one library + its current health, for `/libraries`.
#[derive(serde::Serialize)]
pub struct LibraryStatus {
    // `flatten` inlines the library's fields into the JSON object
    // instead of nesting them under a "library" key.
    #[serde(flatten)]
    pub library: Library,
    pub health: LibraryHealth,
}
|
|
|
|
/// Response body for `GET /libraries`.
#[derive(serde::Serialize)]
pub struct LibrariesResponse {
    pub libraries: Vec<LibraryStatus>,
    /// Globally-excluded paths/patterns from the `EXCLUDED_DIRS` env var.
    /// Applied **in union** with each library's own `excluded_dirs`. Surfaced
    /// here so an admin UI can show the operator "you already skip these
    /// everywhere" before they add per-library entries that would duplicate
    /// the global list. Read-only — globals live in `.env` and aren't
    /// mutable via the API today.
    pub global_excluded_dirs: Vec<String>,
}
|
|
|
|
#[get("/libraries")]
|
|
pub async fn list_libraries(_claims: Claims, app_state: Data<AppState>) -> impl Responder {
|
|
// Read from the live view so a recent PATCH /libraries/{id} that
|
|
// flipped `enabled` or rewrote `excluded_dirs` surfaces immediately
|
|
// — the immutable `app_state.libraries` snapshot is stale once the
|
|
// first mutation lands.
|
|
let live_guard = app_state
|
|
.live_libraries
|
|
.read()
|
|
.unwrap_or_else(|e| e.into_inner());
|
|
let health_guard = app_state
|
|
.library_health
|
|
.read()
|
|
.unwrap_or_else(|e| e.into_inner());
|
|
let libraries = live_guard
|
|
.iter()
|
|
.map(|lib| LibraryStatus {
|
|
library: lib.clone(),
|
|
health: health_guard
|
|
.get(&lib.id)
|
|
.cloned()
|
|
.unwrap_or(LibraryHealth::Online),
|
|
})
|
|
.collect();
|
|
HttpResponse::Ok().json(LibrariesResponse {
|
|
libraries,
|
|
global_excluded_dirs: app_state.excluded_dirs.clone(),
|
|
})
|
|
}
|
|
|
|
/// Body for PATCH /libraries/{id}. Both fields are optional — omitting
/// one leaves it untouched. `excluded_dirs` is the same comma-separated
/// shape as the DB column; an empty string clears (writes NULL).
#[derive(Deserialize, Debug)]
pub struct PatchLibraryBody {
    // None = leave `libraries.enabled` unchanged.
    pub enabled: Option<bool>,
    // None = leave the column unchanged; Some("") clears it to NULL
    // (via normalize_excluded_dirs_input).
    pub excluded_dirs: Option<String>,
}
|
|
|
|
/// Mutate one library row. The watcher reads `app_state.live_libraries`
/// at the top of each tick, so a successful PATCH is picked up within
/// one WATCH_QUICK_INTERVAL_SECONDS without restart — no separate
/// `apply_now` signal. Returns the updated `Library` so the caller can
/// render the new state without a follow-up GET.
///
/// Despite CLAUDE.md noting "Toggle via SQL; there is intentionally no
/// HTTP endpoint for library mutation", we now expose this for Apollo's
/// Settings panel. The single-user trust model hasn't changed; the
/// endpoint just removes the SSH-and-sqlite3 step.
///
/// NOTE(review): the two field updates below run as separate statements,
/// not in one transaction — if the second fails after the first applied,
/// the row is left partially patched and the live view is not refreshed
/// until the next successful mutation. Acceptable under the single-user
/// model; confirm before relying on atomicity.
#[patch("/libraries/{id}")]
pub async fn patch_library(
    _claims: Claims,
    path: web::Path<i32>,
    body: web::Json<PatchLibraryBody>,
    app_state: Data<AppState>,
) -> impl Responder {
    let lib_id = path.into_inner();
    let body = body.into_inner();

    // Reject a no-op body outright rather than pretending it applied.
    if body.enabled.is_none() && body.excluded_dirs.is_none() {
        return HttpResponse::UnprocessableEntity().body("empty patch body");
    }

    let mut conn = crate::database::connect();

    // Build the SET clause. Diesel's set() takes a tuple of assignments;
    // we apply each field independently so an absent field doesn't get
    // forced to NULL / its default.
    // `affected` tracks whether ANY statement matched the row (max of
    // the per-statement row counts) — 0 across the board means the id
    // doesn't exist.
    let mut affected = 0usize;
    if let Some(enabled) = body.enabled {
        match diesel::update(libraries::table.filter(libraries::id.eq(lib_id)))
            .set(libraries::enabled.eq(enabled))
            .execute(&mut conn)
        {
            Ok(n) => affected = affected.max(n),
            Err(e) => {
                warn!("PATCH /libraries/{}: enabled update failed: {:?}", lib_id, e);
                return HttpResponse::InternalServerError().body(format!("{}", e));
            }
        }
    }
    if let Some(raw) = body.excluded_dirs.as_deref() {
        // Canonicalise on write — trim, dedupe, drop empties — so the DB
        // stores a round-trip-stable form regardless of how messy the
        // user typed it. Empty / whitespace-only → NULL (matches a
        // never-set library, parse_excluded_dirs_column returns []).
        let normalised = normalize_excluded_dirs_input(raw);
        let stored: Option<&str> = normalised.as_deref();
        match diesel::update(libraries::table.filter(libraries::id.eq(lib_id)))
            .set(libraries::excluded_dirs.eq(stored))
            .execute(&mut conn)
        {
            Ok(n) => affected = affected.max(n),
            Err(e) => {
                warn!(
                    "PATCH /libraries/{}: excluded_dirs update failed: {:?}",
                    lib_id, e
                );
                return HttpResponse::InternalServerError().body(format!("{}", e));
            }
        }
    }

    if affected == 0 {
        return HttpResponse::NotFound().body(format!("library id {} not found", lib_id));
    }

    // Refresh the live view from the canonical DB state. Reloading the
    // whole table (rather than mutating one entry in place) is cheap
    // (handful of rows) and keeps the in-memory and DB views trivially
    // consistent.
    let fresh = load_all(&mut conn);
    let updated = fresh.iter().find(|l| l.id == lib_id).cloned();
    {
        // Scope the write guard tightly so it drops before the response
        // is built.
        let mut live = app_state
            .live_libraries
            .write()
            .unwrap_or_else(|e| e.into_inner());
        *live = fresh;
    }

    match updated {
        Some(lib) => {
            info!(
                "PATCH /libraries/{}: enabled={:?} excluded_dirs={:?} → applied",
                lib_id, body.enabled, body.excluded_dirs
            );
            HttpResponse::Ok().json(lib)
        }
        None => HttpResponse::NotFound().body(format!("library id {} not found after update", lib_id)),
    }
}
|
|
|
|
#[cfg(test)]
mod tests {
    //! Unit tests: seeding, path helpers, request-param resolution,
    //! excluded-dirs parsing/normalisation, and mount-health probing.
    use super::*;
    use crate::database::test::in_memory_db_connection;

    // --- seeding -------------------------------------------------------

    #[test]
    fn seed_patches_placeholder() {
        let mut conn = in_memory_db_connection();
        // Migration seeds one row with the placeholder.
        seed_or_patch_from_env(&mut conn, "/tmp/media");
        let libs = load_all(&mut conn);
        assert_eq!(libs.len(), 1);
        assert_eq!(libs[0].id, 1);
        assert_eq!(libs[0].name, "main");
        assert_eq!(libs[0].root_path, "/tmp/media");
    }

    #[test]
    fn seed_is_idempotent() {
        let mut conn = in_memory_db_connection();
        seed_or_patch_from_env(&mut conn, "/tmp/media");
        seed_or_patch_from_env(&mut conn, "/tmp/other");
        // Second call should not overwrite an already-patched row.
        let libs = load_all(&mut conn);
        assert_eq!(libs.len(), 1);
        assert_eq!(libs[0].root_path, "/tmp/media");
    }

    // --- path helpers ---------------------------------------------------

    #[test]
    fn library_strip_root() {
        let lib = Library {
            id: 1,
            name: "main".into(),
            root_path: "/tmp/media".into(),
            enabled: true,
            excluded_dirs: Vec::new(),
        };
        let rel = lib.strip_root(Path::new("/tmp/media/2024/photo.jpg"));
        assert_eq!(rel.as_deref(), Some("2024/photo.jpg"));
        let outside = lib.strip_root(Path::new("/etc/passwd"));
        assert!(outside.is_none());
    }

    #[test]
    fn library_resolve_joins_under_root() {
        let lib = Library {
            id: 1,
            name: "main".into(),
            root_path: "/tmp/media".into(),
            enabled: true,
            excluded_dirs: Vec::new(),
        };
        let abs = lib.resolve("2024/photo.jpg");
        assert_eq!(abs, PathBuf::from("/tmp/media/2024/photo.jpg"));
    }

    // --- fixtures for resolve_library_param ------------------------------

    // Test-state builder: swap the fixture libraries into a bare AppState.
    fn state_with_libraries(libs: Vec<Library>) -> AppState {
        let mut state = AppState::test_state();
        state.libraries = libs;
        state
    }

    // Two libraries with non-contiguous ids to catch id/index confusion.
    fn sample_libraries() -> Vec<Library> {
        vec![
            Library {
                id: 1,
                name: "main".into(),
                root_path: "/tmp/main".into(),
                enabled: true,
                excluded_dirs: Vec::new(),
            },
            Library {
                id: 7,
                name: "archive".into(),
                root_path: "/tmp/archive".into(),
                enabled: true,
                excluded_dirs: Vec::new(),
            },
        ]
    }

    #[actix_rt::test]
    async fn resolve_library_param_absent_is_union() {
        let state = state_with_libraries(sample_libraries());
        assert!(matches!(resolve_library_param(&state, None), Ok(None)));
    }

    #[actix_rt::test]
    async fn resolve_library_param_empty_or_whitespace_is_union() {
        let state = state_with_libraries(sample_libraries());
        assert!(matches!(resolve_library_param(&state, Some("")), Ok(None)));
        assert!(matches!(
            resolve_library_param(&state, Some(" ")),
            Ok(None)
        ));
    }

    #[actix_rt::test]
    async fn resolve_library_param_numeric_id_matches() {
        let state = state_with_libraries(sample_libraries());
        let lib = resolve_library_param(&state, Some("7"))
            .expect("valid id")
            .expect("some library");
        assert_eq!(lib.id, 7);
        assert_eq!(lib.name, "archive");
    }

    #[actix_rt::test]
    async fn resolve_library_param_name_matches() {
        let state = state_with_libraries(sample_libraries());
        let lib = resolve_library_param(&state, Some("main"))
            .expect("valid name")
            .expect("some library");
        assert_eq!(lib.id, 1);
    }

    #[actix_rt::test]
    async fn resolve_library_param_unknown_id_errs() {
        let state = state_with_libraries(sample_libraries());
        let err = resolve_library_param(&state, Some("999")).unwrap_err();
        assert!(err.contains("unknown library id"));
    }

    #[actix_rt::test]
    async fn resolve_library_param_unknown_name_errs() {
        let state = state_with_libraries(sample_libraries());
        let err = resolve_library_param(&state, Some("missing")).unwrap_err();
        assert!(err.contains("unknown library name"));
    }

    // --- excluded_dirs parsing / merging ---------------------------------

    #[test]
    fn parse_excluded_dirs_column_handles_null_and_whitespace() {
        assert_eq!(parse_excluded_dirs_column(None), Vec::<String>::new());
        assert_eq!(parse_excluded_dirs_column(Some("")), Vec::<String>::new());
        assert_eq!(
            parse_excluded_dirs_column(Some(" /a , /b/sub , @eaDir ,, ")),
            vec!["/a".to_string(), "/b/sub".to_string(), "@eaDir".to_string()]
        );
    }

    #[test]
    fn effective_excluded_dirs_unions_global_and_per_library() {
        let lib_no_extras = Library {
            id: 1,
            name: "main".into(),
            root_path: "/x".into(),
            enabled: true,
            excluded_dirs: Vec::new(),
        };
        let globals = vec!["@eaDir".to_string(), ".thumbnails".to_string()];
        // Empty per-library excludes → exactly the globals.
        assert_eq!(lib_no_extras.effective_excluded_dirs(&globals), globals);

        let lib_with_extras = Library {
            id: 2,
            name: "archive".into(),
            root_path: "/y".into(),
            enabled: true,
            excluded_dirs: vec!["/photos".to_string()],
        };
        let combined = lib_with_extras.effective_excluded_dirs(&globals);
        assert!(combined.contains(&"@eaDir".to_string()));
        assert!(combined.contains(&".thumbnails".to_string()));
        assert!(combined.contains(&"/photos".to_string()));
        assert_eq!(combined.len(), 3);
    }

    #[test]
    fn effective_excluded_dirs_keeps_overlap_between_global_and_per_library() {
        // Two sources both excluding `@eaDir` is legal — `PathExcluder`
        // accepts repeats, and there's no behavioral reason to dedupe
        // here. Documents the design choice so a future refactor that
        // tightens this is forced to update both code and tests.
        let globals = vec!["@eaDir".to_string()];
        let lib = Library {
            id: 1,
            name: "main".into(),
            root_path: "/x".into(),
            enabled: true,
            excluded_dirs: vec!["@eaDir".to_string(), "/private".to_string()],
        };
        let combined = lib.effective_excluded_dirs(&globals);
        // 2 occurrences of @eaDir + /private = 3 entries total.
        assert_eq!(combined, vec!["@eaDir", "@eaDir", "/private"]);
    }

    // --- normaliser (write-side canonicalisation) ------------------------

    #[test]
    fn normalize_excluded_dirs_input_handles_empty_and_whitespace() {
        assert_eq!(normalize_excluded_dirs_input(""), None);
        assert_eq!(normalize_excluded_dirs_input("   "), None);
        assert_eq!(normalize_excluded_dirs_input(",,,"), None);
        assert_eq!(normalize_excluded_dirs_input(" , , "), None);
    }

    #[test]
    fn normalize_excluded_dirs_input_trims_per_entry() {
        // Inner whitespace stripped on each item, comma-joined without
        // spaces. Mirrors how parse_excluded_dirs_column reads it back.
        assert_eq!(
            normalize_excluded_dirs_input(" @eaDir , /private ,  .thumbnails "),
            Some("@eaDir,/private,.thumbnails".to_string())
        );
    }

    #[test]
    fn normalize_excluded_dirs_input_dedupes_preserving_first_occurrence() {
        // Exact-string duplicates collapse; the first occurrence wins
        // (preserves the operator's typed order so they recognise their
        // intent on round-trip).
        assert_eq!(
            normalize_excluded_dirs_input("@eaDir, /private, @eaDir, /private"),
            Some("@eaDir,/private".to_string())
        );
        // Whitespace-distinct entries collapse to the same canonical
        // form. Case is preserved — `Foo` and `foo` are different keys
        // (filesystem case-sensitivity is platform-dependent; we don't
        // make that call here).
        assert_eq!(
            normalize_excluded_dirs_input(" Foo,foo, Foo "),
            Some("Foo,foo".to_string())
        );
    }

    #[test]
    fn normalize_excluded_dirs_input_is_round_trip_stable() {
        // Writing the normaliser's output back through it yields the
        // same string. PATCH-clearing edits round-trip cleanly through
        // parse_excluded_dirs_column too.
        let raw = " /a/b ,, /a/b , c ";
        let once = normalize_excluded_dirs_input(raw).expect("not empty");
        let twice = normalize_excluded_dirs_input(&once).expect("not empty");
        assert_eq!(once, twice);
        // Parsing the stored form back gives the deduped Vec.
        assert_eq!(
            parse_excluded_dirs_column(Some(&once)),
            vec!["/a/b".to_string(), "c".to_string()]
        );
    }

    // --- health probing ---------------------------------------------------

    // Minimal Library fixture for probe tests.
    fn probe_lib(id: i32, root: String) -> Library {
        Library {
            id,
            name: "main".into(),
            root_path: root,
            enabled: true,
            excluded_dirs: Vec::new(),
        }
    }

    #[test]
    fn probe_online_for_existing_non_empty_dir() {
        let tmp = tempfile::tempdir().unwrap();
        std::fs::write(tmp.path().join("photo.jpg"), b"hello").unwrap();
        let lib = probe_lib(1, tmp.path().to_string_lossy().into());
        // had_data doesn't matter when the dir has entries.
        assert!(probe_online(&lib, true).is_online());
        assert!(probe_online(&lib, false).is_online());
    }

    #[test]
    fn probe_stale_when_root_missing() {
        let lib = probe_lib(1, "/nonexistent/definitely/not/here".into());
        assert!(matches!(
            probe_online(&lib, false),
            LibraryHealth::Stale { .. }
        ));
    }

    #[test]
    fn probe_stale_when_root_is_a_file() {
        let tmp = tempfile::tempdir().unwrap();
        let file = tmp.path().join("not-a-dir");
        std::fs::write(&file, b"x").unwrap();
        let lib = probe_lib(1, file.to_string_lossy().into());
        assert!(matches!(
            probe_online(&lib, false),
            LibraryHealth::Stale { .. }
        ));
    }

    #[test]
    fn probe_empty_dir_is_online_when_no_prior_data() {
        // Fresh mount: empty directory, no rows in image_exif. Accept it.
        let tmp = tempfile::tempdir().unwrap();
        let lib = probe_lib(1, tmp.path().to_string_lossy().into());
        assert!(probe_online(&lib, false).is_online());
    }

    #[test]
    fn probe_empty_dir_is_stale_when_prior_data_existed() {
        // The "share went offline" signal: directory exists but is empty,
        // and we know the library used to have content. Treat as Stale.
        let tmp = tempfile::tempdir().unwrap();
        let lib = probe_lib(1, tmp.path().to_string_lossy().into());
        match probe_online(&lib, true) {
            LibraryHealth::Stale { reason, .. } => {
                assert!(reason.contains("empty"), "unexpected reason: {}", reason)
            }
            other => panic!("expected Stale, got {:?}", other),
        }
    }

    #[test]
    fn refresh_health_logs_only_on_transition() {
        // Smoke test: refresh_health updates the map and reports correctly.
        // (We can't easily assert on logs without a custom logger; the
        // important thing is that the state churns properly.)
        let tmp = tempfile::tempdir().unwrap();
        let lib = Library {
            id: 42,
            name: "test".into(),
            root_path: tmp.path().to_string_lossy().into(),
            enabled: true,
            excluded_dirs: Vec::new(),
        };
        let map = new_health_map(&[lib.clone()]);

        // First probe: empty dir, no prior data — Online.
        let s1 = refresh_health(&map, &lib, false);
        assert!(s1.is_online());

        // Probe again with had_data=true on the same empty dir — Stale.
        let s2 = refresh_health(&map, &lib, true);
        assert!(matches!(s2, LibraryHealth::Stale { .. }));
        assert_eq!(
            map.read().unwrap().get(&lib.id).cloned(),
            Some(s2.clone()),
            "map should reflect the latest probe"
        );

        // Recovery: drop a file and probe again.
        std::fs::write(tmp.path().join("photo.jpg"), b"x").unwrap();
        let s3 = refresh_health(&map, &lib, true);
        assert!(s3.is_online());
    }
}
|