Compare commits

345 commits: feature/im ... f50655fb21

.claude/commands/speckit.analyze.md (new file, 184 lines)

@@ -0,0 +1,184 @@
---
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Goal

Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/speckit.tasks` has successfully produced a complete `tasks.md`.

## Operating Constraints

**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (the user must explicitly approve it before any follow-up editing commands would be invoked manually).

**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`.

## Execution Steps

### 1. Initialize Analysis Context

Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:

- SPEC = FEATURE_DIR/spec.md
- PLAN = FEATURE_DIR/plan.md
- TASKS = FEATURE_DIR/tasks.md

Abort with an error message if any required file is missing (instruct the user to run the missing prerequisite command).
For single quotes in args like "I'm Groot", use escape syntax: e.g., 'I'\''m Groot' (or double-quote if possible: "I'm Groot").

### 2. Load Artifacts (Progressive Disclosure)

Load only the minimal necessary context from each artifact:

**From spec.md:**

- Overview/Context
- Functional Requirements
- Non-Functional Requirements
- User Stories
- Edge Cases (if present)

**From plan.md:**

- Architecture/stack choices
- Data Model references
- Phases
- Technical constraints

**From tasks.md:**

- Task IDs
- Descriptions
- Phase grouping
- Parallel markers [P]
- Referenced file paths

**From constitution:**

- Load `.specify/memory/constitution.md` for principle validation

### 3. Build Semantic Models

Create internal representations (do not include raw artifacts in output):

- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
- **User story/action inventory**: Discrete user actions with acceptance criteria
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases; see the sketch after this list)
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
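The slug derivation and task-to-requirement mapping described above can be sketched roughly as follows (Python, illustrative only; the keyword-overlap threshold and helper names are assumptions, not part of the command):

```python
import re

def requirement_key(text: str) -> str:
    """Derive a stable slug from an imperative requirement phrase,
    e.g. "User can upload file" -> "user-can-upload-file"."""
    return re.sub(r"[^a-z0-9]+", "-", text.lower()).strip("-")

def map_tasks_to_requirements(tasks: dict[str, str],
                              requirements: dict[str, str]) -> dict[str, list[str]]:
    """Map each task ID to the requirement keys it appears to cover,
    using explicit key mentions or naive keyword overlap as the heuristic."""
    coverage: dict[str, list[str]] = {}
    for task_id, task_text in tasks.items():
        task_words = set(re.findall(r"[a-z0-9]+", task_text.lower()))
        hits = []
        for req_key, req_text in requirements.items():
            req_words = set(re.findall(r"[a-z0-9]+", req_text.lower()))
            # Explicit reference to the key, or at least three shared words, counts as coverage.
            if req_key in task_text or len(task_words & req_words) >= 3:
                hits.append(req_key)
        coverage[task_id] = hits
    return coverage

reqs = {requirement_key(t): t for t in ["User can upload file", "System must log upload errors"]}
tasks = {"T001": "Implement file upload endpoint so the user can upload a file"}
print(map_tasks_to_requirements(tasks, reqs))  # {'T001': ['user-can-upload-file']}
```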

### 4. Detection Passes (Token-Efficient Analysis)

Focus on high-signal findings. Limit to 50 findings total; aggregate the remainder in an overflow summary.

#### A. Duplication Detection

- Identify near-duplicate requirements
- Mark lower-quality phrasing for consolidation

#### B. Ambiguity Detection

- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria
- Flag unresolved placeholders (TODO, TKTK, ???, `<placeholder>`, etc.)

#### C. Underspecification

- Requirements with verbs but missing object or measurable outcome
- User stories missing acceptance criteria alignment
- Tasks referencing files or components not defined in spec/plan

#### D. Constitution Alignment

- Any requirement or plan element conflicting with a MUST principle
- Missing mandated sections or quality gates from constitution

#### E. Coverage Gaps

- Requirements with zero associated tasks
- Tasks with no mapped requirement/story
- Non-functional requirements not reflected in tasks (e.g., performance, security)

#### F. Inconsistency

- Terminology drift (same concept named differently across files)
- Data entities referenced in plan but absent in spec (or vice versa)
- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note)
- Conflicting requirements (e.g., one requires Next.js while another specifies Vue)

### 5. Severity Assignment

Use this heuristic to prioritize findings (a code sketch follows the list):

- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality
- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion
- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case
- **LOW**: Style/wording improvements, minor redundancy not affecting execution order
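A rough code rendering of this heuristic (Python; the finding fields and category labels are illustrative assumptions):

```python
def assign_severity(finding: dict) -> str:
    """Map a finding to CRITICAL/HIGH/MEDIUM/LOW per the heuristic above.
    The keys used here (category, violates_must, ...) are illustrative only."""
    if finding.get("violates_must") or finding.get("missing_core_artifact"):
        return "CRITICAL"
    if finding.get("category") == "Coverage Gap" and finding.get("blocks_baseline"):
        return "CRITICAL"
    if finding.get("category") in {"Duplication", "Conflict", "Untestable Criterion"}:
        return "HIGH"
    if finding.get("category") == "Ambiguity" and finding.get("attribute") in {"security", "performance"}:
        return "HIGH"
    if finding.get("category") in {"Terminology Drift", "Coverage Gap", "Underspecification"}:
        return "MEDIUM"
    return "LOW"

print(assign_severity({"category": "Duplication"}))  # HIGH
```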

### 6. Produce Compact Analysis Report

Output a Markdown report (no file writes) with the following structure:

## Specification Analysis Report

| ID | Category | Severity | Location(s) | Summary | Recommendation |
|----|----------|----------|-------------|---------|----------------|
| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |

(Add one row per finding; generate stable IDs prefixed by category initial.)

**Coverage Summary Table:**

| Requirement Key | Has Task? | Task IDs | Notes |
|-----------------|-----------|----------|-------|

**Constitution Alignment Issues:** (if any)

**Unmapped Tasks:** (if any)

**Metrics:** (a computation sketch follows this list)

- Total Requirements
- Total Tasks
- Coverage % (requirements with >=1 task)
- Ambiguity Count
- Duplication Count
- Critical Issues Count
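For illustration, these metrics could be computed from the semantic models along these lines (Python sketch; `coverage` is the task-to-requirement mapping sketched earlier, and the field names are assumptions):

```python
def report_metrics(requirements: dict[str, str],
                   tasks: dict[str, str],
                   coverage: dict[str, list[str]],
                   findings: list[dict]) -> dict:
    """Compute the summary metrics listed above from the semantic models."""
    covered = {req for req_keys in coverage.values() for req in req_keys}
    total_requirements = len(requirements)
    return {
        "total_requirements": total_requirements,
        "total_tasks": len(tasks),
        "coverage_pct": round(100 * len(covered & set(requirements)) / total_requirements, 1)
                        if total_requirements else 0.0,
        "ambiguity_count": sum(f.get("category") == "Ambiguity" for f in findings),
        "duplication_count": sum(f.get("category") == "Duplication" for f in findings),
        "critical_issues_count": sum(f.get("severity") == "CRITICAL" for f in findings),
    }
```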

### 7. Provide Next Actions

At the end of the report, output a concise Next Actions block:

- If CRITICAL issues exist: Recommend resolving before `/speckit.implement`
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
- Provide explicit command suggestions: e.g., "Run /speckit.specify with refinement", "Run /speckit.plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"

### 8. Offer Remediation

Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)

## Operating Principles

### Context Efficiency

- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation
- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis
- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow
- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts

### Analysis Guidelines

- **NEVER modify files** (this is read-only analysis)
- **NEVER hallucinate missing sections** (if absent, report them accurately)
- **Prioritize constitution violations** (these are always CRITICAL)
- **Use examples over exhaustive rules** (cite specific instances, not generic patterns)
- **Report zero issues gracefully** (emit success report with coverage statistics)

## Context

$ARGUMENTS

.claude/commands/speckit.checklist.md (new file, 294 lines)

@@ -0,0 +1,294 @@
---
description: Generate a custom checklist for the current feature based on user requirements.
---

## Checklist Purpose: "Unit Tests for English"

**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain.

**NOT for verification/testing**:

- ❌ NOT "Verify the button clicks correctly"
- ❌ NOT "Test error handling works"
- ❌ NOT "Confirm the API returns 200"
- ❌ NOT checking if code/implementation matches the spec

**FOR requirements quality validation**:

- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness)
- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity)
- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency)
- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage)
- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases)

**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works.

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Execution Steps

1. **Setup**: Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list.
   - All file paths must be absolute.
   - For single quotes in args like "I'm Groot", use escape syntax: e.g., 'I'\''m Groot' (or double-quote if possible: "I'm Groot").

2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST:
   - Be generated from the user's phrasing + extracted signals from spec/plan/tasks
   - Only ask about information that materially changes checklist content
   - Be skipped individually if already unambiguous in `$ARGUMENTS`
   - Prefer precision over breadth

   Generation algorithm:
   1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts").
   2. Cluster signals into candidate focus areas (max 4) ranked by relevance.
   3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit.
   4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria.
   5. Formulate questions chosen from these archetypes:
      - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?")
      - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?")
      - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?")
      - Audience framing (e.g., "Will this be used by the author only or peers during PR review?")
      - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?")
      - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?")

   Question formatting rules:
   - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters
   - Limit to A–E options maximum; omit the table if a free-form answer is clearer
   - Never ask the user to restate what they already said
   - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope."

   Defaults when interaction impossible:
   - Depth: Standard
   - Audience: Reviewer (PR) if code-related; Author otherwise
   - Focus: Top 2 relevance clusters

   Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if the user explicitly declines more.

3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers:
   - Derive checklist theme (e.g., security, review, deploy, ux)
   - Consolidate explicit must-have items mentioned by the user
   - Map focus selections to category scaffolding
   - Infer any missing context from spec/plan/tasks (do NOT hallucinate)

4. **Load feature context**: Read from FEATURE_DIR:
   - spec.md: Feature requirements and scope
   - plan.md (if exists): Technical details, dependencies
   - tasks.md (if exists): Implementation tasks

   **Context Loading Strategy**:
   - Load only necessary portions relevant to active focus areas (avoid full-file dumping)
   - Prefer summarizing long sections into concise scenario/requirement bullets
   - Use progressive disclosure: add follow-on retrieval only if gaps detected
   - If source docs are large, generate interim summary items instead of embedding raw text

5. **Generate checklist** - Create "Unit Tests for Requirements":
   - Create `FEATURE_DIR/checklists/` directory if it doesn't exist
   - Generate unique checklist filename:
     - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
     - Format: `[domain].md`
     - If file exists, append to existing file
   - Number items sequentially starting from CHK001 (see the sketch after these steps)
   - Each `/speckit.checklist` run creates a NEW file (never overwrites existing checklists)

   **CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
   Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
   - **Completeness**: Are all necessary requirements present?
   - **Clarity**: Are requirements unambiguous and specific?
   - **Consistency**: Do requirements align with each other?
   - **Measurability**: Can requirements be objectively verified?
   - **Coverage**: Are all scenarios/edge cases addressed?

   **Category Structure** - Group items by requirement quality dimensions:
   - **Requirement Completeness** (Are all necessary requirements documented?)
   - **Requirement Clarity** (Are requirements specific and unambiguous?)
   - **Requirement Consistency** (Do requirements align without conflicts?)
   - **Acceptance Criteria Quality** (Are success criteria measurable?)
   - **Scenario Coverage** (Are all flows/cases addressed?)
   - **Edge Case Coverage** (Are boundary conditions defined?)
   - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?)
   - **Dependencies & Assumptions** (Are they documented and validated?)
   - **Ambiguities & Conflicts** (What needs clarification?)

   **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**:

   ❌ **WRONG** (Testing implementation):
   - "Verify landing page displays 3 episode cards"
   - "Test hover states work on desktop"
   - "Confirm logo click navigates home"

   ✅ **CORRECT** (Testing requirements quality):
   - "Are the exact number and layout of featured episodes specified?" [Completeness]
   - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity]
   - "Are hover state requirements consistent across all interactive elements?" [Consistency]
   - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage]
   - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases]
   - "Are loading states defined for asynchronous episode data?" [Completeness]
   - "Does the spec define visual hierarchy for competing UI elements?" [Clarity]

   **ITEM STRUCTURE**:
   Each item should follow this pattern:
   - Question format asking about requirement quality
   - Focus on what's WRITTEN (or not written) in the spec/plan
   - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.]
   - Reference spec section `[Spec §X.Y]` when checking existing requirements
   - Use `[Gap]` marker when checking for missing requirements

   **EXAMPLES BY QUALITY DIMENSION**:

   Completeness:
   - "Are error handling requirements defined for all API failure modes? [Gap]"
   - "Are accessibility requirements specified for all interactive elements? [Completeness]"
   - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]"

   Clarity:
   - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]"
   - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]"
   - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]"

   Consistency:
   - "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]"
   - "Are card component requirements consistent between landing and detail pages? [Consistency]"

   Coverage:
   - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]"
   - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]"
   - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]"

   Measurability:
   - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]"
   - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]"

   **Scenario Classification & Coverage** (Requirements Quality Focus):
   - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios
   - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?"
   - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]"
   - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]"

   **Traceability Requirements**:
   - MINIMUM: ≥80% of items MUST include at least one traceability reference
   - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]`
   - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]"

   **Surface & Resolve Issues** (Requirements Quality Problems):
   Ask questions about the requirements themselves:
   - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]"
   - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]"
   - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]"
   - Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]"
   - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]"

   **Content Consolidation**:
   - Soft cap: If raw candidate items > 40, prioritize by risk/impact
   - Merge near-duplicates checking the same requirement aspect
   - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]"

   **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test (a lint sketch appears before the examples section below):
   - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior
   - ❌ References to code execution, user actions, system behavior
   - ❌ "Displays correctly", "works properly", "functions as expected"
   - ❌ "Click", "navigate", "render", "load", "execute"
   - ❌ Test cases, test plans, QA procedures
   - ❌ Implementation details (frameworks, APIs, algorithms)

   **✅ REQUIRED PATTERNS** - These test requirements quality:
   - ✅ "Are [requirement type] defined/specified/documented for [scenario]?"
   - ✅ "Is [vague term] quantified/clarified with specific criteria?"
   - ✅ "Are requirements consistent between [section A] and [section B]?"
   - ✅ "Can [requirement] be objectively measured/verified?"
   - ✅ "Are [edge cases/scenarios] addressed in requirements?"
   - ✅ "Does the spec define [missing aspect]?"

6. **Structure Reference**: Generate the checklist following the canonical template in `.specify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If the template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### <requirement item>` lines with globally incrementing IDs starting at CHK001.

7. **Report**: Output the full path to the created checklist, the item count, and remind the user that each run creates a new file. Summarize:
   - Focus areas selected
   - Depth level
   - Actor/timing
   - Any explicit user-specified must-have items incorporated
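As a rough illustration of the file-naming and ID-numbering rules in steps 5 and 6 (Python; the directory layout mirrors `FEATURE_DIR/checklists/`, while the helper name, heading format, and append-if-exists behavior are assumptions):

```python
from pathlib import Path
import re

def write_checklist(feature_dir: str, domain: str, items: list[str]) -> Path:
    """Write (or append to) FEATURE_DIR/checklists/<domain>.md, numbering new
    items CHK001, CHK002, ... after the highest ID already in the file."""
    checklist_dir = Path(feature_dir) / "checklists"
    checklist_dir.mkdir(parents=True, exist_ok=True)
    path = checklist_dir / f"{domain}.md"

    existing = path.read_text() if path.exists() else f"# {domain.capitalize()} Checklist\n"
    ids = [int(n) for n in re.findall(r"CHK(\d{3})", existing)]
    next_id = max(ids, default=0) + 1

    lines = [f"- [ ] CHK{next_id + i:03d} - {item}" for i, item in enumerate(items)]
    path.write_text(existing + "\n" + "\n".join(lines) + "\n")
    return path

print(write_checklist("specs/001-feature", "ux",
                      ["Are visual hierarchy requirements defined with measurable criteria? [Clarity]"]))
```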

**Important**: Each `/speckit.checklist` command invocation creates a checklist file using short, descriptive names unless the file already exists. This allows:

- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
- Simple, memorable filenames that indicate checklist purpose
- Easy identification and navigation in the `checklists/` folder

To avoid clutter, use descriptive types and clean up obsolete checklists when done.
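To make the prohibited and required patterns above concrete, here is a minimal lint-style sketch that flags implementation-flavored items (Python; the word lists and checks are illustrative assumptions, not an exhaustive rule set):

```python
import re

IMPLEMENTATION_VERBS = r"^(verify|test|confirm|check)\b"
IMPLEMENTATION_WORDS = ("click", "navigate", "render", "load", "execute",
                        "displays correctly", "works properly")

def flags_for_item(item: str) -> list[str]:
    """Return reasons an item reads like an implementation test rather than
    a requirements-quality question."""
    reasons = []
    text = item.lower()
    if re.match(IMPLEMENTATION_VERBS, text):
        reasons.append("starts with an implementation-test verb")
    if any(word in text for word in IMPLEMENTATION_WORDS):
        reasons.append("references runtime behavior")
    if "?" not in item:
        reasons.append("not phrased as a question about the requirements")
    return reasons

print(flags_for_item("Verify landing page displays 3 episode cards"))
print(flags_for_item("Are hover state requirements consistent across all interactive elements? [Consistency]"))
```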

## Example Checklist Types & Sample Items

**UX Requirements Quality:** `ux.md`

Sample items (testing the requirements, NOT the implementation):

- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]"
- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]"
- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]"
- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]"
- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]"
- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]"

**API Requirements Quality:** `api.md`

Sample items:

- "Are error response formats specified for all failure scenarios? [Completeness]"
- "Are rate limiting requirements quantified with specific thresholds? [Clarity]"
- "Are authentication requirements consistent across all endpoints? [Consistency]"
- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]"
- "Is versioning strategy documented in requirements? [Gap]"

**Performance Requirements Quality:** `performance.md`

Sample items:

- "Are performance requirements quantified with specific metrics? [Clarity]"
- "Are performance targets defined for all critical user journeys? [Coverage]"
- "Are performance requirements under different load conditions specified? [Completeness]"
- "Can performance requirements be objectively measured? [Measurability]"
- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]"

**Security Requirements Quality:** `security.md`

Sample items:

- "Are authentication requirements specified for all protected resources? [Coverage]"
- "Are data protection requirements defined for sensitive information? [Completeness]"
- "Is the threat model documented and requirements aligned to it? [Traceability]"
- "Are security requirements consistent with compliance obligations? [Consistency]"
- "Are security failure/breach response requirements defined? [Gap, Exception Flow]"

## Anti-Examples: What NOT To Do

**❌ WRONG - These test implementation, not requirements:**

```markdown
- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001]
- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003]
- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010]
- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005]
```

**✅ CORRECT - These test requirements quality:**

```markdown
- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001]
- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003]
- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010]
- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005]
- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap]
- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001]
```

**Key Differences:**

- Wrong: Tests if the system works correctly
- Correct: Tests if the requirements are written correctly
- Wrong: Verification of behavior
- Correct: Validation of requirement quality
- Wrong: "Does it do X?"
- Correct: "Is X clearly specified?"

.claude/commands/speckit.clarify.md (new file, 181 lines)

@@ -0,0 +1,181 @@
---
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
handoffs:
  - label: Build Technical Plan
    agent: speckit.plan
    prompt: Create a plan for the spec. I am building with...
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.

Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.

Execution steps:

1. Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -PathsOnly` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
   - `FEATURE_DIR`
   - `FEATURE_SPEC`
   - (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
   - If JSON parsing fails, abort and instruct the user to re-run `/speckit.specify` or verify the feature branch environment.
   - For single quotes in args like "I'm Groot", use escape syntax: e.g., 'I'\''m Groot' (or double-quote if possible: "I'm Groot").

2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output the raw map unless no questions will be asked).

   Functional Scope & Behavior:
   - Core user goals & success criteria
   - Explicit out-of-scope declarations
   - User roles / personas differentiation

   Domain & Data Model:
   - Entities, attributes, relationships
   - Identity & uniqueness rules
   - Lifecycle/state transitions
   - Data volume / scale assumptions

   Interaction & UX Flow:
   - Critical user journeys / sequences
   - Error/empty/loading states
   - Accessibility or localization notes

   Non-Functional Quality Attributes:
   - Performance (latency, throughput targets)
   - Scalability (horizontal/vertical, limits)
   - Reliability & availability (uptime, recovery expectations)
   - Observability (logging, metrics, tracing signals)
   - Security & privacy (authN/Z, data protection, threat assumptions)
   - Compliance / regulatory constraints (if any)

   Integration & External Dependencies:
   - External services/APIs and failure modes
   - Data import/export formats
   - Protocol/versioning assumptions

   Edge Cases & Failure Handling:
   - Negative scenarios
   - Rate limiting / throttling
   - Conflict resolution (e.g., concurrent edits)

   Constraints & Tradeoffs:
   - Technical constraints (language, storage, hosting)
   - Explicit tradeoffs or rejected alternatives

   Terminology & Consistency:
   - Canonical glossary terms
   - Avoided synonyms / deprecated terms

   Completion Signals:
   - Acceptance criteria testability
   - Measurable Definition of Done style indicators

   Misc / Placeholders:
   - TODO markers / unresolved decisions
   - Ambiguous adjectives ("robust", "intuitive") lacking quantification

   For each category with Partial or Missing status, add a candidate question opportunity unless:
   - Clarification would not materially change implementation or validation strategy
   - Information is better deferred to the planning phase (note internally)

3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
   - Maximum of 5 total questions across the whole session.
   - Each question must be answerable with EITHER:
     - A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
     - A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
   - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
   - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
   - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
   - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
   - If more than 5 categories remain unresolved, select the top 5 by an (Impact * Uncertainty) heuristic (a scoring sketch appears after the behavior rules below).

4. Sequential questioning loop (interactive):
   - Present EXACTLY ONE question at a time.
   - For multiple‑choice questions:
     - **Analyze all options** and determine the **most suitable option** based on:
       - Best practices for the project type
       - Common patterns in similar implementations
       - Risk reduction (security, performance, maintainability)
       - Alignment with any explicit project goals or constraints visible in the spec
     - Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice).
     - Format as: `**Recommended:** Option [X] - <reasoning>`
     - Then render all options as a Markdown table:

       | Option | Description |
       |--------|-------------|
       | A | <Option A description> |
       | B | <Option B description> |
       | C | <Option C description> (add D/E as needed up to 5) |
       | Short | Provide a different short answer (<=5 words) (Include only if free-form alternative is appropriate) |

     - After the table, add: `You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.`
   - For short‑answer style (no meaningful discrete options):
     - Provide your **suggested answer** based on best practices and context.
     - Format as: `**Suggested:** <your proposed answer> - <brief reasoning>`
     - Then output: `Format: Short answer (<=5 words). You can accept the suggestion by saying "yes" or "suggested", or provide your own answer.`
   - After the user answers:
     - If the user replies with "yes", "recommended", or "suggested", use your previously stated recommendation/suggestion as the answer.
     - Otherwise, validate that the answer maps to one option or fits the <=5 word constraint.
     - If ambiguous, ask for a quick disambiguation (the count still belongs to the same question; do not advance).
     - Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
   - Stop asking further questions when:
     - All critical ambiguities are resolved early (remaining queued items become unnecessary), OR
     - The user signals completion ("done", "good", "no more"), OR
     - You reach 5 asked questions.
   - Never reveal future queued questions in advance.
   - If no valid questions exist at the start, immediately report that there are no critical ambiguities.

5. Integration after EACH accepted answer (incremental update approach):
   - Maintain an in-memory representation of the spec (loaded once at start) plus the raw file contents.
   - For the first integrated answer in this session:
     - Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
     - Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
   - Append a bullet line immediately after acceptance: `- Q: <question> → A: <final answer>` (see the sketch after these steps).
   - Then immediately apply the clarification to the most appropriate section(s):
     - Functional ambiguity → Update or add a bullet in Functional Requirements.
     - User interaction / actor distinction → Update User Stories or Actors subsection (if present) with the clarified role, constraint, or scenario.
     - Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
     - Non-functional constraint → Add/modify measurable criteria in the Non-Functional / Quality Attributes section (convert vague adjective to a metric or explicit target).
     - Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such a subsection if the template provides a placeholder for it).
     - Terminology conflict → Normalize the term across the spec; retain the original only if necessary by adding `(formerly referred to as "X")` once.
   - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
   - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
   - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
   - Keep each inserted clarification minimal and testable (avoid narrative drift).

6. Validation (performed after EACH write plus a final pass):
   - Clarifications session contains exactly one bullet per accepted answer (no duplicates).
   - Total asked (accepted) questions ≤ 5.
   - Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
   - No contradictory earlier statement remains (scan for now-invalid alternative choices and remove them).
   - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
   - Terminology consistency: the same canonical term is used across all updated sections.

7. Write the updated spec back to `FEATURE_SPEC`.

8. Report completion (after the questioning loop ends or early termination):
   - Number of questions asked & answered.
   - Path to the updated spec.
   - Sections touched (list names).
   - Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
   - If any Outstanding or Deferred remain, recommend whether to proceed to `/speckit.plan` or run `/speckit.clarify` again later post-plan.
   - Suggested next command.
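A minimal sketch of the step-5 Clarifications update (Python; the placement of the new section at the end of the file and the insertion of the newest bullet directly under the session heading are simplifying assumptions, not the prescribed behavior):

```python
from datetime import date
from pathlib import Path

def record_clarification(spec_path: str, question: str, answer: str) -> None:
    """Record an accepted Q/A pair under '## Clarifications' / '### Session YYYY-MM-DD',
    creating either heading if it is missing, then overwrite the spec file."""
    path = Path(spec_path)
    text = path.read_text()
    session = f"### Session {date.today().isoformat()}"

    if "## Clarifications" not in text:
        text = text.rstrip("\n") + "\n\n## Clarifications\n"
    if session not in text:
        text = text.replace("## Clarifications", f"## Clarifications\n\n{session}", 1)
    text = text.replace(session, f"{session}\n- Q: {question} → A: {answer}", 1)
    path.write_text(text)  # save after each accepted answer to limit context-loss risk
```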

Behavior rules:

- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
- If spec file missing, instruct user to run `/speckit.specify` first (do not create a new spec here).
- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
- Avoid speculative tech stack questions unless the absence blocks functional clarity.
- Respect user early termination signals ("stop", "done", "proceed").
- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing.
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
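The (Impact * Uncertainty) selection heuristic from step 3 might look like this in code (Python; the 1-5 scoring scale and field names are assumptions):

```python
def select_questions(candidates: list[dict], limit: int = 5) -> list[dict]:
    """Rank candidate clarification questions by impact * uncertainty
    (each scored 1-5 here) and keep at most `limit` of them."""
    ranked = sorted(candidates,
                    key=lambda q: q["impact"] * q["uncertainty"],
                    reverse=True)
    return ranked[:limit]

queue = select_questions([
    {"category": "Non-Functional", "text": "Target p95 latency?", "impact": 5, "uncertainty": 4},
    {"category": "Terminology", "text": "Canonical name for 'workspace'?", "impact": 2, "uncertainty": 2},
])
print([q["text"] for q in queue])
```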

Context for prioritization: $ARGUMENTS

.claude/commands/speckit.constitution.md (new file, 84 lines)

@@ -0,0 +1,84 @@
---
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
handoffs:
  - label: Build Specification
    agent: speckit.specify
    prompt: Implement the feature specification based on the updated constitution. I want to build...
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.

**Note**: If `.specify/memory/constitution.md` does not exist yet, it should have been initialized from `.specify/templates/constitution-template.md` during project setup. If it's missing, copy the template first.

Follow this execution flow:

1. Load the existing constitution at `.specify/memory/constitution.md`.
   - Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]` (see the sketch after this flow).

   **IMPORTANT**: The user might require fewer or more principles than the ones used in the template. If a number is specified, respect that - follow the general template. You will update the doc accordingly.

2. Collect/derive values for placeholders:
   - If user input (conversation) supplies a value, use it.
   - Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
   - For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown, ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep the previous value.
   - `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
     - MAJOR: Backward incompatible governance/principle removals or redefinitions.
     - MINOR: New principle/section added or materially expanded guidance.
     - PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
   - If the version bump type is ambiguous, propose reasoning before finalizing.

3. Draft the updated constitution content:
   - Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
   - Preserve heading hierarchy; comments can be removed once replaced unless they still add clarifying guidance.
   - Ensure each Principle section has: a succinct name line, a paragraph (or bullet list) capturing non‑negotiable rules, and an explicit rationale if not obvious.
   - Ensure the Governance section lists the amendment procedure, versioning policy, and compliance review expectations.

4. Consistency propagation checklist (convert prior checklist into active validations):
   - Read `.specify/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
   - Read `.specify/templates/spec-template.md` for scope/requirements alignment—update if the constitution adds/removes mandatory sections or constraints.
   - Read `.specify/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
   - Read each command file in `.specify/templates/commands/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
   - Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to any principles that changed.

5. Produce a Sync Impact Report (prepend as an HTML comment at the top of the constitution file after the update):
   - Version change: old → new
   - List of modified principles (old title → new title if renamed)
   - Added sections
   - Removed sections
   - Templates requiring updates (✅ updated / ⚠ pending) with file paths
   - Follow-up TODOs if any placeholders are intentionally deferred.

6. Validation before final output:
   - No remaining unexplained bracket tokens.
   - Version line matches the report.
   - Dates in ISO format YYYY-MM-DD.
   - Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).

7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).

8. Output a final summary to the user with:
   - New version and bump rationale.
   - Any files flagged for manual follow-up.
   - Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).
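For illustration, the placeholder scan from step 1 and the version bump from step 2 could be sketched as follows (Python; the token regex mirrors `[ALL_CAPS_IDENTIFIER]`, everything else is an assumption):

```python
import re

PLACEHOLDER = re.compile(r"\[([A-Z0-9_]+)\]")

def find_placeholders(constitution_text: str) -> set[str]:
    """Return every [ALL_CAPS_IDENTIFIER] token still present in the template."""
    return set(PLACEHOLDER.findall(constitution_text))

def bump_version(old: str, change: str) -> str:
    """Apply the semantic-versioning rules above: 'major' for incompatible
    governance/principle removals or redefinitions, 'minor' for added or
    expanded principles, 'patch' for clarifications and wording fixes."""
    major, minor, patch = (int(part) for part in old.split("."))
    if change == "major":
        return f"{major + 1}.0.0"
    if change == "minor":
        return f"{major}.{minor + 1}.0"
    return f"{major}.{minor}.{patch + 1}"

print(find_placeholders("# [PROJECT_NAME] Constitution\n## [PRINCIPLE_1_NAME]"))
print(bump_version("2.3.1", "minor"))  # 2.4.0
```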

Formatting & Style Requirements:

- Use Markdown headings exactly as in the template (do not demote/promote levels).
- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
- Keep a single blank line between sections.
- Avoid trailing whitespace.

If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.

If critical info is missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include it in the Sync Impact Report under deferred items.

Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.

.claude/commands/speckit.implement.md (new file, 135 lines)

@@ -0,0 +1,135 @@
|
|||||||
|
---
|
||||||
|
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
1. Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
2. **Check checklist status** (if FEATURE_DIR/checklists/ exists):

   - Scan all checklist files in the checklists/ directory
   - For each checklist, count (a counting sketch follows this step):
     - Total items: all lines matching `- [ ]`, `- [X]`, or `- [x]`
     - Completed items: lines matching `- [X]` or `- [x]`
     - Incomplete items: lines matching `- [ ]`
   - Create a status table:

   ```text
   | Checklist   | Total | Completed | Incomplete | Status |
   |-------------|-------|-----------|------------|--------|
   | ux.md       | 12    | 12        | 0          | ✓ PASS |
   | test.md     | 8     | 5         | 3          | ✗ FAIL |
   | security.md | 6     | 6         | 0          | ✓ PASS |
   ```

   - Calculate overall status:
     - **PASS**: all checklists have 0 incomplete items
     - **FAIL**: one or more checklists have incomplete items

   - **If any checklist is incomplete**:
     - Display the table with incomplete item counts
     - **STOP** and ask: "Some checklists are incomplete. Do you want to proceed with implementation anyway? (yes/no)"
     - Wait for the user's response before continuing
     - If the user says "no", "wait", or "stop", halt execution
     - If the user says "yes", "proceed", or "continue", proceed to step 3

   - **If all checklists are complete**:
     - Display the table showing all checklists passed
     - Automatically proceed to step 3
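   A minimal counting sketch, assuming `$featureDir` was parsed in step 1; the regexes only need to distinguish checked from unchecked boxes:

   ```powershell
   # Sketch: count checklist items per file (illustrative only).
   $checklistDir = Join-Path $featureDir 'checklists'
   $rows = Get-ChildItem -Path $checklistDir -Filter '*.md' | ForEach-Object {
       $lines      = Get-Content $_.FullName
       $total      = @($lines -match '^\s*- \[( |x|X)\]').Count
       $completed  = @($lines -match '^\s*- \[(x|X)\]').Count
       $incomplete = $total - $completed
       [PSCustomObject]@{
           Checklist  = $_.Name
           Total      = $total
           Completed  = $completed
           Incomplete = $incomplete
           Status     = if ($incomplete -eq 0) { '✓ PASS' } else { '✗ FAIL' }
       }
   }
   $overall = if ($rows | Where-Object Incomplete -gt 0) { 'FAIL' } else { 'PASS' }
   ```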
3. Load and analyze the implementation context:

   - **REQUIRED**: Read tasks.md for the complete task list and execution plan
   - **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
   - **IF EXISTS**: Read data-model.md for entities and relationships
   - **IF EXISTS**: Read contracts/ for API specifications and test requirements
   - **IF EXISTS**: Read research.md for technical decisions and constraints
   - **IF EXISTS**: Read quickstart.md for integration scenarios
4. **Project Setup Verification**:

   - **REQUIRED**: Create/verify ignore files based on the actual project setup (a detection sketch follows this step):

   **Detection & Creation Logic**:

   - Check whether the following command succeeds to determine if the repository is a git repo (create/verify .gitignore if so):

   ```sh
   git rev-parse --git-dir 2>/dev/null
   ```

   - Check if Dockerfile* exists or Docker appears in plan.md → create/verify .dockerignore
   - Check if .eslintrc* exists → create/verify .eslintignore
   - Check if eslint.config.* exists → ensure the config's `ignores` entries cover required patterns
   - Check if .prettierrc* exists → create/verify .prettierignore
   - Check if .npmrc or package.json exists → create/verify .npmignore (if publishing)
   - Check if Terraform files (*.tf) exist → create/verify .terraformignore
   - Check if Helm charts are present → create/verify .helmignore

   **If the ignore file already exists**: Verify it contains essential patterns; append missing critical patterns only

   **If the ignore file is missing**: Create it with the full pattern set for the detected technology

   **Common Patterns by Technology** (from plan.md tech stack):

   - **Node.js/JavaScript/TypeScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
   - **Python**: `__pycache__/`, `*.pyc`, `.venv/`, `venv/`, `dist/`, `*.egg-info/`
   - **Java**: `target/`, `*.class`, `*.jar`, `.gradle/`, `build/`
   - **C#/.NET**: `bin/`, `obj/`, `*.user`, `*.suo`, `packages/`
   - **Go**: `*.exe`, `*.test`, `vendor/`, `*.out`
   - **Ruby**: `.bundle/`, `log/`, `tmp/`, `*.gem`, `vendor/bundle/`
   - **PHP**: `vendor/`, `*.log`, `*.cache`, `*.env`
   - **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
   - **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
   - **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
   - **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
   - **Swift**: `.build/`, `DerivedData/`, `*.swiftpm/`, `Packages/`
   - **R**: `.Rproj.user/`, `.Rhistory`, `.RData`, `.Ruserdata`, `*.Rproj`, `packrat/`, `renv/`
   - **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`

   **Tool-Specific Patterns**:

   - **Docker**: `node_modules/`, `.git/`, `Dockerfile*`, `.dockerignore`, `*.log*`, `.env*`, `coverage/`
   - **ESLint**: `node_modules/`, `dist/`, `build/`, `coverage/`, `*.min.js`
   - **Prettier**: `node_modules/`, `dist/`, `build/`, `coverage/`, `package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`
   - **Terraform**: `.terraform/`, `*.tfstate*`, `*.tfvars`, `.terraform.lock.hcl`
   - **Kubernetes/k8s**: `*.secret.yaml`, `secrets/`, `.kube/`, `kubeconfig*`, `*.key`, `*.crt`
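   A detection sketch with a few representative checks; the trigger conditions and pattern lists are illustrative, not the full matrix above:

   ```powershell
   # Sketch: representative ignore-file checks (illustrative patterns only).
   git rev-parse --git-dir 2>$null | Out-Null
   $isGitRepo = ($LASTEXITCODE -eq 0)

   $checks = @(
       @{ Trigger = $isGitRepo;                                  IgnoreFile = '.gitignore';      Patterns = @('target/', '*.log', '.env*') }
       @{ Trigger = [bool](Get-ChildItem -Filter 'Dockerfile*'); IgnoreFile = '.dockerignore';   Patterns = @('.git/', '*.log*', '.env*') }
       @{ Trigger = (Test-Path '.prettierrc*');                  IgnoreFile = '.prettierignore'; Patterns = @('node_modules/', 'dist/') }
   )

   foreach ($check in $checks) {
       if (-not $check.Trigger) { continue }
       if (Test-Path $check.IgnoreFile) {
           $existing = Get-Content $check.IgnoreFile
           $missing  = $check.Patterns | Where-Object { $_ -notin $existing }
           if ($missing) { Add-Content $check.IgnoreFile $missing }   # append only missing critical patterns
       } else {
           Set-Content $check.IgnoreFile $check.Patterns              # create with the full pattern set
       }
   }
   ```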
5. Parse tasks.md structure and extract (a parsing sketch follows this step):

   - **Task phases**: Setup, Tests, Core, Integration, Polish
   - **Task dependencies**: sequential vs. parallel execution rules
   - **Task details**: ID, description, file paths, parallel markers [P]
   - **Execution flow**: order and dependency requirements
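   A parsing sketch, assuming tasks follow the checklist format required by `/speckit.tasks` (defined later in this diff); the regex is illustrative:

   ```powershell
   # Sketch: extract ID, [P] marker, [US#] story label, and description from task lines.
   $taskPattern = '^\s*- \[(?<done> |x|X)\] (?<id>T\d{3})(?<p> \[P\])?(?: \[(?<story>US\d+)\])? (?<desc>.+)$'

   $tasks = Get-Content (Join-Path $featureDir 'tasks.md') | ForEach-Object {
       if ($_ -match $taskPattern) {
           [PSCustomObject]@{
               Id          = $Matches.id
               Parallel    = [bool]$Matches.p
               Story       = $Matches.story
               Done        = $Matches.done -ne ' '
               Description = $Matches.desc
           }
       }
   }
   ```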
6. Execute implementation following the task plan:

   - **Phase-by-phase execution**: Complete each phase before moving to the next
   - **Respect dependencies**: Run sequential tasks in order; parallel tasks [P] can run together
   - **Follow TDD approach**: Execute test tasks before their corresponding implementation tasks
   - **File-based coordination**: Tasks affecting the same files must run sequentially
   - **Validation checkpoints**: Verify each phase is complete before proceeding

7. Implementation execution rules:

   - **Setup first**: Initialize project structure, dependencies, configuration
   - **Tests before code**: If tests are requested, write tests for contracts, entities, and integration scenarios before the corresponding implementation
   - **Core development**: Implement models, services, CLI commands, endpoints
   - **Integration work**: Database connections, middleware, logging, external services
   - **Polish and validation**: Unit tests, performance optimization, documentation

8. Progress tracking and error handling:

   - Report progress after each completed task
   - Halt execution if any non-parallel task fails
   - For parallel tasks [P], continue with successful tasks and report failed ones
   - Provide clear error messages with context for debugging
   - Suggest next steps if implementation cannot proceed
   - **IMPORTANT**: For completed tasks, mark the task off as [X] in the tasks file (see the sketch after this list)
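   A sketch for the [X] bookkeeping; the task ID and helper name are illustrative:

   ```powershell
   # Sketch: flip a single task's checkbox to [X] in tasks.md (hypothetical helper).
   function Set-TaskComplete {
       param([string]$TasksFile, [string]$TaskId)

       $updated = Get-Content $TasksFile | ForEach-Object {
           if ($_ -match "^\s*- \[ \] $TaskId\b") { $_ -replace '- \[ \]', '- [X]' } else { $_ }
       }
       Set-Content -Path $TasksFile -Value $updated
   }

   Set-TaskComplete -TasksFile (Join-Path $featureDir 'tasks.md') -TaskId 'T001'
   ```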
9. Completion validation:

   - Verify all required tasks are completed
   - Check that implemented features match the original specification
   - Validate that tests pass and coverage meets requirements
   - Confirm the implementation follows the technical plan
   - Report final status with a summary of completed work

Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.
90  .claude/commands/speckit.plan.md  Normal file
@@ -0,0 +1,90 @@
---
description: Execute the implementation planning workflow using the plan template to generate design artifacts.
handoffs:
  - label: Create Tasks
    agent: speckit.tasks
    prompt: Break the plan into tasks
    send: true
  - label: Create Checklist
    agent: speckit.checklist
    prompt: Create a checklist for the following domain...
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

1. **Setup**: Run `.specify/scripts/powershell/setup-plan.ps1 -Json` from repo root and parse the JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").

2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load the IMPL_PLAN template (already copied).

3. **Execute plan workflow**: Follow the structure in the IMPL_PLAN template to:
   - Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
   - Fill the Constitution Check section from the constitution
   - Evaluate gates (ERROR if violations are unjustified)
   - Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
   - Phase 1: Generate data-model.md, contracts/, quickstart.md
   - Phase 1: Update agent context by running the agent script
   - Re-evaluate the Constitution Check post-design

4. **Stop and report**: The command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.

## Phases

### Phase 0: Outline & Research

1. **Extract unknowns from Technical Context** above:
   - For each NEEDS CLARIFICATION → research task
   - For each dependency → best-practices task
   - For each integration → patterns task

2. **Generate and dispatch research agents**:

   ```text
   For each unknown in Technical Context:
     Task: "Research {unknown} for {feature context}"
   For each technology choice:
     Task: "Find best practices for {tech} in {domain}"
   ```

3. **Consolidate findings** in `research.md` using the format:
   - Decision: [what was chosen]
   - Rationale: [why chosen]
   - Alternatives considered: [what else was evaluated]

**Output**: research.md with all NEEDS CLARIFICATION resolved

### Phase 1: Design & Contracts

**Prerequisites:** `research.md` complete

1. **Extract entities from the feature spec** → `data-model.md`:
   - Entity name, fields, relationships
   - Validation rules from requirements
   - State transitions if applicable

2. **Define interface contracts** (if the project has external interfaces) → `/contracts/`:
   - Identify what interfaces the project exposes to users or other systems
   - Document the contract format appropriate for the project type
   - Examples: public APIs for libraries, command schemas for CLI tools, endpoints for web services, grammars for parsers, UI contracts for applications
   - Skip if the project is purely internal (build scripts, one-off tools, etc.)

3. **Agent context update**:
   - Run `.specify/scripts/powershell/update-agent-context.ps1 -AgentType claude`
   - These scripts detect which AI agent is in use
   - Update the appropriate agent-specific context file
   - Add only new technology from the current plan
   - Preserve manual additions between markers

**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file

## Key rules

- Use absolute paths
- ERROR on gate failures or unresolved clarifications
258  .claude/commands/speckit.specify.md  Normal file
@@ -0,0 +1,258 @@
---
description: Create or update the feature specification from a natural language feature description.
handoffs:
  - label: Build Technical Plan
    agent: speckit.plan
    prompt: Create a plan for the spec. I am building with...
  - label: Clarify Spec Requirements
    agent: speckit.clarify
    prompt: Clarify specification requirements
    send: true
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `$ARGUMENTS` appears literally below. Do not ask the user to repeat it unless they provided an empty command.

Given that feature description, do this:

1. **Generate a concise short name** (2-4 words) for the branch:
   - Analyze the feature description and extract the most meaningful keywords
   - Create a 2-4 word short name that captures the essence of the feature
   - Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
   - Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
   - Keep it concise but descriptive enough to understand the feature at a glance
   - Examples (a slug-generation sketch follows this list):
     - "I want to add user authentication" → "user-auth"
     - "Implement OAuth2 integration for the API" → "oauth2-api-integration"
     - "Create a dashboard for analytics" → "analytics-dashboard"
     - "Fix payment processing timeout bug" → "fix-payment-timeout"
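   A purely illustrative helper for the kebab-case shape; in practice the agent picks keywords by judgment rather than a stop-word filter:

   ```powershell
   # Sketch: naive kebab-case short name from a feature description (illustrative only).
   function New-FeatureShortName {
       param([string]$Description, [int]$MaxWords = 4)

       $stopWords = @('i', 'want', 'to', 'a', 'an', 'the', 'for', 'of', 'with')
       $words = ($Description.ToLowerInvariant() -replace '[^a-z0-9\s-]', '') -split '\s+' |
           Where-Object { $_ -and $_ -notin $stopWords } |
           Select-Object -First $MaxWords
       return ($words -join '-')
   }

   New-FeatureShortName "Fix payment processing timeout bug"   # → fix-payment-processing-timeout (approximate)
   ```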
2. **Check for existing branches before creating a new one**:

   a. First, fetch all remote branches to ensure we have the latest information:

   ```bash
   git fetch --all --prune
   ```

   b. Find the highest feature number across all sources for the short-name:
      - Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
      - Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
      - Specs directories: check for directories matching `specs/[0-9]+-<short-name>`

   c. Determine the next available number (see the sketch after this step):
      - Extract all numbers from all three sources
      - Find the highest number N
      - Use N+1 for the new branch number

   d. Run the script `.specify/scripts/powershell/create-new-feature.ps1` with the calculated number and short-name:
      - Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
      - Bash example: `.specify/scripts/powershell/create-new-feature.ps1 --json --number 5 --short-name "user-auth" "Add user authentication"`
      - PowerShell example: `.specify/scripts/powershell/create-new-feature.ps1 -Json -Number 5 -ShortName "user-auth" "Add user authentication"`

   **IMPORTANT**:
   - Check all three sources (remote branches, local branches, specs directories) to find the highest number
   - Only match branches/directories with the exact short-name pattern
   - If no existing branches/directories are found with this short-name, start with number 1
   - You must only ever run this script once per feature
   - The JSON is provided in the terminal as output; always refer to it to get the actual content you're looking for
   - The JSON output will contain BRANCH_NAME and SPEC_FILE paths
   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
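   A numbering sketch that consolidates the three sources, assuming git is available from the repo root and the `specs/<number>-<short-name>` layout used elsewhere in this diff:

   ```powershell
   # Sketch: next feature number for a short name across remotes, local branches, specs/.
   $shortName = 'user-auth'   # illustrative
   $pattern   = "(\d+)-$([regex]::Escape($shortName))$"

   $numbers = @()
   $numbers += git ls-remote --heads origin |
       ForEach-Object { if ($_ -match "refs/heads/$pattern") { [int]$Matches[1] } }
   $numbers += git branch --format '%(refname:short)' |
       ForEach-Object { if ($_ -match "^$pattern") { [int]$Matches[1] } }
   $numbers += Get-ChildItem -Path 'specs' -Directory -ErrorAction SilentlyContinue |
       ForEach-Object { if ($_.Name -match "^$pattern") { [int]$Matches[1] } }

   $next = if ($numbers) { ($numbers | Measure-Object -Maximum).Maximum + 1 } else { 1 }
   ```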
3. Load `.specify/templates/spec-template.md` to understand the required sections.

4. Follow this execution flow:

   1. Parse user description from Input
      If empty: ERROR "No feature description provided"
   2. Extract key concepts from the description
      Identify: actors, actions, data, constraints
   3. For unclear aspects:
      - Make informed guesses based on context and industry standards
      - Only mark with [NEEDS CLARIFICATION: specific question] if:
        - The choice significantly impacts feature scope or user experience
        - Multiple reasonable interpretations exist with different implications
        - No reasonable default exists
      - **LIMIT: Maximum 3 [NEEDS CLARIFICATION] markers total**
      - Prioritize clarifications by impact: scope > security/privacy > user experience > technical details
   4. Fill the User Scenarios & Testing section
      If no clear user flow: ERROR "Cannot determine user scenarios"
   5. Generate Functional Requirements
      Each requirement must be testable
      Use reasonable defaults for unspecified details (document assumptions in the Assumptions section)
   6. Define Success Criteria
      Create measurable, technology-agnostic outcomes
      Include both quantitative metrics (time, performance, volume) and qualitative measures (user satisfaction, task completion)
      Each criterion must be verifiable without implementation details
   7. Identify Key Entities (if data is involved)
   8. Return: SUCCESS (spec ready for planning)

5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.

6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:

   a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:

   ```markdown
   # Specification Quality Checklist: [FEATURE NAME]

   **Purpose**: Validate specification completeness and quality before proceeding to planning
   **Created**: [DATE]
   **Feature**: [Link to spec.md]

   ## Content Quality

   - [ ] No implementation details (languages, frameworks, APIs)
   - [ ] Focused on user value and business needs
   - [ ] Written for non-technical stakeholders
   - [ ] All mandatory sections completed

   ## Requirement Completeness

   - [ ] No [NEEDS CLARIFICATION] markers remain
   - [ ] Requirements are testable and unambiguous
   - [ ] Success criteria are measurable
   - [ ] Success criteria are technology-agnostic (no implementation details)
   - [ ] All acceptance scenarios are defined
   - [ ] Edge cases are identified
   - [ ] Scope is clearly bounded
   - [ ] Dependencies and assumptions identified

   ## Feature Readiness

   - [ ] All functional requirements have clear acceptance criteria
   - [ ] User scenarios cover primary flows
   - [ ] Feature meets measurable outcomes defined in Success Criteria
   - [ ] No implementation details leak into specification

   ## Notes

   - Items marked incomplete require spec updates before `/speckit.clarify` or `/speckit.plan`
   ```

   b. **Run Validation Check**: Review the spec against each checklist item:
      - For each item, determine whether it passes or fails
      - Document specific issues found (quote relevant spec sections)

   c. **Handle Validation Results**:

      - **If all items pass**: Mark the checklist complete and proceed to step 7

      - **If items fail (excluding [NEEDS CLARIFICATION])**:
        1. List the failing items and specific issues
        2. Update the spec to address each issue
        3. Re-run validation until all items pass (max 3 iterations)
        4. If still failing after 3 iterations, document remaining issues in the checklist notes and warn the user

      - **If [NEEDS CLARIFICATION] markers remain**:
        1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec
        2. **LIMIT CHECK**: If more than 3 markers exist, keep only the 3 most critical (by scope/security/UX impact) and make informed guesses for the rest
        3. For each clarification needed (max 3), present options to the user in this format:

           ```markdown
           ## Question [N]: [Topic]

           **Context**: [Quote relevant spec section]

           **What we need to know**: [Specific question from NEEDS CLARIFICATION marker]

           **Suggested Answers**:

           | Option | Answer | Implications |
           |--------|--------|--------------|
           | A | [First suggested answer] | [What this means for the feature] |
           | B | [Second suggested answer] | [What this means for the feature] |
           | C | [Third suggested answer] | [What this means for the feature] |
           | Custom | Provide your own answer | [Explain how to provide custom input] |

           **Your choice**: _[Wait for user response]_
           ```

        4. **CRITICAL - Table Formatting**: Ensure markdown tables are properly formatted:
           - Use consistent spacing with pipes aligned
           - Each cell should have spaces around its content: `| Content |`, not `|Content|`
           - The header separator must have at least 3 dashes: `|--------|`
           - Test that the table renders correctly in markdown preview
        5. Number questions sequentially (Q1, Q2, Q3 - max 3 total)
        6. Present all questions together before waiting for responses
        7. Wait for the user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B")
        8. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer
        9. Re-run validation after all clarifications are resolved

   d. **Update Checklist**: After each validation iteration, update the checklist file with the current pass/fail status

7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).

**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.

## General Guidelines

## Quick Guidelines

- Focus on **WHAT** users need and **WHY**.
- Avoid HOW to implement (no tech stack, APIs, code structure).
- Written for business stakeholders, not developers.
- DO NOT create any checklists that are embedded in the spec. That will be a separate command.

### Section Requirements

- **Mandatory sections**: Must be completed for every feature
- **Optional sections**: Include only when relevant to the feature
- When a section doesn't apply, remove it entirely (don't leave it as "N/A")

### For AI Generation

When creating this spec from a user prompt:

1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps
2. **Document assumptions**: Record reasonable defaults in the Assumptions section
3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that:
   - Significantly impact feature scope or user experience
   - Have multiple reasonable interpretations with different implications
   - Lack any reasonable default
4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details
5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
6. **Common areas needing clarification** (only if no reasonable default exists):
   - Feature scope and boundaries (include/exclude specific use cases)
   - User types and permissions (if multiple conflicting interpretations are possible)
   - Security/compliance requirements (when legally or financially significant)

**Examples of reasonable defaults** (don't ask about these):

- Data retention: Industry-standard practices for the domain
- Performance targets: Standard web/mobile app expectations unless specified
- Error handling: User-friendly messages with appropriate fallbacks
- Authentication method: Standard session-based or OAuth2 for web apps
- Integration patterns: Use project-appropriate patterns (REST/GraphQL for web services, function calls for libraries, CLI args for tools, etc.)

### Success Criteria Guidelines

Success criteria must be:

1. **Measurable**: Include specific metrics (time, percentage, count, rate)
2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools
3. **User-focused**: Describe outcomes from the user/business perspective, not system internals
4. **Verifiable**: Can be tested/validated without knowing implementation details

**Good examples**:

- "Users can complete checkout in under 3 minutes"
- "System supports 10,000 concurrent users"
- "95% of searches return results in under 1 second"
- "Task completion rate improves by 40%"

**Bad examples** (implementation-focused):

- "API response time is under 200ms" (too technical; use "Users see results instantly")
- "Database can handle 1000 TPS" (implementation detail; use a user-facing metric)
- "React components render efficiently" (framework-specific)
- "Redis cache hit rate above 80%" (technology-specific)
137  .claude/commands/speckit.tasks.md  Normal file
@@ -0,0 +1,137 @@
---
description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
handoffs:
  - label: Analyze For Consistency
    agent: speckit.analyze
    prompt: Run a project analysis for consistency
    send: true
  - label: Implement Project
    agent: speckit.implement
    prompt: Start the implementation in phases
    send: true
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

1. **Setup**: Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json` from repo root and parse the FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").

2. **Load design documents**: Read from FEATURE_DIR:
   - **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
   - **Optional**: data-model.md (entities), contracts/ (interface contracts), research.md (decisions), quickstart.md (test scenarios)
   - Note: Not all projects have all documents. Generate tasks based on what's available.

3. **Execute task generation workflow**:
   - Load plan.md and extract tech stack, libraries, project structure
   - Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
   - If data-model.md exists: extract entities and map them to user stories
   - If contracts/ exists: map interface contracts to user stories
   - If research.md exists: extract decisions for setup tasks
   - Generate tasks organized by user story (see Task Generation Rules below)
   - Generate a dependency graph showing user story completion order
   - Create parallel execution examples per user story
   - Validate task completeness (each user story has all needed tasks and is independently testable)

4. **Generate tasks.md**: Use `.specify/templates/tasks-template.md` as the structure and fill it with:
   - Correct feature name from plan.md
   - Phase 1: Setup tasks (project initialization)
   - Phase 2: Foundational tasks (blocking prerequisites for all user stories)
   - Phase 3+: One phase per user story (in priority order from spec.md)
   - Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
   - Final Phase: Polish & cross-cutting concerns
   - All tasks must follow the strict checklist format (see Task Generation Rules below)
   - Clear file paths for each task
   - Dependencies section showing story completion order
   - Parallel execution examples per story
   - Implementation strategy section (MVP first, incremental delivery)

5. **Report**: Output the path to the generated tasks.md and a summary:
   - Total task count
   - Task count per user story
   - Parallel opportunities identified
   - Independent test criteria for each story
   - Suggested MVP scope (typically just User Story 1)
   - Format validation: confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)

Context for task generation: $ARGUMENTS

The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.

## Task Generation Rules

**CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.

**Tests are OPTIONAL**: Only generate test tasks if they are explicitly requested in the feature specification or the user requests a TDD approach.

### Checklist Format (REQUIRED)

Every task MUST strictly follow this format:

```text
- [ ] [TaskID] [P?] [Story?] Description with file path
```

**Format Components**:

1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
3. **[P] marker**: Include ONLY if the task is parallelizable (different files, no dependencies on incomplete tasks)
4. **[Story] label**: REQUIRED for user story phase tasks only
   - Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
   - Setup phase: NO story label
   - Foundational phase: NO story label
   - User Story phases: MUST have a story label
   - Polish phase: NO story label
5. **Description**: Clear action with an exact file path

**Examples** (a format-validation sketch follows this list):

- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
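A format-validation sketch; the regex mirrors the examples above and the file path is illustrative:

```powershell
# Sketch: flag tasks.md lines that break the required checklist format.
$tasksFile = 'specs/001-user-auth/tasks.md'   # illustrative path
$taskLine  = '^- \[ \] T\d{3}( \[P\])?( \[US\d+\])? \S.*$'

$violations = Get-Content $tasksFile |
    Where-Object { $_ -match '^\s*- \[ \]' -and $_ -notmatch $taskLine }

if ($violations) {
    Write-Warning 'Tasks not matching the checklist format:'
    $violations | ForEach-Object { Write-Warning "  $_" }
}
```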
### Task Organization

1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
   - Each user story (P1, P2, P3...) gets its own phase
   - Map all related components to their story:
     - Models needed for that story
     - Services needed for that story
     - Interfaces/UI needed for that story
     - If tests requested: tests specific to that story
   - Mark story dependencies (most stories should be independent)

2. **From Contracts**:
   - Map each interface contract → the user story it serves
   - If tests requested: each interface contract → contract test task [P] before implementation in that story's phase

3. **From Data Model**:
   - Map each entity to the user story(ies) that need it
   - If an entity serves multiple stories: put it in the earliest story or the Setup phase
   - Relationships → service layer tasks in the appropriate story phase

4. **From Setup/Infrastructure**:
   - Shared infrastructure → Setup phase (Phase 1)
   - Foundational/blocking tasks → Foundational phase (Phase 2)
   - Story-specific setup → within that story's phase

### Phase Structure

- **Phase 1**: Setup (project initialization)
- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
  - Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
  - Each phase should be a complete, independently testable increment
- **Final Phase**: Polish & Cross-Cutting Concerns
30  .claude/commands/speckit.taskstoissues.md  Normal file
@@ -0,0 +1,30 @@
---
description: Convert existing tasks into actionable, dependency-ordered GitHub issues for the feature based on available design artifacts.
tools: ['github/github-mcp-server/issue_write']
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Outline

1. Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks` from repo root and parse the FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. From the executed script, extract the path to **tasks**.
3. Get the Git remote by running the command below (a remote-check sketch follows):

   ```bash
   git config --get remote.origin.url
   ```

   > [!CAUTION]
   > ONLY PROCEED TO NEXT STEPS IF THE REMOTE IS A GITHUB URL
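   A remote-check sketch; the owner/repo extraction is illustrative and only GitHub HTTPS/SSH remotes pass:

   ```powershell
   # Sketch: only proceed when origin points at GitHub (owner/repo extraction illustrative).
   $remote = git config --get remote.origin.url

   if ($remote -notmatch '^(https://github\.com/|git@github\.com:)(?<owner>[^/]+)/(?<repo>[^/.]+)') {
       Write-Error "Remote '$remote' is not a GitHub URL; refusing to create issues."
       return
   }

   $owner = $Matches.owner
   $repo  = $Matches.repo
   Write-Output "Issues will be created in $owner/$repo"
   ```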
4. For each task in the list, use the GitHub MCP server to create a new issue in the repository identified by the Git remote.

   > [!CAUTION]
   > UNDER NO CIRCUMSTANCES EVER CREATE ISSUES IN REPOSITORIES THAT DO NOT MATCH THE REMOTE URL
85  .env.example  Normal file
@@ -0,0 +1,85 @@
# ImageApi configuration template. Copy to `.env` and fill in for your
|
||||||
|
# deploy. Comments mirror the canonical docs in CLAUDE.md — see there
|
||||||
|
# for the full picture (especially the AI-Insights / Apollo / face
|
||||||
|
# integration sections).
|
||||||
|
|
||||||
|
# ── Required ────────────────────────────────────────────────────────────
|
||||||
|
DATABASE_URL=./database.db
|
||||||
|
BASE_PATH=/path/to/media
|
||||||
|
THUMBNAILS=/path/to/thumbnails
|
||||||
|
VIDEO_PATH=/path/to/video/hls
|
||||||
|
GIFS_DIRECTORY=/path/to/gifs
|
||||||
|
PREVIEW_CLIPS_DIRECTORY=/path/to/preview-clips
|
||||||
|
BIND_URL=0.0.0.0:8080
|
||||||
|
CORS_ALLOWED_ORIGINS=http://localhost:3000
|
||||||
|
SECRET_KEY=replace-me-with-a-long-random-secret
|
||||||
|
RUST_LOG=info
|
||||||
|
|
||||||
|
# ── File watching ───────────────────────────────────────────────────────
|
||||||
|
# Quick scan = recently-modified-files only; full scan = comprehensive walk.
|
||||||
|
WATCH_QUICK_INTERVAL_SECONDS=60
|
||||||
|
WATCH_FULL_INTERVAL_SECONDS=3600
|
||||||
|
# Comma-separated path prefixes / component names to skip in /memories
|
||||||
|
# AND in face detection (e.g. @eaDir, .thumbnails, /private).
|
||||||
|
EXCLUDED_DIRS=
|
||||||
|
|
||||||
|
# ── Video / HLS ─────────────────────────────────────────────────────────
|
||||||
|
HLS_CONCURRENCY=2
|
||||||
|
HLS_TIMEOUT_SECONDS=900
|
||||||
|
PLAYLIST_CLEANUP_INTERVAL_SECONDS=86400
|
||||||
|
|
||||||
|
# ── Telemetry (release builds only) ─────────────────────────────────────
|
||||||
|
# OTLP_OTLS_ENDPOINT=http://localhost:4317
|
||||||
|
|
||||||
|
# ── AI Insights — Ollama (local LLM) ────────────────────────────────────
|
||||||
|
OLLAMA_PRIMARY_URL=http://localhost:11434
|
||||||
|
OLLAMA_PRIMARY_MODEL=nemotron-3-nano:30b
|
||||||
|
# Optional fallback server tried on connection failure.
|
||||||
|
# OLLAMA_FALLBACK_URL=http://server:11434
|
||||||
|
# OLLAMA_FALLBACK_MODEL=llama3.2:3b
|
||||||
|
OLLAMA_REQUEST_TIMEOUT_SECONDS=120
|
||||||
|
# Cap on tool-calling iterations per chat turn / agentic insight.
|
||||||
|
AGENTIC_MAX_ITERATIONS=6
|
||||||
|
AGENTIC_CHAT_MAX_ITERATIONS=6
|
||||||
|
|
||||||
|
# ── AI Insights — OpenRouter (hybrid backend, optional) ─────────────────
|
||||||
|
# Set OPENROUTER_API_KEY to enable the hybrid backend (vision stays
|
||||||
|
# local on Ollama, chat routes to OpenRouter).
|
||||||
|
# OPENROUTER_API_KEY=sk-or-...
|
||||||
|
# OPENROUTER_DEFAULT_MODEL=anthropic/claude-sonnet-4
|
||||||
|
# OPENROUTER_ALLOWED_MODELS=openai/gpt-4o-mini,anthropic/claude-haiku-4-5,google/gemini-2.5-flash
|
||||||
|
# OPENROUTER_BASE_URL=https://openrouter.ai/api/v1
|
||||||
|
# OPENROUTER_EMBEDDING_MODEL=openai/text-embedding-3-small
|
||||||
|
# OPENROUTER_HTTP_REFERER=https://your-site.example
|
||||||
|
# OPENROUTER_APP_TITLE=ImageApi
|
||||||
|
|
||||||
|
# ── AI Insights — sibling services (optional) ───────────────────────────
|
||||||
|
# Apollo (places + face inference). Single Apollo deploys typically set
|
||||||
|
# only APOLLO_API_BASE_URL and let the face client fall back to it.
|
||||||
|
# APOLLO_API_BASE_URL=http://apollo.lan:8000
|
||||||
|
# APOLLO_FACE_API_BASE_URL=http://apollo.lan:8000
|
||||||
|
# SMS_API_URL=http://localhost:8000
|
||||||
|
# SMS_API_TOKEN=
|
||||||
|
|
||||||
|
# Display name used in agentic prompts when the LLM refers to "you".
|
||||||
|
USER_NAME=
|
||||||
|
|
||||||
|
# ── Face detection (Phase 3+) ───────────────────────────────────────────
|
||||||
|
# Cosine-sim floor for auto-binding a detected face to an existing
|
||||||
|
# same-named person on detection. 0.4 ≈ moderate-confidence match.
|
||||||
|
FACE_AUTOBIND_MIN_COS=0.4
|
||||||
|
# Per-scan-tick fan-out into Apollo's detect endpoint. Apollo's GPU
|
||||||
|
# pool serializes server-side; this just overlaps file-IO with
|
||||||
|
# inference RTT.
|
||||||
|
FACE_DETECT_CONCURRENCY=8
|
||||||
|
# Per-detect HTTP timeout. CPU-only Apollo deploys may need higher.
|
||||||
|
FACE_DETECT_TIMEOUT_SEC=60
|
||||||
|
# Per-tick caps on the two backlog drains (independent of WATCH_*
|
||||||
|
# quick / full scans). Tune up if you have a large unscanned backlog
|
||||||
|
# and want it to clear faster; tune down if Apollo is overloaded.
|
||||||
|
FACE_BACKLOG_MAX_PER_TICK=64
|
||||||
|
FACE_HASH_BACKFILL_MAX_PER_TICK=2000
|
||||||
|
|
||||||
|
# ── RAG / search ────────────────────────────────────────────────────────
|
||||||
|
# Set to `1` to enable cross-encoder reranking on /search results.
|
||||||
|
SEARCH_RAG_RERANK=0
|
||||||
5  .gitignore  vendored
@@ -1,13 +1,18 @@
 /target
 database/target
 *.db
+*.db.bak
 .env
+/tmp

 # Default ignored files
 .idea/shelf/
 .idea/workspace.xml
+.idea/inspectionProfiles/
+.idea/markdown.xml
 # Datasource local storage ignored files
 .idea/dataSources*
 .idea/dataSources.local.xml
 # Editor-based HTTP Client requests
 .idea/httpRequests/
+/.claude/settings.local.json
1  .idea/image-api.iml  generated
@@ -3,6 +3,7 @@
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$">
       <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
+      <excludeFolder url="file://$MODULE_DIR$/.idea/dataSources" />
       <excludeFolder url="file://$MODULE_DIR$/target" />
     </content>
     <orderEntry type="inheritedJdk" />
149  .specify/memory/constitution.md  Normal file
@@ -0,0 +1,149 @@
<!--
|
||||||
|
Sync Impact Report
|
||||||
|
==================
|
||||||
|
Version change: (new) -> 1.0.0
|
||||||
|
Modified principles: N/A (initial ratification)
|
||||||
|
Added sections:
|
||||||
|
- Core Principles (5 principles)
|
||||||
|
- Technology Stack & Constraints
|
||||||
|
- Development Workflow
|
||||||
|
- Governance
|
||||||
|
Removed sections: N/A
|
||||||
|
Templates requiring updates:
|
||||||
|
- .specify/templates/plan-template.md — ✅ no changes needed (Constitution Check section is generic)
|
||||||
|
- .specify/templates/spec-template.md — ✅ no changes needed
|
||||||
|
- .specify/templates/tasks-template.md — ✅ no changes needed
|
||||||
|
- .specify/templates/checklist-template.md — ✅ no changes needed
|
||||||
|
- .specify/templates/agent-file-template.md — ✅ no changes needed
|
||||||
|
Follow-up TODOs: None
|
||||||
|
-->
|
||||||
|
|
||||||
|
# ImageApi Constitution
|
||||||
|
|
||||||
|
## Core Principles
|
||||||
|
|
||||||
|
### I. Layered Architecture
|
||||||
|
|
||||||
|
All features MUST follow the established layered architecture:
|
||||||
|
|
||||||
|
- **HTTP Layer** (`main.rs`, feature modules): Route handlers, request
|
||||||
|
parsing, response formatting. No direct database access.
|
||||||
|
- **Service Layer** (`files.rs`, `exif.rs`, `memories.rs`, etc.): Business
|
||||||
|
logic. No HTTP-specific types.
|
||||||
|
- **DAO Layer** (`database/` trait definitions): Trait-based data access
|
||||||
|
contracts. Every DAO MUST be defined as a trait to enable mock
|
||||||
|
implementations for testing.
|
||||||
|
- **Database Layer** (Diesel ORM, `schema.rs`): Concrete `Sqlite*Dao`
|
||||||
|
implementations. All queries traced with OpenTelemetry.
|
||||||
|
|
||||||
|
New features MUST NOT bypass layers (e.g., HTTP handlers MUST NOT
|
||||||
|
execute raw SQL). Actix actors are permitted for long-running async
|
||||||
|
work (video processing, file watching) but MUST interact with the
|
||||||
|
DAO layer through the established trait interfaces.
|
||||||
|
|
||||||
|
### II. Path Safety (NON-NEGOTIABLE)
|
||||||
|
|
||||||
|
All user-supplied file paths MUST be validated against `BASE_PATH`
|
||||||
|
using `is_valid_full_path()` before any filesystem operation. This
|
||||||
|
prevents directory traversal attacks.
|
||||||
|
|
||||||
|
- Paths stored in the database MUST be relative to `BASE_PATH`.
|
||||||
|
- Paths passed to external tools (ffmpeg, image processing) MUST be
|
||||||
|
fully resolved absolute paths.
|
||||||
|
- Extension detection MUST use the centralized helpers in
|
||||||
|
`file_types.rs` (case-insensitive). Manual string matching on
|
||||||
|
extensions is prohibited.
|
||||||
|
|
||||||
|
### III. Trait-Based Testability
|
||||||
|
|
||||||
|
All data access MUST go through trait-based DAOs so that every
|
||||||
|
handler and service can be tested with mock implementations.
|
||||||
|
|
||||||
|
- Each DAO trait MUST be defined in `src/database/` and require
|
||||||
|
`Sync + Send`.
|
||||||
|
- Mock DAOs for testing MUST live in `src/testhelpers.rs`.
|
||||||
|
- Integration tests against real SQLite MUST use in-memory databases
|
||||||
|
via `in_memory_db_connection()` from `database::test`.
|
||||||
|
- Handler tests MUST use `actix_web::test` utilities with JWT token
|
||||||
|
injection (using `Claims::valid_user()` and the `test_key` secret).
|
||||||
|
- New DAO implementations MUST include a `#[cfg(test)]` constructor
|
||||||
|
(e.g., `from_connection`) accepting an injected connection.
|
||||||
|
|
||||||
|
### IV. Environment-Driven Configuration
|
||||||
|
|
||||||
|
Server behavior MUST be controlled through environment variables
|
||||||
|
loaded from `.env` files. Hard-coded paths, URLs, or secrets are
|
||||||
|
prohibited.
|
||||||
|
|
||||||
|
- Required variables MUST call `.expect()` with a clear message at
|
||||||
|
startup so misconfiguration fails fast.
|
||||||
|
- Optional variables MUST use `.unwrap_or_else()` with sensible
|
||||||
|
defaults and be documented in `README.md`.
|
||||||
|
- Any new environment variable MUST be added to the README
|
||||||
|
environment section before the feature is considered complete.
|
||||||
|
|
||||||
|
### V. Observability
|
||||||
|
|
||||||
|
All database operations and HTTP handlers MUST be instrumented
|
||||||
|
with OpenTelemetry spans via the `trace_db_call` helper or
|
||||||
|
equivalent tracing macros.
|
||||||
|
|
||||||
|
- Release builds export traces to the configured OTLP endpoint.
|
||||||
|
- Debug builds use the basic logger.
|
||||||
|
- Prometheus metrics (`imageserver_image_total`,
|
||||||
|
`imageserver_video_total`) MUST be maintained for key counters.
|
||||||
|
- Errors MUST be logged at `error!` level with sufficient context
|
||||||
|
for debugging without reproducing the issue.
|
||||||
|
|
||||||
|
## Technology Stack & Constraints
|
||||||
|
|
||||||
|
- **Language**: Rust (stable toolchain, Cargo build system)
|
||||||
|
- **HTTP Framework**: Actix-web 4
|
||||||
|
- **ORM**: Diesel 2.2 with SQLite backend
|
||||||
|
- **Auth**: JWT (HS256) via `jsonwebtoken` crate, bcrypt password
|
||||||
|
hashing
|
||||||
|
- **Video Processing**: ffmpeg/ffprobe (CLI, must be on PATH)
|
||||||
|
- **Image Processing**: `image` crate for thumbnails, `kamadak-exif`
|
||||||
|
for EXIF extraction
|
||||||
|
- **Tracing**: OpenTelemetry with OTLP export (release),
|
||||||
|
basic logger (debug)
|
||||||
|
- **Testing**: `cargo test`, `actix_web::test`, in-memory SQLite
|
||||||
|
|
||||||
|
External dependencies (ffmpeg, Ollama) are optional runtime
|
||||||
|
requirements. The server MUST start and serve core functionality
|
||||||
|
(images, thumbnails, tags) without them. Features that depend on
|
||||||
|
optional services MUST degrade gracefully with logged warnings,
|
||||||
|
not panics.
|
||||||
|
|
||||||
|
## Development Workflow
|
||||||
|
|
||||||
|
- `cargo fmt` MUST pass before committing.
|
||||||
|
- `cargo clippy` warnings MUST be resolved or explicitly suppressed
|
||||||
|
with a justification comment.
|
||||||
|
- `cargo test` MUST pass with all tests green before merging to
|
||||||
|
master.
|
||||||
|
- Database schema changes MUST use Diesel migrations
|
||||||
|
(`diesel migration generate`), with hand-written SQL in `up.sql`
|
||||||
|
and `down.sql`, followed by `diesel print-schema` to regenerate
|
||||||
|
`schema.rs`.
|
||||||
|
- Features MUST be developed on named branches
|
||||||
|
(`###-feature-name`) and merged to master via pull request.
|
||||||
|
- File uploads MUST preserve existing files (append timestamp on
|
||||||
|
conflict, never overwrite).
|
||||||
|
|
||||||
|
## Governance
|
||||||
|
|
||||||
|
This constitution defines the non-negotiable architectural and
|
||||||
|
development standards for the ImageApi project. All code changes
|
||||||
|
MUST comply with these principles.
|
||||||
|
|
||||||
|
- **Amendments**: Any change to this constitution MUST be documented
|
||||||
|
with a version bump, rationale, and updated Sync Impact Report.
|
||||||
|
- **Versioning**: MAJOR for principle removals/redefinitions, MINOR
|
||||||
|
for new principles or material expansions, PATCH for wording
|
||||||
|
clarifications.
|
||||||
|
- **Compliance**: Pull request reviews SHOULD verify adherence to
|
||||||
|
these principles. The CLAUDE.md file provides runtime development
|
||||||
|
guidance and MUST remain consistent with this constitution.
|
||||||
|
|
||||||
|
**Version**: 1.0.0 | **Ratified**: 2026-02-26 | **Last Amended**: 2026-02-26
|
||||||
148  .specify/scripts/powershell/check-prerequisites.ps1  Normal file
@@ -0,0 +1,148 @@
#!/usr/bin/env pwsh
|
||||||
|
|
||||||
|
# Consolidated prerequisite checking script (PowerShell)
|
||||||
|
#
|
||||||
|
# This script provides unified prerequisite checking for Spec-Driven Development workflow.
|
||||||
|
# It replaces the functionality previously spread across multiple scripts.
|
||||||
|
#
|
||||||
|
# Usage: ./check-prerequisites.ps1 [OPTIONS]
|
||||||
|
#
|
||||||
|
# OPTIONS:
|
||||||
|
# -Json Output in JSON format
|
||||||
|
# -RequireTasks Require tasks.md to exist (for implementation phase)
|
||||||
|
# -IncludeTasks Include tasks.md in AVAILABLE_DOCS list
|
||||||
|
# -PathsOnly Only output path variables (no validation)
|
||||||
|
# -Help, -h Show help message
|
||||||
|
|
||||||
|
[CmdletBinding()]
|
||||||
|
param(
|
||||||
|
[switch]$Json,
|
||||||
|
[switch]$RequireTasks,
|
||||||
|
[switch]$IncludeTasks,
|
||||||
|
[switch]$PathsOnly,
|
||||||
|
[switch]$Help
|
||||||
|
)
|
||||||
|
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
|
# Show help if requested
|
||||||
|
if ($Help) {
|
||||||
|
Write-Output @"
|
||||||
|
Usage: check-prerequisites.ps1 [OPTIONS]
|
||||||
|
|
||||||
|
Consolidated prerequisite checking for Spec-Driven Development workflow.
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
-Json Output in JSON format
|
||||||
|
-RequireTasks Require tasks.md to exist (for implementation phase)
|
||||||
|
-IncludeTasks Include tasks.md in AVAILABLE_DOCS list
|
||||||
|
-PathsOnly Only output path variables (no prerequisite validation)
|
||||||
|
-Help, -h Show this help message
|
||||||
|
|
||||||
|
EXAMPLES:
|
||||||
|
# Check task prerequisites (plan.md required)
|
||||||
|
.\check-prerequisites.ps1 -Json
|
||||||
|
|
||||||
|
# Check implementation prerequisites (plan.md + tasks.md required)
|
||||||
|
.\check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks
|
||||||
|
|
||||||
|
# Get feature paths only (no validation)
|
||||||
|
.\check-prerequisites.ps1 -PathsOnly
|
||||||
|
|
||||||
|
"@
|
||||||
|
exit 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Source common functions
|
||||||
|
. "$PSScriptRoot/common.ps1"
|
||||||
|
|
||||||
|
# Get feature paths and validate branch
|
||||||
|
$paths = Get-FeaturePathsEnv
|
||||||
|
|
||||||
|
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit:$paths.HAS_GIT)) {
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# If paths-only mode, output paths and exit (support combined -Json -PathsOnly)
|
||||||
|
if ($PathsOnly) {
|
||||||
|
if ($Json) {
|
||||||
|
[PSCustomObject]@{
|
||||||
|
REPO_ROOT = $paths.REPO_ROOT
|
||||||
|
BRANCH = $paths.CURRENT_BRANCH
|
||||||
|
FEATURE_DIR = $paths.FEATURE_DIR
|
||||||
|
FEATURE_SPEC = $paths.FEATURE_SPEC
|
||||||
|
IMPL_PLAN = $paths.IMPL_PLAN
|
||||||
|
TASKS = $paths.TASKS
|
||||||
|
} | ConvertTo-Json -Compress
|
||||||
|
} else {
|
||||||
|
Write-Output "REPO_ROOT: $($paths.REPO_ROOT)"
|
||||||
|
Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
|
||||||
|
Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
|
||||||
|
Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
|
||||||
|
Write-Output "TASKS: $($paths.TASKS)"
|
||||||
|
}
|
||||||
|
exit 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Validate required directories and files
|
||||||
|
if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) {
|
||||||
|
Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "Run /speckit.specify first to create the feature structure."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) {
|
||||||
|
Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "Run /speckit.plan first to create the implementation plan."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check for tasks.md if required
|
||||||
|
if ($RequireTasks -and -not (Test-Path $paths.TASKS -PathType Leaf)) {
|
||||||
|
Write-Output "ERROR: tasks.md not found in $($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "Run /speckit.tasks first to create the task list."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Build list of available documents
|
||||||
|
$docs = @()
|
||||||
|
|
||||||
|
# Always check these optional docs
|
||||||
|
if (Test-Path $paths.RESEARCH) { $docs += 'research.md' }
|
||||||
|
if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' }
|
||||||
|
|
||||||
|
# Check contracts directory (only if it exists and has files)
|
||||||
|
if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) {
|
||||||
|
$docs += 'contracts/'
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' }
|
||||||
|
|
||||||
|
# Include tasks.md if requested and it exists
|
||||||
|
if ($IncludeTasks -and (Test-Path $paths.TASKS)) {
|
||||||
|
$docs += 'tasks.md'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Output results
|
||||||
|
if ($Json) {
|
||||||
|
# JSON output
|
||||||
|
[PSCustomObject]@{
|
||||||
|
FEATURE_DIR = $paths.FEATURE_DIR
|
||||||
|
AVAILABLE_DOCS = $docs
|
||||||
|
} | ConvertTo-Json -Compress
|
||||||
|
} else {
|
||||||
|
# Text output
|
||||||
|
Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)"
|
||||||
|
Write-Output "AVAILABLE_DOCS:"
|
||||||
|
|
||||||
|
# Show status of each potential document
|
||||||
|
Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null
|
||||||
|
Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null
|
||||||
|
Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null
|
||||||
|
Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null
|
||||||
|
|
||||||
|
if ($IncludeTasks) {
|
||||||
|
Test-FileExists -Path $paths.TASKS -Description 'tasks.md' | Out-Null
|
||||||
|
}
|
||||||
|
}
|
||||||
137  .specify/scripts/powershell/common.ps1  Normal file
@@ -0,0 +1,137 @@
#!/usr/bin/env pwsh
# Common PowerShell functions analogous to common.sh

function Get-RepoRoot {
    try {
        $result = git rev-parse --show-toplevel 2>$null
        if ($LASTEXITCODE -eq 0) {
            return $result
        }
    } catch {
        # Git command failed
    }

    # Fall back to script location for non-git repos
    return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path
}

function Get-CurrentBranch {
    # First check if SPECIFY_FEATURE environment variable is set
    if ($env:SPECIFY_FEATURE) {
        return $env:SPECIFY_FEATURE
    }

    # Then check git if available
    try {
        $result = git rev-parse --abbrev-ref HEAD 2>$null
        if ($LASTEXITCODE -eq 0) {
            return $result
        }
    } catch {
        # Git command failed
    }

    # For non-git repos, try to find the latest feature directory
    $repoRoot = Get-RepoRoot
    $specsDir = Join-Path $repoRoot "specs"

    if (Test-Path $specsDir) {
        $latestFeature = ""
        $highest = 0

        Get-ChildItem -Path $specsDir -Directory | ForEach-Object {
            if ($_.Name -match '^(\d{3})-') {
                $num = [int]$matches[1]
                if ($num -gt $highest) {
                    $highest = $num
                    $latestFeature = $_.Name
                }
            }
        }

        if ($latestFeature) {
            return $latestFeature
        }
    }

    # Final fallback
    return "main"
}

function Test-HasGit {
    try {
        git rev-parse --show-toplevel 2>$null | Out-Null
        return ($LASTEXITCODE -eq 0)
    } catch {
        return $false
    }
}

function Test-FeatureBranch {
    param(
        [string]$Branch,
        [bool]$HasGit = $true
    )

    # For non-git repos, we can't enforce branch naming but still provide output
    if (-not $HasGit) {
        Write-Warning "[specify] Warning: Git repository not detected; skipped branch validation"
        return $true
    }

    if ($Branch -notmatch '^[0-9]{3}-') {
        Write-Output "ERROR: Not on a feature branch. Current branch: $Branch"
        Write-Output "Feature branches should be named like: 001-feature-name"
        return $false
    }
    return $true
}

function Get-FeatureDir {
    param([string]$RepoRoot, [string]$Branch)
    Join-Path $RepoRoot "specs/$Branch"
}

function Get-FeaturePathsEnv {
    $repoRoot = Get-RepoRoot
    $currentBranch = Get-CurrentBranch
    $hasGit = Test-HasGit
    $featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch

    [PSCustomObject]@{
        REPO_ROOT = $repoRoot
        CURRENT_BRANCH = $currentBranch
        HAS_GIT = $hasGit
        FEATURE_DIR = $featureDir
        FEATURE_SPEC = Join-Path $featureDir 'spec.md'
        IMPL_PLAN = Join-Path $featureDir 'plan.md'
        TASKS = Join-Path $featureDir 'tasks.md'
        RESEARCH = Join-Path $featureDir 'research.md'
        DATA_MODEL = Join-Path $featureDir 'data-model.md'
        QUICKSTART = Join-Path $featureDir 'quickstart.md'
        CONTRACTS_DIR = Join-Path $featureDir 'contracts'
    }
}

function Test-FileExists {
    param([string]$Path, [string]$Description)
    if (Test-Path -Path $Path -PathType Leaf) {
        Write-Output " ✓ $Description"
        return $true
    } else {
        Write-Output " ✗ $Description"
        return $false
    }
}

function Test-DirHasFiles {
    param([string]$Path, [string]$Description)
    if ((Test-Path -Path $Path -PathType Container) -and (Get-ChildItem -Path $Path -ErrorAction SilentlyContinue | Where-Object { -not $_.PSIsContainer } | Select-Object -First 1)) {
        Write-Output " ✓ $Description"
        return $true
    } else {
        Write-Output " ✗ $Description"
        return $false
    }
}
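A minimal sketch of how the other scripts are expected to consume these helpers, assuming the caller lives in the same directory as common.ps1:

```powershell
# Usage sketch only; the calling script's location is an assumption.
. "$PSScriptRoot/common.ps1"

$paths = Get-FeaturePathsEnv
if (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT) {
    # Prints " ✓ plan.md" or " ✗ plan.md" and returns $true/$false
    Test-FileExists -Path $paths.IMPL_PLAN -Description 'plan.md' | Out-Null
}
```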
283
.specify/scripts/powershell/create-new-feature.ps1
Normal file
@@ -0,0 +1,283 @@
#!/usr/bin/env pwsh
# Create a new feature
[CmdletBinding()]
param(
    [switch]$Json,
    [string]$ShortName,
    [int]$Number = 0,
    [switch]$Help,
    [Parameter(ValueFromRemainingArguments = $true)]
    [string[]]$FeatureDescription
)
$ErrorActionPreference = 'Stop'

# Show help if requested
if ($Help) {
    Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] [-Number N] <feature description>"
    Write-Host ""
    Write-Host "Options:"
    Write-Host " -Json Output in JSON format"
    Write-Host " -ShortName <name> Provide a custom short name (2-4 words) for the branch"
    Write-Host " -Number N Specify branch number manually (overrides auto-detection)"
    Write-Host " -Help Show this help message"
    Write-Host ""
    Write-Host "Examples:"
    Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'"
    Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'"
    exit 0
}

# Check if feature description provided
if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) {
    Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName <name>] <feature description>"
    exit 1
}

$featureDesc = ($FeatureDescription -join ' ').Trim()

# Resolve repository root. Prefer git information when available, but fall back
# to searching for repository markers so the workflow still functions in repositories that
# were initialized with --no-git.
function Find-RepositoryRoot {
    param(
        [string]$StartDir,
        [string[]]$Markers = @('.git', '.specify')
    )
    $current = Resolve-Path $StartDir
    while ($true) {
        foreach ($marker in $Markers) {
            if (Test-Path (Join-Path $current $marker)) {
                return $current
            }
        }
        $parent = Split-Path $current -Parent
        if ($parent -eq $current) {
            # Reached filesystem root without finding markers
            return $null
        }
        $current = $parent
    }
}

function Get-HighestNumberFromSpecs {
    param([string]$SpecsDir)

    $highest = 0
    if (Test-Path $SpecsDir) {
        Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object {
            if ($_.Name -match '^(\d+)') {
                $num = [int]$matches[1]
                if ($num -gt $highest) { $highest = $num }
            }
        }
    }
    return $highest
}

function Get-HighestNumberFromBranches {
    param()

    $highest = 0
    try {
        $branches = git branch -a 2>$null
        if ($LASTEXITCODE -eq 0) {
            foreach ($branch in $branches) {
                # Clean branch name: remove leading markers and remote prefixes
                $cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', ''

                # Extract feature number if branch matches pattern ###-*
                if ($cleanBranch -match '^(\d+)-') {
                    $num = [int]$matches[1]
                    if ($num -gt $highest) { $highest = $num }
                }
            }
        }
    } catch {
        # If git command fails, return 0
        Write-Verbose "Could not check Git branches: $_"
    }
    return $highest
}

function Get-NextBranchNumber {
    param(
        [string]$SpecsDir
    )

    # Fetch all remotes to get latest branch info (suppress errors if no remotes)
    try {
        git fetch --all --prune 2>$null | Out-Null
    } catch {
        # Ignore fetch errors
    }

    # Get highest number from ALL branches (not just matching short name)
    $highestBranch = Get-HighestNumberFromBranches

    # Get highest number from ALL specs (not just matching short name)
    $highestSpec = Get-HighestNumberFromSpecs -SpecsDir $SpecsDir

    # Take the maximum of both
    $maxNum = [Math]::Max($highestBranch, $highestSpec)

    # Return next number
    return $maxNum + 1
}

function ConvertTo-CleanBranchName {
    param([string]$Name)

    return $Name.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', ''
}
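A quick illustration of what the replace chain in `ConvertTo-CleanBranchName` produces; the input string is hypothetical:

```powershell
# Hypothetical input, shown only to illustrate the lowercasing and hyphen collapsing above.
ConvertTo-CleanBranchName -Name "User Auth (v2)!"
# -> user-auth-v2
```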
$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot)
if (-not $fallbackRoot) {
    Write-Error "Error: Could not determine repository root. Please run this script from within the repository."
    exit 1
}

try {
    $repoRoot = git rev-parse --show-toplevel 2>$null
    if ($LASTEXITCODE -eq 0) {
        $hasGit = $true
    } else {
        throw "Git not available"
    }
} catch {
    $repoRoot = $fallbackRoot
    $hasGit = $false
}

Set-Location $repoRoot

$specsDir = Join-Path $repoRoot 'specs'
New-Item -ItemType Directory -Path $specsDir -Force | Out-Null

# Function to generate branch name with stop word filtering and length filtering
function Get-BranchName {
    param([string]$Description)

    # Common stop words to filter out
    $stopWords = @(
        'i', 'a', 'an', 'the', 'to', 'for', 'of', 'in', 'on', 'at', 'by', 'with', 'from',
        'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
        'do', 'does', 'did', 'will', 'would', 'should', 'could', 'can', 'may', 'might', 'must', 'shall',
        'this', 'that', 'these', 'those', 'my', 'your', 'our', 'their',
        'want', 'need', 'add', 'get', 'set'
    )

    # Convert to lowercase and extract words (alphanumeric only)
    $cleanName = $Description.ToLower() -replace '[^a-z0-9\s]', ' '
    $words = $cleanName -split '\s+' | Where-Object { $_ }

    # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
    $meaningfulWords = @()
    foreach ($word in $words) {
        # Skip stop words
        if ($stopWords -contains $word) { continue }

        # Keep words that are length >= 3 OR appear as uppercase in original (likely acronyms)
        if ($word.Length -ge 3) {
            $meaningfulWords += $word
        } elseif ($Description -match "\b$($word.ToUpper())\b") {
            # Keep short words if they appear as uppercase in original (likely acronyms)
            $meaningfulWords += $word
        }
    }

    # If we have meaningful words, use first 3-4 of them
    if ($meaningfulWords.Count -gt 0) {
        $maxWords = if ($meaningfulWords.Count -eq 4) { 4 } else { 3 }
        $result = ($meaningfulWords | Select-Object -First $maxWords) -join '-'
        return $result
    } else {
        # Fallback to original logic if no meaningful words found
        $result = ConvertTo-CleanBranchName -Name $Description
        $fallbackWords = ($result -split '-') | Where-Object { $_ } | Select-Object -First 3
        return [string]::Join('-', $fallbackWords)
    }
}
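A hedged example of the filtering above, using a made-up description: stop words and very short words are dropped, and the first surviving words form the branch suffix (when exactly four meaningful words remain, all four are kept).

```powershell
# Hypothetical description; "add", "for" and "the" are dropped as stop words.
Get-BranchName -Description "Add OAuth2 login for the app"
# -> oauth2-login-app
```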

# Generate branch name
if ($ShortName) {
    # Use provided short name, just clean it up
    $branchSuffix = ConvertTo-CleanBranchName -Name $ShortName
} else {
    # Generate from description with smart filtering
    $branchSuffix = Get-BranchName -Description $featureDesc
}

# Determine branch number
if ($Number -eq 0) {
    if ($hasGit) {
        # Check existing branches on remotes
        $Number = Get-NextBranchNumber -SpecsDir $specsDir
    } else {
        # Fall back to local directory check
        $Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1
    }
}

$featureNum = ('{0:000}' -f $Number)
$branchName = "$featureNum-$branchSuffix"

# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
$maxBranchLength = 244
if ($branchName.Length -gt $maxBranchLength) {
    # Calculate how much we need to trim from suffix
    # Account for: feature number (3) + hyphen (1) = 4 chars
    $maxSuffixLength = $maxBranchLength - 4

    # Truncate suffix
    $truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength))
    # Remove trailing hyphen if truncation created one
    $truncatedSuffix = $truncatedSuffix -replace '-$', ''

    $originalBranchName = $branchName
    $branchName = "$featureNum-$truncatedSuffix"

    Write-Warning "[specify] Branch name exceeded GitHub's 244-byte limit"
    Write-Warning "[specify] Original: $originalBranchName ($($originalBranchName.Length) bytes)"
    Write-Warning "[specify] Truncated to: $branchName ($($branchName.Length) bytes)"
}

if ($hasGit) {
    try {
        git checkout -b $branchName | Out-Null
    } catch {
        Write-Warning "Failed to create git branch: $branchName"
    }
} else {
    Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName"
}

$featureDir = Join-Path $specsDir $branchName
New-Item -ItemType Directory -Path $featureDir -Force | Out-Null

$template = Join-Path $repoRoot '.specify/templates/spec-template.md'
$specFile = Join-Path $featureDir 'spec.md'
if (Test-Path $template) {
    Copy-Item $template $specFile -Force
} else {
    New-Item -ItemType File -Path $specFile | Out-Null
}

# Set the SPECIFY_FEATURE environment variable for the current session
$env:SPECIFY_FEATURE = $branchName

if ($Json) {
    $obj = [PSCustomObject]@{
        BRANCH_NAME = $branchName
        SPEC_FILE = $specFile
        FEATURE_NUM = $featureNum
        HAS_GIT = $hasGit
    }
    $obj | ConvertTo-Json -Compress
} else {
    Write-Output "BRANCH_NAME: $branchName"
    Write-Output "SPEC_FILE: $specFile"
    Write-Output "FEATURE_NUM: $featureNum"
    Write-Output "HAS_GIT: $hasGit"
    Write-Output "SPECIFY_FEATURE environment variable set to: $branchName"
}
61
.specify/scripts/powershell/setup-plan.ps1
Normal file
@@ -0,0 +1,61 @@
#!/usr/bin/env pwsh
# Setup implementation plan for a feature

[CmdletBinding()]
param(
    [switch]$Json,
    [switch]$Help
)

$ErrorActionPreference = 'Stop'

# Show help if requested
if ($Help) {
    Write-Output "Usage: ./setup-plan.ps1 [-Json] [-Help]"
    Write-Output " -Json Output results in JSON format"
    Write-Output " -Help Show this help message"
    exit 0
}

# Load common functions
. "$PSScriptRoot/common.ps1"

# Get all paths and variables from common functions
$paths = Get-FeaturePathsEnv

# Check if we're on a proper feature branch (only for git repos)
if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT)) {
    exit 1
}

# Ensure the feature directory exists
New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null

# Copy the plan template if it exists; otherwise warn and create an empty file
$template = Join-Path $paths.REPO_ROOT '.specify/templates/plan-template.md'
if (Test-Path $template) {
    Copy-Item $template $paths.IMPL_PLAN -Force
    Write-Output "Copied plan template to $($paths.IMPL_PLAN)"
} else {
    Write-Warning "Plan template not found at $template"
    # Create a basic plan file if template doesn't exist
    New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null
}

# Output results
if ($Json) {
    $result = [PSCustomObject]@{
        FEATURE_SPEC = $paths.FEATURE_SPEC
        IMPL_PLAN = $paths.IMPL_PLAN
        SPECS_DIR = $paths.FEATURE_DIR
        BRANCH = $paths.CURRENT_BRANCH
        HAS_GIT = $paths.HAS_GIT
    }
    $result | ConvertTo-Json -Compress
} else {
    Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)"
    Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)"
    Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)"
    Write-Output "BRANCH: $($paths.CURRENT_BRANCH)"
    Write-Output "HAS_GIT: $($paths.HAS_GIT)"
}
452
.specify/scripts/powershell/update-agent-context.ps1
Normal file
@@ -0,0 +1,452 @@
#!/usr/bin/env pwsh
<#!
.SYNOPSIS
Update agent context files with information from plan.md (PowerShell version)

.DESCRIPTION
Mirrors the behavior of scripts/bash/update-agent-context.sh:
1. Environment Validation
2. Plan Data Extraction
3. Agent File Management (create from template or update existing)
4. Content Generation (technology stack, recent changes, timestamp)
5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, q, agy, bob, qodercli)

.PARAMETER AgentType
Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist).

.EXAMPLE
./update-agent-context.ps1 -AgentType claude

.EXAMPLE
./update-agent-context.ps1  # Updates all existing agent files

.NOTES
Relies on common helper functions in common.ps1
#>
param(
    [Parameter(Position=0)]
    [ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','q','agy','bob','qodercli','generic')]
    [string]$AgentType
)

$ErrorActionPreference = 'Stop'

# Import common helpers
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
. (Join-Path $ScriptDir 'common.ps1')

# Acquire environment paths
$envData = Get-FeaturePathsEnv
$REPO_ROOT = $envData.REPO_ROOT
$CURRENT_BRANCH = $envData.CURRENT_BRANCH
$HAS_GIT = $envData.HAS_GIT
$IMPL_PLAN = $envData.IMPL_PLAN
$NEW_PLAN = $IMPL_PLAN

# Agent file paths
$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md'
$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md'
$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md'
$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc'
$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md'
$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md'
$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md'
$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md'
$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md'
$CODEBUDDY_FILE = Join-Path $REPO_ROOT 'CODEBUDDY.md'
$QODER_FILE = Join-Path $REPO_ROOT 'QODER.md'
$AMP_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$SHAI_FILE = Join-Path $REPO_ROOT 'SHAI.md'
$Q_FILE = Join-Path $REPO_ROOT 'AGENTS.md'
$AGY_FILE = Join-Path $REPO_ROOT '.agent/rules/specify-rules.md'
$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md'

$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md'

# Parsed plan data placeholders
$script:NEW_LANG = ''
$script:NEW_FRAMEWORK = ''
$script:NEW_DB = ''
$script:NEW_PROJECT_TYPE = ''

function Write-Info {
    param(
        [Parameter(Mandatory=$true)]
        [string]$Message
    )
    Write-Host "INFO: $Message"
}

function Write-Success {
    param(
        [Parameter(Mandatory=$true)]
        [string]$Message
    )
    Write-Host "$([char]0x2713) $Message"
}

function Write-WarningMsg {
    param(
        [Parameter(Mandatory=$true)]
        [string]$Message
    )
    Write-Warning $Message
}

function Write-Err {
    param(
        [Parameter(Mandatory=$true)]
        [string]$Message
    )
    Write-Host "ERROR: $Message" -ForegroundColor Red
}

function Validate-Environment {
    if (-not $CURRENT_BRANCH) {
        Write-Err 'Unable to determine current feature'
        if ($HAS_GIT) { Write-Info "Make sure you're on a feature branch" } else { Write-Info 'Set SPECIFY_FEATURE environment variable or create a feature first' }
        exit 1
    }
    if (-not (Test-Path $NEW_PLAN)) {
        Write-Err "No plan.md found at $NEW_PLAN"
        Write-Info 'Ensure you are working on a feature with a corresponding spec directory'
        if (-not $HAS_GIT) { Write-Info 'Use: $env:SPECIFY_FEATURE=your-feature-name or create a new feature first' }
        exit 1
    }
    if (-not (Test-Path $TEMPLATE_FILE)) {
        Write-Err "Template file not found at $TEMPLATE_FILE"
        Write-Info 'Run specify init to scaffold .specify/templates, or add agent-file-template.md there.'
        exit 1
    }
}

function Extract-PlanField {
    param(
        [Parameter(Mandatory=$true)]
        [string]$FieldPattern,
        [Parameter(Mandatory=$true)]
        [string]$PlanFile
    )
    if (-not (Test-Path $PlanFile)) { return '' }
    # Lines like **Language/Version**: Python 3.12
    $regex = "^\*\*$([Regex]::Escape($FieldPattern))\*\*: (.+)$"
    Get-Content -LiteralPath $PlanFile -Encoding utf8 | ForEach-Object {
        if ($_ -match $regex) {
            $val = $Matches[1].Trim()
            if ($val -notin @('NEEDS CLARIFICATION','N/A')) { return $val }
        }
    } | Select-Object -First 1
}
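A small illustration of the extraction helper above; the plan.md path and line are hypothetical and simply match the `**Field**: value` pattern the regex expects:

```powershell
# Assuming specs/001-demo/plan.md contains the line:
#   **Language/Version**: Python 3.12
Extract-PlanField -FieldPattern 'Language/Version' -PlanFile 'specs/001-demo/plan.md'
# -> Python 3.12   (values of 'NEEDS CLARIFICATION' or 'N/A' are skipped)
```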

function Parse-PlanData {
    param(
        [Parameter(Mandatory=$true)]
        [string]$PlanFile
    )
    if (-not (Test-Path $PlanFile)) { Write-Err "Plan file not found: $PlanFile"; return $false }
    Write-Info "Parsing plan data from $PlanFile"
    $script:NEW_LANG = Extract-PlanField -FieldPattern 'Language/Version' -PlanFile $PlanFile
    $script:NEW_FRAMEWORK = Extract-PlanField -FieldPattern 'Primary Dependencies' -PlanFile $PlanFile
    $script:NEW_DB = Extract-PlanField -FieldPattern 'Storage' -PlanFile $PlanFile
    $script:NEW_PROJECT_TYPE = Extract-PlanField -FieldPattern 'Project Type' -PlanFile $PlanFile

    if ($NEW_LANG) { Write-Info "Found language: $NEW_LANG" } else { Write-WarningMsg 'No language information found in plan' }
    if ($NEW_FRAMEWORK) { Write-Info "Found framework: $NEW_FRAMEWORK" }
    if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Info "Found database: $NEW_DB" }
    if ($NEW_PROJECT_TYPE) { Write-Info "Found project type: $NEW_PROJECT_TYPE" }
    return $true
}

function Format-TechnologyStack {
    param(
        [Parameter(Mandatory=$false)]
        [string]$Lang,
        [Parameter(Mandatory=$false)]
        [string]$Framework
    )
    $parts = @()
    if ($Lang -and $Lang -ne 'NEEDS CLARIFICATION') { $parts += $Lang }
    if ($Framework -and $Framework -notin @('NEEDS CLARIFICATION','N/A')) { $parts += $Framework }
    if (-not $parts) { return '' }
    return ($parts -join ' + ')
}

function Get-ProjectStructure {
    param(
        [Parameter(Mandatory=$false)]
        [string]$ProjectType
    )
    if ($ProjectType -match 'web') { return "backend/`nfrontend/`ntests/" } else { return "src/`ntests/" }
}

function Get-CommandsForLanguage {
    param(
        [Parameter(Mandatory=$false)]
        [string]$Lang
    )
    switch -Regex ($Lang) {
        'Python' { return "cd src; pytest; ruff check ." }
        'Rust' { return "cargo test; cargo clippy" }
        'JavaScript|TypeScript' { return "npm test; npm run lint" }
        default { return "# Add commands for $Lang" }
    }
}

function Get-LanguageConventions {
    param(
        [Parameter(Mandatory=$false)]
        [string]$Lang
    )
    if ($Lang) { "${Lang}: Follow standard conventions" } else { 'General: Follow standard conventions' }
}

function New-AgentFile {
    param(
        [Parameter(Mandatory=$true)]
        [string]$TargetFile,
        [Parameter(Mandatory=$true)]
        [string]$ProjectName,
        [Parameter(Mandatory=$true)]
        [datetime]$Date
    )
    if (-not (Test-Path $TEMPLATE_FILE)) { Write-Err "Template not found at $TEMPLATE_FILE"; return $false }
    $temp = New-TemporaryFile
    Copy-Item -LiteralPath $TEMPLATE_FILE -Destination $temp -Force

    $projectStructure = Get-ProjectStructure -ProjectType $NEW_PROJECT_TYPE
    $commands = Get-CommandsForLanguage -Lang $NEW_LANG
    $languageConventions = Get-LanguageConventions -Lang $NEW_LANG

    $escaped_lang = $NEW_LANG
    $escaped_framework = $NEW_FRAMEWORK
    $escaped_branch = $CURRENT_BRANCH

    $content = Get-Content -LiteralPath $temp -Raw -Encoding utf8
    $content = $content -replace '\[PROJECT NAME\]',$ProjectName
    $content = $content -replace '\[DATE\]',$Date.ToString('yyyy-MM-dd')

    # Build the technology stack string safely
    $techStackForTemplate = ""
    if ($escaped_lang -and $escaped_framework) {
        $techStackForTemplate = "- $escaped_lang + $escaped_framework ($escaped_branch)"
    } elseif ($escaped_lang) {
        $techStackForTemplate = "- $escaped_lang ($escaped_branch)"
    } elseif ($escaped_framework) {
        $techStackForTemplate = "- $escaped_framework ($escaped_branch)"
    }

    $content = $content -replace '\[EXTRACTED FROM ALL PLAN.MD FILES\]',$techStackForTemplate
    # For project structure we manually embed (keep newlines)
    $escapedStructure = [Regex]::Escape($projectStructure)
    $content = $content -replace '\[ACTUAL STRUCTURE FROM PLANS\]',$escapedStructure
    # Replace escaped newlines placeholder after all replacements
    $content = $content -replace '\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]',$commands
    $content = $content -replace '\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]',$languageConventions

    # Build the recent changes string safely
    $recentChangesForTemplate = ""
    if ($escaped_lang -and $escaped_framework) {
        $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang} + ${escaped_framework}"
    } elseif ($escaped_lang) {
        $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang}"
    } elseif ($escaped_framework) {
        $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_framework}"
    }

    $content = $content -replace '\[LAST 3 FEATURES AND WHAT THEY ADDED\]',$recentChangesForTemplate
    # Convert literal \n sequences introduced by Escape to real newlines
    $content = $content -replace '\\n',[Environment]::NewLine

    $parent = Split-Path -Parent $TargetFile
    if (-not (Test-Path $parent)) { New-Item -ItemType Directory -Path $parent | Out-Null }
    Set-Content -LiteralPath $TargetFile -Value $content -NoNewline -Encoding utf8
    Remove-Item $temp -Force
    return $true
}

function Update-ExistingAgentFile {
    param(
        [Parameter(Mandatory=$true)]
        [string]$TargetFile,
        [Parameter(Mandatory=$true)]
        [datetime]$Date
    )
    if (-not (Test-Path $TargetFile)) { return (New-AgentFile -TargetFile $TargetFile -ProjectName (Split-Path $REPO_ROOT -Leaf) -Date $Date) }

    $techStack = Format-TechnologyStack -Lang $NEW_LANG -Framework $NEW_FRAMEWORK
    $newTechEntries = @()
    if ($techStack) {
        $escapedTechStack = [Regex]::Escape($techStack)
        if (-not (Select-String -Pattern $escapedTechStack -Path $TargetFile -Quiet)) {
            $newTechEntries += "- $techStack ($CURRENT_BRANCH)"
        }
    }
    if ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) {
        $escapedDB = [Regex]::Escape($NEW_DB)
        if (-not (Select-String -Pattern $escapedDB -Path $TargetFile -Quiet)) {
            $newTechEntries += "- $NEW_DB ($CURRENT_BRANCH)"
        }
    }
    $newChangeEntry = ''
    if ($techStack) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${techStack}" }
    elseif ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${NEW_DB}" }

    $lines = Get-Content -LiteralPath $TargetFile -Encoding utf8
    $output = New-Object System.Collections.Generic.List[string]
    $inTech = $false; $inChanges = $false; $techAdded = $false; $changeAdded = $false; $existingChanges = 0

    for ($i=0; $i -lt $lines.Count; $i++) {
        $line = $lines[$i]
        if ($line -eq '## Active Technologies') {
            $output.Add($line)
            $inTech = $true
            continue
        }
        if ($inTech -and $line -match '^##\s') {
            if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true }
            $output.Add($line); $inTech = $false; continue
        }
        if ($inTech -and [string]::IsNullOrWhiteSpace($line)) {
            if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true }
            $output.Add($line); continue
        }
        if ($line -eq '## Recent Changes') {
            $output.Add($line)
            if ($newChangeEntry) { $output.Add($newChangeEntry); $changeAdded = $true }
            $inChanges = $true
            continue
        }
        if ($inChanges -and $line -match '^##\s') { $output.Add($line); $inChanges = $false; continue }
        if ($inChanges -and $line -match '^- ') {
            if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ }
            continue
        }
        if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') {
            $output.Add(($line -replace '\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd')))
            continue
        }
        $output.Add($line)
    }

    # Post-loop check: if we're still in the Active Technologies section and haven't added new entries
    if ($inTech -and -not $techAdded -and $newTechEntries.Count -gt 0) {
        $newTechEntries | ForEach-Object { $output.Add($_) }
    }

    Set-Content -LiteralPath $TargetFile -Value ($output -join [Environment]::NewLine) -Encoding utf8
    return $true
}

function Update-AgentFile {
    param(
        [Parameter(Mandatory=$true)]
        [string]$TargetFile,
        [Parameter(Mandatory=$true)]
        [string]$AgentName
    )
    if (-not $TargetFile -or -not $AgentName) { Write-Err 'Update-AgentFile requires TargetFile and AgentName'; return $false }
    Write-Info "Updating $AgentName context file: $TargetFile"
    $projectName = Split-Path $REPO_ROOT -Leaf
    $date = Get-Date

    $dir = Split-Path -Parent $TargetFile
    if (-not (Test-Path $dir)) { New-Item -ItemType Directory -Path $dir | Out-Null }

    if (-not (Test-Path $TargetFile)) {
        if (New-AgentFile -TargetFile $TargetFile -ProjectName $projectName -Date $date) { Write-Success "Created new $AgentName context file" } else { Write-Err 'Failed to create new agent file'; return $false }
    } else {
        try {
            if (Update-ExistingAgentFile -TargetFile $TargetFile -Date $date) { Write-Success "Updated existing $AgentName context file" } else { Write-Err 'Failed to update agent file'; return $false }
        } catch {
            Write-Err "Cannot access or update existing file: $TargetFile. $_"
            return $false
        }
    }
    return $true
}

function Update-SpecificAgent {
    param(
        [Parameter(Mandatory=$true)]
        [string]$Type
    )
    switch ($Type) {
        'claude' { Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code' }
        'gemini' { Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI' }
        'copilot' { Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot' }
        'cursor-agent' { Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE' }
        'qwen' { Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code' }
        'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' }
        'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' }
        'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' }
        'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' }
        'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' }
        'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' }
        'codebuddy' { Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI' }
        'qodercli' { Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI' }
        'amp' { Update-AgentFile -TargetFile $AMP_FILE -AgentName 'Amp' }
        'shai' { Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI' }
        'q' { Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI' }
        'agy' { Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity' }
        'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' }
        'generic' { Write-Info 'Generic agent: no predefined context file. Use the agent-specific update script for your agent.' }
        default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli|generic'; return $false }
    }
}

function Update-AllExistingAgents {
    $found = $false
    $ok = $true
    if (Test-Path $CLAUDE_FILE) { if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }; $found = $true }
    if (Test-Path $GEMINI_FILE) { if (-not (Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI')) { $ok = $false }; $found = $true }
    if (Test-Path $COPILOT_FILE) { if (-not (Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot')) { $ok = $false }; $found = $true }
    if (Test-Path $CURSOR_FILE) { if (-not (Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE')) { $ok = $false }; $found = $true }
    if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true }
    if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true }
    if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true }
    if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true }
    if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true }
    if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true }
    if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true }
    if (Test-Path $QODER_FILE) { if (-not (Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI')) { $ok = $false }; $found = $true }
    if (Test-Path $SHAI_FILE) { if (-not (Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI')) { $ok = $false }; $found = $true }
    if (Test-Path $Q_FILE) { if (-not (Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI')) { $ok = $false }; $found = $true }
    if (Test-Path $AGY_FILE) { if (-not (Update-AgentFile -TargetFile $AGY_FILE -AgentName 'Antigravity')) { $ok = $false }; $found = $true }
    if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true }
    if (-not $found) {
        Write-Info 'No existing agent files found, creating default Claude file...'
        if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }
    }
    return $ok
}

function Print-Summary {
    Write-Host ''
    Write-Info 'Summary of changes:'
    if ($NEW_LANG) { Write-Host " - Added language: $NEW_LANG" }
    if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" }
    if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" }
    Write-Host ''
    Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|agy|bob|qodercli|generic]'
}

function Main {
    Validate-Environment
    Write-Info "=== Updating agent context files for feature $CURRENT_BRANCH ==="
    if (-not (Parse-PlanData -PlanFile $NEW_PLAN)) { Write-Err 'Failed to parse plan data'; exit 1 }
    $success = $true
    if ($AgentType) {
        Write-Info "Updating specific agent: $AgentType"
        if (-not (Update-SpecificAgent -Type $AgentType)) { $success = $false }
    }
    else {
        Write-Info 'No agent specified, updating all existing agent files...'
        if (-not (Update-AllExistingAgents)) { $success = $false }
    }
    Print-Summary
    if ($success) { Write-Success 'Agent context update completed successfully'; exit 0 } else { Write-Err 'Agent context update completed with errors'; exit 1 }
}

Main
28
.specify/templates/agent-file-template.md
Normal file
@@ -0,0 +1,28 @@
# [PROJECT NAME] Development Guidelines

Auto-generated from all feature plans. Last updated: [DATE]

## Active Technologies

[EXTRACTED FROM ALL PLAN.MD FILES]

## Project Structure

```text
[ACTUAL STRUCTURE FROM PLANS]
```

## Commands

[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]

## Code Style

[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]

## Recent Changes

[LAST 3 FEATURES AND WHAT THEY ADDED]

<!-- MANUAL ADDITIONS START -->
<!-- MANUAL ADDITIONS END -->
40
.specify/templates/checklist-template.md
Normal file
@@ -0,0 +1,40 @@
# [CHECKLIST TYPE] Checklist: [FEATURE NAME]

**Purpose**: [Brief description of what this checklist covers]
**Created**: [DATE]
**Feature**: [Link to spec.md or relevant documentation]

**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements.

<!--
============================================================================
IMPORTANT: The checklist items below are SAMPLE ITEMS for illustration only.

The /speckit.checklist command MUST replace these with actual items based on:
- User's specific checklist request
- Feature requirements from spec.md
- Technical context from plan.md
- Implementation details from tasks.md

DO NOT keep these sample items in the generated checklist file.
============================================================================
-->

## [Category 1]

- [ ] CHK001 First checklist item with clear action
- [ ] CHK002 Second checklist item
- [ ] CHK003 Third checklist item

## [Category 2]

- [ ] CHK004 Another category item
- [ ] CHK005 Item with specific criteria
- [ ] CHK006 Final item in this category

## Notes

- Check items off as completed: `[x]`
- Add comments or findings inline
- Link to relevant resources or documentation
- Items are numbered sequentially for easy reference
50
.specify/templates/constitution-template.md
Normal file
@@ -0,0 +1,50 @@
# [PROJECT_NAME] Constitution
<!-- Example: Spec Constitution, TaskFlow Constitution, etc. -->

## Core Principles

### [PRINCIPLE_1_NAME]
<!-- Example: I. Library-First -->
[PRINCIPLE_1_DESCRIPTION]
<!-- Example: Every feature starts as a standalone library; Libraries must be self-contained, independently testable, documented; Clear purpose required - no organizational-only libraries -->

### [PRINCIPLE_2_NAME]
<!-- Example: II. CLI Interface -->
[PRINCIPLE_2_DESCRIPTION]
<!-- Example: Every library exposes functionality via CLI; Text in/out protocol: stdin/args → stdout, errors → stderr; Support JSON + human-readable formats -->

### [PRINCIPLE_3_NAME]
<!-- Example: III. Test-First (NON-NEGOTIABLE) -->
[PRINCIPLE_3_DESCRIPTION]
<!-- Example: TDD mandatory: Tests written → User approved → Tests fail → Then implement; Red-Green-Refactor cycle strictly enforced -->

### [PRINCIPLE_4_NAME]
<!-- Example: IV. Integration Testing -->
[PRINCIPLE_4_DESCRIPTION]
<!-- Example: Focus areas requiring integration tests: New library contract tests, Contract changes, Inter-service communication, Shared schemas -->

### [PRINCIPLE_5_NAME]
<!-- Example: V. Observability, VI. Versioning & Breaking Changes, VII. Simplicity -->
[PRINCIPLE_5_DESCRIPTION]
<!-- Example: Text I/O ensures debuggability; Structured logging required; Or: MAJOR.MINOR.BUILD format; Or: Start simple, YAGNI principles -->

## [SECTION_2_NAME]
<!-- Example: Additional Constraints, Security Requirements, Performance Standards, etc. -->

[SECTION_2_CONTENT]
<!-- Example: Technology stack requirements, compliance standards, deployment policies, etc. -->

## [SECTION_3_NAME]
<!-- Example: Development Workflow, Review Process, Quality Gates, etc. -->

[SECTION_3_CONTENT]
<!-- Example: Code review requirements, testing gates, deployment approval process, etc. -->

## Governance
<!-- Example: Constitution supersedes all other practices; Amendments require documentation, approval, migration plan -->

[GOVERNANCE_RULES]
<!-- Example: All PRs/reviews must verify compliance; Complexity must be justified; Use [GUIDANCE_FILE] for runtime development guidance -->

**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE]
<!-- Example: Version: 2.1.1 | Ratified: 2025-06-13 | Last Amended: 2025-07-16 -->
104
.specify/templates/plan-template.md
Normal file
@@ -0,0 +1,104 @@
# Implementation Plan: [FEATURE]

**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`

**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/plan-template.md` for the execution workflow.

## Summary

[Extract from feature spec: primary requirement + technical approach from research]

## Technical Context

<!--
ACTION REQUIRED: Replace the content in this section with the technical details
for the project. The structure here is presented in an advisory capacity to guide
the iteration process.
-->

**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION]
**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION]
**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION]
**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION]
**Project Type**: [e.g., library/cli/web-service/mobile-app/compiler/desktop-app or NEEDS CLARIFICATION]
**Performance Goals**: [domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION]
**Constraints**: [domain-specific, e.g., <200ms p95, <100MB memory, offline-capable or NEEDS CLARIFICATION]
**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION]
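For illustration only, a hypothetical filled-in Technical Context in the `**Field**: value` form that `update-agent-context.ps1` later parses (all values are made up):

```text
**Language/Version**: Python 3.12
**Primary Dependencies**: FastAPI
**Storage**: PostgreSQL
**Testing**: pytest
**Project Type**: web-service
```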

## Constitution Check

*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*

[Gates determined based on constitution file]

## Project Structure

### Documentation (this feature)

```text
specs/[###-feature]/
├── plan.md          # This file (/speckit.plan command output)
├── research.md      # Phase 0 output (/speckit.plan command)
├── data-model.md    # Phase 1 output (/speckit.plan command)
├── quickstart.md    # Phase 1 output (/speckit.plan command)
├── contracts/       # Phase 1 output (/speckit.plan command)
└── tasks.md         # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan)
```

### Source Code (repository root)
<!--
ACTION REQUIRED: Replace the placeholder tree below with the concrete layout
for this feature. Delete unused options and expand the chosen structure with
real paths (e.g., apps/admin, packages/something). The delivered plan must
not include Option labels.
-->

```text
# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT)
src/
├── models/
├── services/
├── cli/
└── lib/

tests/
├── contract/
├── integration/
└── unit/

# [REMOVE IF UNUSED] Option 2: Web application (when "frontend" + "backend" detected)
backend/
├── src/
│   ├── models/
│   ├── services/
│   └── api/
└── tests/

frontend/
├── src/
│   ├── components/
│   ├── pages/
│   └── services/
└── tests/

# [REMOVE IF UNUSED] Option 3: Mobile + API (when "iOS/Android" detected)
api/
└── [same as backend above]

ios/ or android/
└── [platform-specific structure: feature modules, UI flows, platform tests]
```

**Structure Decision**: [Document the selected structure and reference the real
directories captured above]

## Complexity Tracking

> **Fill ONLY if Constitution Check has violations that must be justified**

| Violation | Why Needed | Simpler Alternative Rejected Because |
|-----------|------------|-------------------------------------|
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
115
.specify/templates/spec-template.md
Normal file
@@ -0,0 +1,115 @@
# Feature Specification: [FEATURE NAME]

**Feature Branch**: `[###-feature-name]`
**Created**: [DATE]
**Status**: Draft
**Input**: User description: "$ARGUMENTS"

## User Scenarios & Testing *(mandatory)*

<!--
IMPORTANT: User stories should be PRIORITIZED as user journeys ordered by importance.
Each user story/journey must be INDEPENDENTLY TESTABLE - meaning if you implement just ONE of them,
you should still have a viable MVP (Minimum Viable Product) that delivers value.

Assign priorities (P1, P2, P3, etc.) to each story, where P1 is the most critical.
Think of each story as a standalone slice of functionality that can be:
- Developed independently
- Tested independently
- Deployed independently
- Demonstrated to users independently
-->

### User Story 1 - [Brief Title] (Priority: P1)

[Describe this user journey in plain language]

**Why this priority**: [Explain the value and why it has this priority level]

**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"]

**Acceptance Scenarios**:

1. **Given** [initial state], **When** [action], **Then** [expected outcome]
2. **Given** [initial state], **When** [action], **Then** [expected outcome]

---

### User Story 2 - [Brief Title] (Priority: P2)

[Describe this user journey in plain language]

**Why this priority**: [Explain the value and why it has this priority level]

**Independent Test**: [Describe how this can be tested independently]

**Acceptance Scenarios**:

1. **Given** [initial state], **When** [action], **Then** [expected outcome]

---

### User Story 3 - [Brief Title] (Priority: P3)

[Describe this user journey in plain language]

**Why this priority**: [Explain the value and why it has this priority level]

**Independent Test**: [Describe how this can be tested independently]

**Acceptance Scenarios**:

1. **Given** [initial state], **When** [action], **Then** [expected outcome]

---

[Add more user stories as needed, each with an assigned priority]

### Edge Cases

<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right edge cases.
-->

- What happens when [boundary condition]?
- How does the system handle [error scenario]?

## Requirements *(mandatory)*

<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right functional requirements.
-->

### Functional Requirements

- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"]
- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"]
- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"]
- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"]
- **FR-005**: System MUST [behavior, e.g., "log all security events"]

*Example of marking unclear requirements:*

- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?]
- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified]

### Key Entities *(include if feature involves data)*

- **[Entity 1]**: [What it represents, key attributes without implementation]
- **[Entity 2]**: [What it represents, relationships to other entities]

## Success Criteria *(mandatory)*

<!--
ACTION REQUIRED: Define measurable success criteria.
These must be technology-agnostic and measurable.
-->

### Measurable Outcomes

- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"]
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
251
.specify/templates/tasks-template.md
Normal file
@@ -0,0 +1,251 @@
---

description: "Task list template for feature implementation"
---

# Tasks: [FEATURE NAME]

**Input**: Design documents from `/specs/[###-feature-name]/`
**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/

**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.

**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.

## Format: `[ID] [P?] [Story] Description`

- **[P]**: Can run in parallel (different files, no dependencies)
- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
- Include exact file paths in descriptions

## Path Conventions

- **Single project**: `src/`, `tests/` at repository root
- **Web app**: `backend/src/`, `frontend/src/`
- **Mobile**: `api/src/`, `ios/src/` or `android/src/`
- Paths shown below assume single project - adjust based on plan.md structure

<!--
============================================================================
IMPORTANT: The tasks below are SAMPLE TASKS for illustration purposes only.

The /speckit.tasks command MUST replace these with actual tasks based on:
- User stories from spec.md (with their priorities P1, P2, P3...)
- Feature requirements from plan.md
- Entities from data-model.md
- Endpoints from contracts/

Tasks MUST be organized by user story so each story can be:
- Implemented independently
- Tested independently
- Delivered as an MVP increment

DO NOT keep these sample tasks in the generated tasks.md file.
============================================================================
-->

## Phase 1: Setup (Shared Infrastructure)

**Purpose**: Project initialization and basic structure

- [ ] T001 Create project structure per implementation plan
- [ ] T002 Initialize [language] project with [framework] dependencies
- [ ] T003 [P] Configure linting and formatting tools

---

## Phase 2: Foundational (Blocking Prerequisites)

**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented

**⚠️ CRITICAL**: No user story work can begin until this phase is complete

Examples of foundational tasks (adjust based on your project):

- [ ] T004 Setup database schema and migrations framework
- [ ] T005 [P] Implement authentication/authorization framework
- [ ] T006 [P] Setup API routing and middleware structure
- [ ] T007 Create base models/entities that all stories depend on
- [ ] T008 Configure error handling and logging infrastructure
- [ ] T009 Setup environment configuration management

**Checkpoint**: Foundation ready - user story implementation can now begin in parallel

---

## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP

**Goal**: [Brief description of what this story delivers]

**Independent Test**: [How to verify this story works on its own]
### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️
|
||||||
|
|
||||||
|
> **NOTE: Write these tests FIRST, ensure they FAIL before implementation**
|
||||||
|
|
||||||
|
- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test_[name].py
|
||||||
|
- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test_[name].py
|
||||||
|
|
||||||
|
### Implementation for User Story 1
|
||||||
|
|
||||||
|
- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py
|
||||||
|
- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py
|
||||||
|
- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013)
|
||||||
|
- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py
|
||||||
|
- [ ] T016 [US1] Add validation and error handling
|
||||||
|
- [ ] T017 [US1] Add logging for user story 1 operations
|
||||||
|
|
||||||
|
**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 4: User Story 2 - [Title] (Priority: P2)
|
||||||
|
|
||||||
|
**Goal**: [Brief description of what this story delivers]
|
||||||
|
|
||||||
|
**Independent Test**: [How to verify this story works on its own]
|
||||||
|
|
||||||
|
### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️
|
||||||
|
|
||||||
|
- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test_[name].py
|
||||||
|
- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test_[name].py
|
||||||
|
|
||||||
|
### Implementation for User Story 2
|
||||||
|
|
||||||
|
- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py
|
||||||
|
- [ ] T021 [US2] Implement [Service] in src/services/[service].py
|
||||||
|
- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py
|
||||||
|
- [ ] T023 [US2] Integrate with User Story 1 components (if needed)
|
||||||
|
|
||||||
|
**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 5: User Story 3 - [Title] (Priority: P3)
|
||||||
|
|
||||||
|
**Goal**: [Brief description of what this story delivers]
|
||||||
|
|
||||||
|
**Independent Test**: [How to verify this story works on its own]
|
||||||
|
|
||||||
|
### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️
|
||||||
|
|
||||||
|
- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test_[name].py
|
||||||
|
- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test_[name].py
|
||||||
|
|
||||||
|
### Implementation for User Story 3
|
||||||
|
|
||||||
|
- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py
|
||||||
|
- [ ] T027 [US3] Implement [Service] in src/services/[service].py
|
||||||
|
- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py
|
||||||
|
|
||||||
|
**Checkpoint**: All user stories should now be independently functional
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
[Add more user story phases as needed, following the same pattern]
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase N: Polish & Cross-Cutting Concerns
|
||||||
|
|
||||||
|
**Purpose**: Improvements that affect multiple user stories
|
||||||
|
|
||||||
|
- [ ] TXXX [P] Documentation updates in docs/
|
||||||
|
- [ ] TXXX Code cleanup and refactoring
|
||||||
|
- [ ] TXXX Performance optimization across all stories
|
||||||
|
- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/
|
||||||
|
- [ ] TXXX Security hardening
|
||||||
|
- [ ] TXXX Run quickstart.md validation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Dependencies & Execution Order
|
||||||
|
|
||||||
|
### Phase Dependencies
|
||||||
|
|
||||||
|
- **Setup (Phase 1)**: No dependencies - can start immediately
|
||||||
|
- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories
|
||||||
|
- **User Stories (Phase 3+)**: All depend on Foundational phase completion
|
||||||
|
- User stories can then proceed in parallel (if staffed)
|
||||||
|
- Or sequentially in priority order (P1 → P2 → P3)
|
||||||
|
- **Polish (Final Phase)**: Depends on all desired user stories being complete
|
||||||
|
|
||||||
|
### User Story Dependencies
|
||||||
|
|
||||||
|
- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories
|
||||||
|
- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable
|
||||||
|
- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable
|
||||||
|
|
||||||
|
### Within Each User Story
|
||||||
|
|
||||||
|
- Tests (if included) MUST be written and FAIL before implementation
|
||||||
|
- Models before services
|
||||||
|
- Services before endpoints
|
||||||
|
- Core implementation before integration
|
||||||
|
- Story complete before moving to next priority
|
||||||
|
|
||||||
|
### Parallel Opportunities
|
||||||
|
|
||||||
|
- All Setup tasks marked [P] can run in parallel
|
||||||
|
- All Foundational tasks marked [P] can run in parallel (within Phase 2)
|
||||||
|
- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows)
|
||||||
|
- All tests for a user story marked [P] can run in parallel
|
||||||
|
- Models within a story marked [P] can run in parallel
|
||||||
|
- Different user stories can be worked on in parallel by different team members
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Parallel Example: User Story 1
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Launch all tests for User Story 1 together (if tests requested):
|
||||||
|
Task: "Contract test for [endpoint] in tests/contract/test_[name].py"
|
||||||
|
Task: "Integration test for [user journey] in tests/integration/test_[name].py"
|
||||||
|
|
||||||
|
# Launch all models for User Story 1 together:
|
||||||
|
Task: "Create [Entity1] model in src/models/[entity1].py"
|
||||||
|
Task: "Create [Entity2] model in src/models/[entity2].py"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Strategy
|
||||||
|
|
||||||
|
### MVP First (User Story 1 Only)
|
||||||
|
|
||||||
|
1. Complete Phase 1: Setup
|
||||||
|
2. Complete Phase 2: Foundational (CRITICAL - blocks all stories)
|
||||||
|
3. Complete Phase 3: User Story 1
|
||||||
|
4. **STOP and VALIDATE**: Test User Story 1 independently
|
||||||
|
5. Deploy/demo if ready
|
||||||
|
|
||||||
|
### Incremental Delivery
|
||||||
|
|
||||||
|
1. Complete Setup + Foundational → Foundation ready
|
||||||
|
2. Add User Story 1 → Test independently → Deploy/Demo (MVP!)
|
||||||
|
3. Add User Story 2 → Test independently → Deploy/Demo
|
||||||
|
4. Add User Story 3 → Test independently → Deploy/Demo
|
||||||
|
5. Each story adds value without breaking previous stories
|
||||||
|
|
||||||
|
### Parallel Team Strategy
|
||||||
|
|
||||||
|
With multiple developers:
|
||||||
|
|
||||||
|
1. Team completes Setup + Foundational together
|
||||||
|
2. Once Foundational is done:
|
||||||
|
- Developer A: User Story 1
|
||||||
|
- Developer B: User Story 2
|
||||||
|
- Developer C: User Story 3
|
||||||
|
3. Stories complete and integrate independently
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- [P] tasks = different files, no dependencies
|
||||||
|
- [Story] label maps task to specific user story for traceability
|
||||||
|
- Each user story should be independently completable and testable
|
||||||
|
- Verify tests fail before implementing
|
||||||
|
- Commit after each task or logical group
|
||||||
|
- Stop at any checkpoint to validate story independently
|
||||||
|
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
|
||||||
CLAUDE.md (new file, 469 lines)
@@ -0,0 +1,469 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

An Actix-web REST API for serving images and videos from a filesystem with automatic thumbnail generation, EXIF extraction, tag organization, and a memories feature for browsing photos by date. Uses SQLite/Diesel ORM for data persistence and ffmpeg for video processing.

## Development Commands

### Building & Running
```bash
# Build for development
cargo build

# Build for release (uses thin LTO optimization)
cargo build --release

# Run the server (requires .env file with DATABASE_URL, BASE_PATH, THUMBNAILS, VIDEO_PATH, BIND_URL, SECRET_KEY)
cargo run

# Run with specific log level
RUST_LOG=debug cargo run
```

### Testing
```bash
# Run all tests (requires BASE_PATH in .env)
cargo test

# Run specific test
cargo test test_name

# Run tests with output
cargo test -- --nocapture
```

### Database Migrations
```bash
# Install diesel CLI (one-time setup)
cargo install diesel_cli --no-default-features --features sqlite

# Create new migration
diesel migration generate migration_name

# Run migrations (also runs automatically on app startup)
diesel migration run

# Revert last migration
diesel migration revert

# Regenerate schema.rs after manual migration changes
diesel print-schema > src/database/schema.rs
```

### Code Quality
```bash
# Format code
cargo fmt

# Run clippy linter
cargo clippy

# Fix automatically fixable issues
cargo fix
```

### Utility Binaries
```bash
# Two-phase cleanup: resolve missing files and validate file types
cargo run --bin cleanup_files -- --base-path /path/to/media --database-url ./database.db
```

## Architecture Overview

### Core Components

**Layered Architecture:**
- **HTTP Layer** (`main.rs`): Route handlers for images, videos, metadata, tags, favorites, memories
- **Auth Layer** (`auth.rs`): JWT token validation, Claims extraction via FromRequest trait
- **Service Layer** (`files.rs`, `exif.rs`, `memories.rs`): Business logic for file operations and EXIF extraction
- **DAO Layer** (`database/mod.rs`): Trait-based data access (ExifDao, UserDao, FavoriteDao, TagDao)
- **Database Layer**: Diesel ORM with SQLite, schema in `database/schema.rs`

**Async Actor System (Actix):**
- `StreamActor`: Manages ffmpeg video processing lifecycle
- `VideoPlaylistManager`: Scans directories and queues videos
- `PlaylistGenerator`: Creates HLS playlists for video streaming

### Database Schema & Patterns

**Tables:**
- `users`: Authentication (id, username, password_hash)
- `favorites`: User-specific favorites (userid, path)
- `tags`: Custom labels with timestamps
- `tagged_photo`: Many-to-many photo-tag relationships
- `image_exif`: Rich metadata (file_path + 16 EXIF fields: camera, GPS, dates, exposure settings)

**DAO Pattern:**
All database access goes through trait-based DAOs (e.g., `ExifDao`, `SqliteExifDao`). Connection pooling uses `Arc<Mutex<SqliteConnection>>`. All DB operations are traced with OpenTelemetry in release builds.

**Key DAO Methods:**
- `store_exif()`, `get_exif()`, `get_exif_batch()`: EXIF CRUD operations
- `query_by_exif()`: Complex filtering by camera, GPS bounds, date ranges
- Batch operations minimize DB hits during file watching

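A minimal sketch of that shape (illustrative only; the record fields and method body here are placeholders, not the real `database/mod.rs` code):

```rust
use std::sync::{Arc, Mutex};

use diesel::sqlite::SqliteConnection;

// Illustrative record; the real columns come from database/schema.rs.
pub struct ImageExif {
    pub file_path: String,
    pub camera_make: Option<String>,
}

// Handlers depend on the trait object, not on SQLite directly.
pub trait ExifDao: Send + Sync {
    fn get_exif(&self, file_path: &str) -> anyhow::Result<Option<ImageExif>>;
}

pub struct SqliteExifDao {
    // Single shared connection, as described above.
    conn: Arc<Mutex<SqliteConnection>>,
}

impl ExifDao for SqliteExifDao {
    fn get_exif(&self, file_path: &str) -> anyhow::Result<Option<ImageExif>> {
        let mut conn = self.conn.lock().expect("connection mutex poisoned");
        // Real implementation: Diesel query on image_exif filtered by file_path,
        // wrapped in an OpenTelemetry span in release builds.
        let _ = (&mut *conn, file_path);
        Ok(None)
    }
}
```
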
### File Processing Pipeline

**Thumbnail Generation:**
1. Startup scan: Rayon parallel walk of BASE_PATH
2. Creates 200x200 thumbnails in THUMBNAILS directory (mirrors source structure)
3. Videos: extracts frame at 3-second mark via ffmpeg
4. Images: uses `image` crate for JPEG/PNG processing
5. RAW formats (NEF/CR2/ARW/DNG/etc.): the `image` crate can't decode RAW
   pixel data, so the pipeline pulls an embedded JPEG preview instead. Fast
   path is `exif::read_jpeg_at_ifd` against IFD0 (PRIMARY) and IFD1
   (THUMBNAIL) — covers most older bodies and DNGs. Slow-path fallback shells
   out to **`exiftool`** for `PreviewImage` / `JpgFromRaw` / `OtherImage`,
   which reaches MakerNote / SubIFD-hosted previews kamadak-exif can't see
   (e.g. Nikon's `PreviewIFD`, where modern Nikon bodies store the full-res
   review JPEG). All candidates are pooled and the largest valid JPEG wins.
   See `src/exif.rs::extract_embedded_jpeg_preview`.

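A sketch of that slow-path shell-out (the exiftool tag names are real; the helper names are illustrative stand-ins for what `extract_embedded_jpeg_preview` does internally):

```rust
use std::process::Command;

/// Ask exiftool for one embedded preview tag as raw bytes. Returns None if
/// exiftool is missing, the tag is absent, or the output is not a JPEG.
fn exiftool_preview(path: &std::path::Path, tag: &str) -> Option<Vec<u8>> {
    let out = Command::new("exiftool")
        .arg("-b")                 // binary output
        .arg(format!("-{tag}"))    // e.g. PreviewImage, JpgFromRaw, OtherImage
        .arg(path)
        .output()
        .ok()?;
    let bytes = out.stdout;
    // Keep only candidates that actually start with the JPEG SOI marker.
    (out.status.success() && bytes.starts_with(&[0xFF, 0xD8])).then_some(bytes)
}

/// Pool all candidates and keep the largest valid JPEG, as described above.
fn largest_preview(path: &std::path::Path) -> Option<Vec<u8>> {
    ["PreviewImage", "JpgFromRaw", "OtherImage"]
        .iter()
        .filter_map(|tag| exiftool_preview(path, tag))
        .max_by_key(|bytes| bytes.len())
}
```
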
**File Watching:**
Runs in background thread with two-tier strategy:
- **Quick scan** (default 60s): Recently modified files only
- **Full scan** (default 3600s): Comprehensive directory check
- Batch queries EXIF DB to detect new files
- Configurable via `WATCH_QUICK_INTERVAL_SECONDS` and `WATCH_FULL_INTERVAL_SECONDS`

**EXIF Extraction:**
- Uses `kamadak-exif` crate
- Supports: JPEG, TIFF, RAW (NEF, CR2, CR3), HEIF/HEIC, PNG, WebP
- Extracts: camera make/model, lens, dimensions, GPS coordinates, focal length, aperture, shutter speed, ISO, date taken
- Triggered on upload and during file watching

**File Upload Behavior:**
If file exists, appends timestamp to filename (`photo_1735124234.jpg`) to preserve history without overwrites.

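A minimal sketch of that rename rule (the helper name is hypothetical):

```rust
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};

/// If `photo.jpg` already exists, return `photo_<unix_ts>.jpg` instead so the
/// existing file is never overwritten.
fn dedup_upload_path(target: &Path) -> PathBuf {
    if !target.exists() {
        return target.to_path_buf();
    }
    let ts = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    let stem = target.file_stem().and_then(|s| s.to_str()).unwrap_or("upload");
    let ext = target.extension().and_then(|s| s.to_str()).unwrap_or("jpg");
    target.with_file_name(format!("{stem}_{ts}.{ext}"))
}
```
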
### Authentication Flow

**Login:**
1. POST `/login` with username/password
2. Verify with `bcrypt::verify()` against password_hash
3. Generate JWT with claims: `{ sub: user_id, exp: 5_days_from_now }`
4. Sign with HS256 using `SECRET_KEY` environment variable

**Authorization:**
All protected endpoints extract `Claims` via `FromRequest` trait implementation. Token passed as `Authorization: Bearer <token>` header.

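A sketch of the issue/verify round-trip with the `jsonwebtoken` crate pinned in Cargo.toml, assuming the claim fields described above; the real `auth.rs` additionally implements `FromRequest` to pull the Bearer token out of the header:

```rust
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Claims {
    sub: i32,   // user id
    exp: usize, // unix expiry, roughly five days out
}

fn issue_token(user_id: i32, secret: &str) -> jsonwebtoken::errors::Result<String> {
    let exp = (chrono::Utc::now() + chrono::Duration::days(5)).timestamp() as usize;
    // Header::default() means HS256, matching the flow described above.
    encode(
        &Header::default(),
        &Claims { sub: user_id, exp },
        &EncodingKey::from_secret(secret.as_bytes()),
    )
}

fn verify_token(token: &str, secret: &str) -> jsonwebtoken::errors::Result<Claims> {
    // Validation::default() checks the exp claim; the caller strips the
    // "Bearer " prefix from the Authorization header before this point.
    decode::<Claims>(
        token,
        &DecodingKey::from_secret(secret.as_bytes()),
        &Validation::default(),
    )
    .map(|data| data.claims)
}
```
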
### API Structure

**Key Endpoint Patterns:**

```rust
// Image serving & upload
GET /image?path=...&size=...&format=...
POST /image (multipart file upload)

// Metadata & EXIF
GET /image/metadata?path=...

// Advanced search with filters
GET /photos?path=...&recursive=true&sort=DateTakenDesc&camera_make=Canon&gps_lat=...&gps_lon=...&gps_radius_km=10&date_from=...&date_to=...&tag_ids=1,2,3&media_type=Photo

// Video streaming (HLS)
POST /video/generate (creates .m3u8 playlist + .ts segments)
GET /video/stream?path=... (serves playlist)

// Tags
GET /image/tags/all
POST /image/tags (add tag to file)
DELETE /image/tags (remove tag from file)
POST /image/tags/batch (bulk tag updates)

// Memories (week-based grouping)
GET /memories?path=...&recursive=true

// AI Insights
POST /insights/generate (non-agentic single-shot)
POST /insights/generate/agentic (tool-calling loop; body: { file_path, backend?, model?, ... })
GET /insights?path=...&library=...
GET /insights/models (local Ollama models + capabilities)
GET /insights/openrouter/models (curated OpenRouter allowlist)
POST /insights/rate (thumbs up/down for training data)

// Insight Chat Continuation
POST /insights/chat (single-turn reply, non-streaming)
POST /insights/chat/stream (SSE: text / tool_call / tool_result / truncated / done)
GET /insights/chat/history?path=... (rendered transcript with tool invocations)
POST /insights/chat/rewind (truncate transcript at a rendered index)
```

**Request Types:**
- `FilesRequest`: Supports complex filtering (tags, EXIF fields, GPS radius, date ranges)
- `SortType`: Shuffle, NameAsc/Desc, TagCountAsc/Desc, DateTakenAsc/Desc

### Important Patterns

**Service Builder Pattern:**
Routes are registered via composable `ServiceBuilder` trait in `service.rs`. Allows modular feature addition.

**Path Validation:**
Always use `is_valid_full_path(&base_path, &requested_path, check_exists)` to prevent directory traversal attacks.

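A sketch of what such a guard can look like with `path-absolutize`, assuming `base_path` is already absolute; the real `is_valid_full_path` may differ in details:

```rust
use std::path::Path;

use path_absolutize::Absolutize;

/// Traversal guard sketch: join, lexically resolve `..` / `.`, then require
/// the result to stay under `base_path` and, optionally, to exist on disk.
fn is_within_base(base_path: &Path, requested: &Path, check_exists: bool) -> bool {
    let joined = base_path.join(requested);
    let Ok(full) = joined.absolutize() else {
        return false;
    };
    full.starts_with(base_path) && (!check_exists || full.exists())
}
```
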
**File Type Detection:**
Centralized in `file_types.rs` with constants `IMAGE_EXTENSIONS` and `VIDEO_EXTENSIONS`. Provides both `Path` and `DirEntry` variants for performance.

**OpenTelemetry Tracing:**
All database operations and HTTP handlers wrapped in spans. In release builds, exports to OTLP endpoint via `OTLP_OTLS_ENDPOINT`. Debug builds use basic logger.

**Memory Exclusion:**
`PathExcluder` in `memories.rs` filters out directories from memories API via `EXCLUDED_DIRS` environment variable (comma-separated paths or substring patterns). The same excluder is applied to face-detection candidates (`face_watch::filter_excluded`) so junk directories like `@eaDir` / `.thumbnails` don't burn detect calls on Apollo.

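A minimal sketch of that kind of substring excluder (struct and method names are illustrative, not the real `PathExcluder` API):

```rust
/// Illustrative substring-based excluder: EXCLUDED_DIRS="/private,@eaDir,.thumbnails"
struct Excluder {
    patterns: Vec<String>,
}

impl Excluder {
    fn from_env() -> Self {
        let patterns = std::env::var("EXCLUDED_DIRS")
            .unwrap_or_default()
            .split(',')
            .map(str::trim)
            .filter(|p| !p.is_empty())
            .map(String::from)
            .collect();
        Self { patterns }
    }

    /// A path is excluded when any configured pattern appears anywhere in it.
    fn is_excluded(&self, path: &str) -> bool {
        self.patterns.iter().any(|p| path.contains(p.as_str()))
    }
}
```
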
### Face detection system

ImageApi owns the face data; Apollo (sibling repo) hosts the insightface inference service. Inference is triggered automatically by the file watcher and persisted into two tables:

- `persons(id, name UNIQUE COLLATE NOCASE, cover_face_id, entity_id, created_from_tag, notes, ...)` — operator-managed, name is the user-visible identity.
- `face_detections(id, library_id, content_hash, rel_path, bbox_*, embedding BLOB, confidence, source, person_id, status, model_version, ...)` — keyed on `content_hash` so a photo duplicated across libraries is detected once. Marker rows for `status IN ('no_faces','failed')` carry NULL bbox/embedding (CHECK constraint enforces this).

**Why content_hash and not (library_id, rel_path):** ties face data to the bytes, not the path. A backup mount that copies files from the primary library naturally inherits the existing detections without re-running inference.

**File-watch hook** (`src/main.rs::process_new_files`): for each photo with a populated `content_hash`, check `FaceDao::already_scanned(hash)`; if not, send bytes (or embedded JPEG preview for RAW via `exif::extract_embedded_jpeg_preview`) to Apollo's `/api/internal/faces/detect`. K=`FACE_DETECT_CONCURRENCY` (default 8) parallel calls per scan tick; Apollo serializes them via its single-worker GPU pool. `face_watch.rs` is the Tokio orchestration layer.

**Per-tick backlog drain** (also `src/main.rs`): two passes that run on every watcher tick regardless of quick-vs-full scan:
- `backfill_unhashed_backlog` — populates `image_exif.content_hash` for photos that predate the hash column and therefore have no hash yet. Capped by `FACE_HASH_BACKFILL_MAX_PER_TICK` (default 2000); errors don't burn the cap.
- `process_face_backlog` — runs detection on photos that have a hash but no `face_detections` row. Capped by `FACE_BACKLOG_MAX_PER_TICK` (default 64). Selected via a SQL anti-join (`FaceDao::list_unscanned_candidates`); videos and EXCLUDED_DIRS paths filtered out client-side via `face_watch::filter_excluded` so they never reach Apollo.

**Auto-bind on detection:** when a photo carries a tag whose name matches a `persons.name` (case-insensitive), the new face binds automatically iff cosine similarity to the person's existing-face mean is ≥ `FACE_AUTOBIND_MIN_COS` (default 0.4). Persons with no existing faces bind unconditionally and the new face becomes the cover.

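The gate reduces to a cosine-similarity check against `FACE_AUTOBIND_MIN_COS`; a small sketch (embedding plumbing omitted):

```rust
/// Cosine similarity between a candidate face embedding and the mean of a
/// person's existing embeddings.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let na: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let nb: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if na == 0.0 || nb == 0.0 { 0.0 } else { dot / (na * nb) }
}

/// Bind only when the similarity clears the configured floor. Persons with no
/// existing faces are handled upstream and bind unconditionally.
fn should_autobind(candidate: &[f32], person_mean: &[f32], min_cos: f32) -> bool {
    cosine_similarity(candidate, person_mean) >= min_cos
}
```
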
**Manual face create** (`POST /image/faces`): crops the image to the user-supplied bbox, applies EXIF orientation via `exif::apply_orientation` (the `image` crate hands raw pre-rotation pixels — without this, manually-drawn bboxes never resolved a face on re-detection), pads to ~50% of bbox dims (RetinaFace anchor scales need ~50% face-fill at det_size=640), then calls Apollo's embed endpoint. A `force` flag lets the operator save a face the detector couldn't see (e.g. profile shots, occluded faces) — the row gets a zero-vector embedding so it's manually-bound only and won't participate in clustering.

**Rerun preserves manual rows** (`POST /image/faces/{id}/rerun`): only `source='auto'` rows are deleted before re-running detection. `already_scanned` returns true on ANY row, so a photo whose only faces are manually drawn never auto-redetects.

Module map:
- `src/faces.rs` — `FaceDao` trait + `SqliteFaceDao` impl, route handlers for `/faces/*`, `/image/faces/*`, `/persons/*`. Mirror of `tags.rs` layout.
- `src/face_watch.rs` — Tokio orchestration for the file-watch detect pass; `filter_excluded` (PathExcluder + image-extension filter), `read_image_bytes_for_detect` (RAW preview fallback).
- `src/ai/face_client.rs` — HTTP client for Apollo's inference. Configured by `APOLLO_FACE_API_BASE_URL`, falls back to `APOLLO_API_BASE_URL`. Both unset → feature disabled, file-watch hook is a no-op.
- `migrations/2026-04-29-000000_add_faces/` — schema.

### Startup Sequence

1. Load `.env` file
2. Run embedded Diesel migrations
3. Spawn file watcher thread
4. Create initial thumbnails (parallel scan)
5. Generate video GIF thumbnails
6. Initialize AppState with Actix actors
7. Set up Prometheus metrics (`imageserver_image_total`, `imageserver_video_total`)
8. Scan directory for videos and queue HLS processing
9. Start HTTP server on `BIND_URL` + localhost:8088

## Testing Patterns

Tests require `BASE_PATH` environment variable. Many integration tests create temporary directories and files.

When testing database code:
- Use in-memory SQLite: `DATABASE_URL=":memory:"`
- Run migrations in test setup
- Clean up with `DROP TABLE` or use `#[serial]` from `serial_test` crate if parallel tests conflict

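A sketch of that setup with Diesel 2 and diesel_migrations 2, assuming the standard `migrations/` directory; adjust the embed path if yours differs:

```rust
use diesel::prelude::*;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

// Path is resolved relative to the crate root at compile time.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

/// Fresh in-memory database per test: establish a connection, then run all
/// pending migrations so the schema matches production.
fn test_connection() -> SqliteConnection {
    let mut conn = SqliteConnection::establish(":memory:")
        .expect("failed to open in-memory SQLite");
    conn.run_pending_migrations(MIGRATIONS)
        .expect("migrations failed");
    conn
}
```
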
## Common Gotchas

**EXIF Date Parsing:**
Multiple formats supported (EXIF DateTime, ISO8601, Unix timestamp). Fallback chain attempts multiple parsers.

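A sketch of such a fallback chain with `chrono`; the format strings are assumptions about the inputs, not the exact parsers used in the code:

```rust
use chrono::{DateTime, NaiveDateTime};

/// Try EXIF "YYYY:MM:DD HH:MM:SS", then RFC 3339 / ISO 8601, then a raw
/// unix-seconds string. Returns unix seconds, as stored in `date_taken`.
fn parse_date_taken(raw: &str) -> Option<i64> {
    if let Ok(naive) = NaiveDateTime::parse_from_str(raw, "%Y:%m:%d %H:%M:%S") {
        return Some(naive.and_utc().timestamp());
    }
    if let Ok(dt) = DateTime::parse_from_rfc3339(raw) {
        return Some(dt.timestamp());
    }
    raw.parse::<i64>().ok()
}
```
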
**Video Processing:**
ffmpeg processes run asynchronously via actors. Use `StreamActor` to track completion. HLS segments written to `VIDEO_PATH`.

**File Extensions:**
Extension detection is case-insensitive. Use `file_types.rs` helpers rather than manual string matching.

**Migration Workflow:**
After creating a migration, manually edit the SQL, then regenerate `schema.rs` with `diesel print-schema`. Migrations auto-run on startup via `embedded_migrations!()` macro.

**Path Absolutization:**
Use `path-absolutize` crate's `.absolutize()` method when converting user-provided paths to ensure they're within `BASE_PATH`.

## Required Environment Variables

```bash
DATABASE_URL=./database.db # SQLite database path
BASE_PATH=/path/to/media # Root media directory
THUMBNAILS=/path/to/thumbnails # Thumbnail storage
VIDEO_PATH=/path/to/video/hls # HLS playlist output
GIFS_DIRECTORY=/path/to/gifs # Video GIF thumbnails
BIND_URL=0.0.0.0:8080 # Server binding
CORS_ALLOWED_ORIGINS=http://localhost:3000
SECRET_KEY=your-secret-key-here # JWT signing secret
RUST_LOG=info # Log level
EXCLUDED_DIRS=/private,/archive # Comma-separated paths to exclude from memories
```

Optional:
```bash
WATCH_QUICK_INTERVAL_SECONDS=60 # Quick scan interval
WATCH_FULL_INTERVAL_SECONDS=3600 # Full scan interval
OTLP_OTLS_ENDPOINT=http://... # OpenTelemetry collector (release builds)

# AI Insights Configuration
OLLAMA_PRIMARY_URL=http://desktop:11434 # Primary Ollama server (e.g., desktop)
OLLAMA_FALLBACK_URL=http://server:11434 # Fallback Ollama server (optional, always-on)
OLLAMA_PRIMARY_MODEL=nemotron-3-nano:30b # Model for primary server (default: nemotron-3-nano:30b)
OLLAMA_FALLBACK_MODEL=llama3.2:3b # Model for fallback server (optional, uses primary if not set)
OLLAMA_REQUEST_TIMEOUT_SECONDS=120 # Per-request generation timeout (default 120). Increase for slow CPU-offloaded models.
SMS_API_URL=http://localhost:8000 # SMS message API endpoint (default: localhost:8000)
SMS_API_TOKEN=your-api-token # SMS API authentication token (optional)

# Apollo Places integration (optional). When set, photo-insight enrichment
# folds the user's personal place name (Home, Work, Cabin, ...) into the
# location string fed to the LLM, and the agentic loop gains a
# `get_personal_place_at` tool. Unset = legacy Nominatim-only path.
APOLLO_API_BASE_URL=http://apollo.lan:8000 # Base URL of the sibling Apollo backend

# Face inference (optional). Apollo also hosts the insightface inference
# service; ImageApi calls it from the file-watch hook (Phase 3) and from
# the manual face-create endpoint. Falls back to APOLLO_API_BASE_URL when
# unset (typical single-Apollo deploy). Both unset = feature disabled.
APOLLO_FACE_API_BASE_URL=http://apollo.lan:8000 # Override if face service runs separately
FACE_AUTOBIND_MIN_COS=0.4 # Phase 3: cosine-sim floor for tag-name auto-bind
FACE_DETECT_CONCURRENCY=8 # Phase 3: per-scan-tick parallel detect calls
FACE_DETECT_TIMEOUT_SEC=60 # reqwest client timeout (CPU inference can be slow)

# OpenRouter (Hybrid Backend) - keeps embeddings + vision local, routes chat to OpenRouter
OPENROUTER_API_KEY=sk-or-... # Required to enable hybrid backend
OPENROUTER_DEFAULT_MODEL=anthropic/claude-sonnet-4 # Used when client doesn't pick a model
OPENROUTER_ALLOWED_MODELS=openai/gpt-4o-mini,anthropic/claude-haiku-4-5,google/gemini-2.5-flash
# Curated allowlist exposed to clients via
# GET /insights/openrouter/models. Empty = no picker.
OPENROUTER_BASE_URL=https://openrouter.ai/api/v1 # Override base URL (optional)
OPENROUTER_EMBEDDING_MODEL=openai/text-embedding-3-small # Optional, embeddings stay local today
OPENROUTER_HTTP_REFERER=https://your-site.example # Optional attribution header
OPENROUTER_APP_TITLE=ImageApi # Optional attribution header

# Insight Chat Continuation
AGENTIC_CHAT_MAX_ITERATIONS=6 # Cap on tool-calling iterations per chat turn (default 6)
```

**AI Insights Fallback Behavior:**
- Primary server is tried first with its configured model (5-second connection timeout)
- On connection failure, automatically falls back to secondary server with its model (if configured)
- If `OLLAMA_FALLBACK_MODEL` not set, uses same model as primary server on fallback
- Total request timeout is 120 seconds to accommodate slow LLM inference
- Logs indicate which server and model was used (info level) and failover attempts (warn level)
- Backwards compatible: `OLLAMA_URL` and `OLLAMA_MODEL` still supported as fallbacks

**Model Discovery:**
The `OllamaClient` provides methods to query available models:
- `OllamaClient::list_models(url)` - Returns list of all models on a server
- `OllamaClient::is_model_available(url, model_name)` - Checks if a specific model exists

This allows runtime verification of model availability before generating insights.

**Hybrid Backend (OpenRouter):**
- Per-request opt-in via `backend=hybrid` on `POST /insights/generate/agentic`.
- Local Ollama still describes the image (vision); the description is inlined
  into the chat prompt and the agentic loop runs on OpenRouter.
- `request.model` (if provided) overrides `OPENROUTER_DEFAULT_MODEL` for that
  call. The mobile picker reads from `OPENROUTER_ALLOWED_MODELS`.
- No live capability precheck — the operator-curated allowlist is trusted.
  A bad model id surfaces as a chat-call error.
- `GET /insights/openrouter/models` returns `{ models, default_model, configured }`
  for client picker UIs.

**Insight Chat Continuation:**

After an agentic insight is generated, the full `Vec<ChatMessage>` transcript is
stored in `photo_insights.training_messages` and can be continued via the
chat endpoints. The `PhotoInsightResponse.has_training_messages` flag tells
clients whether chat is available for a given insight.

- `POST /insights/chat` runs one turn of the agentic loop against the replayed
  history. Body: `{ file_path, library?, user_message, model?, backend?, num_ctx?,
  temperature?, top_p?, top_k?, min_p?, max_iterations?, amend? }`.
- `POST /insights/chat/stream` is the SSE variant — same request body, response
  is `text/event-stream` with events: `iteration_start`, `text` (delta), `tool_call`,
  `tool_result`, `truncated`, `done`, plus a server-emitted `error_message` on
  failure. Preferred by the mobile client for live tool-chip updates.
- `GET /insights/chat/history?path=...&library=...` returns the rendered
  transcript. Each assistant message carries a `tools: [{name, arguments, result,
  result_truncated?}]` array with the tool invocations that led up to it. Tool
  results over 2000 chars are truncated with `result_truncated: true`.
- `POST /insights/chat/rewind` truncates the transcript at a given rendered
  index (drops that message + any tool-call scaffolding that preceded it + all
  later turns). Index 0 is protected. Used for "try again from here" flows.

Backend routing rules (matches agentic-insight generation):
- Stored `backend` on the insight row is authoritative by default.
- `request.backend` may override per-turn. `local -> hybrid` is rejected in
  v1 (would require on-the-fly visual-description rewrite); `hybrid -> local`
  replays verbatim since the description is already inlined as text.
- `request.model` overrides the chat model (an Ollama id in local mode, an
  OpenRouter id in hybrid mode).

Persistence:
- Append mode (default): re-serialize the full history and `UPDATE` the same
  row's `training_messages`.
- Amend mode (`amend: true`): regenerate the title, insert a new insight row
  via `store_insight` (auto-flips prior rows' `is_current=false`). Response
  surfaces the new row's id as `amended_insight_id`.

Per-`(library_id, file_path)` async mutex (`AppState.insight_chat.chat_locks`)
serialises concurrent turns on the same insight so the JSON blob doesn't race.

Context management is a soft bound: if the serialized history exceeds
`num_ctx - 2048` tokens (cheap 4-byte/token heuristic), the oldest
assistant-tool_call + tool_result pairs are dropped until under budget. The
initial user message (with any images) and system prompt are always preserved.
The `truncated` event / flag is surfaced to the client when a drop occurred.

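A sketch of that trimming rule; the `ChatMessage` shape is illustrative, and where the real code drops assistant tool_call and tool_result messages as pairs, this sketch drops one oldest assistant/tool message per pass:

```rust
/// Illustrative message shape; the real type lives in the AI module.
struct ChatMessage {
    role: String, // "system" | "user" | "assistant" | "tool"
    content: String,
}

// Cheap heuristic from the text above: roughly 4 bytes per token.
fn approx_tokens(messages: &[ChatMessage]) -> usize {
    messages.iter().map(|m| m.content.len()).sum::<usize>() / 4
}

/// Drop old tool-turn messages until the history fits in `num_ctx - 2048`
/// tokens. The system prompt and initial user message are never removed
/// because only assistant/tool roles are considered for removal.
fn trim_history(messages: &mut Vec<ChatMessage>, num_ctx: usize) -> bool {
    let budget = num_ctx.saturating_sub(2048);
    let mut truncated = false;
    while approx_tokens(messages) > budget {
        // Oldest droppable message (first assistant or tool entry).
        let Some(idx) = messages
            .iter()
            .position(|m| m.role == "assistant" || m.role == "tool")
        else {
            break;
        };
        if idx + 1 >= messages.len() {
            break; // never drop the most recent turn
        }
        messages.remove(idx);
        truncated = true;
    }
    truncated
}
```
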
Configurable env:
- `AGENTIC_CHAT_MAX_ITERATIONS` — cap on tool-calling iterations per turn
  (default 6). Per-request `max_iterations` is clamped to this cap.

**Apollo Places integration (optional):**

The sibling Apollo project (personal location-history viewer) owns
user-defined Places: `name + lat/lon + radius_m + description (+ optional
category)`. When `APOLLO_API_BASE_URL` is set, ImageApi queries
`/api/places/contains?lat=&lon=` to enrich the LLM prompt's location
string. See `src/ai/apollo_client.rs` and `src/ai/insight_generator.rs`:

- **Auto-enrichment** (always on when configured): the per-photo location
  resolver folds the most-specific containing Place ("Home — near
  Cambridge, MA" or "Home (My house in Cambridge) — near Cambridge, MA"
  when a description is set) into the location field of `combine_contexts`.
  Smallest-radius wins — Apollo sorts server-side, this code takes `[0]`.
- **Agentic tool** `get_personal_place_at(latitude, longitude)`: registered
  alongside `reverse_geocode` only when `apollo_enabled()` returns true.
  Returns "- Name [category]: description (radius N m)" lines, smallest
  radius first. The tool is **deliberately narrow** — no enumerate-all
  variant; auto-enrichment covers the photo-context path and the agentic
  tool covers ad-hoc lat/lon questions in chat continuation.

Failure modes degrade silently to the legacy Nominatim path: 5 s timeout,
errors logged at `warn`, empty results returned. Apollo's routes are
unauthenticated (single-user, LAN-trust); add JWT auth here + on Apollo's
side if exposing beyond a trusted network.

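A sketch of that lookup with `reqwest`; the response shape (a radius-sorted JSON array of places) is an assumption based on the description above:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct Place {
    name: String,
    radius_m: f64,
    description: Option<String>,
    category: Option<String>,
}

/// Ask Apollo which personal Places contain this coordinate. Apollo sorts by
/// radius server-side, so the first element is the most specific match.
/// Errors and timeouts degrade to None so the caller falls back to Nominatim.
async fn personal_place_at(base_url: &str, lat: f64, lon: f64) -> Option<Place> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(5))
        .build()
        .ok()?;
    let url = format!("{base_url}/api/places/contains");
    let places: Vec<Place> = client
        .get(url)
        .query(&[("lat", lat), ("lon", lon)])
        .send()
        .await
        .ok()?
        .json()
        .await
        .ok()?;
    places.into_iter().next()
}
```
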
## Dependencies of Note

### Rust crates

- **actix-web**: HTTP framework
- **diesel**: ORM for SQLite
- **jsonwebtoken**: JWT implementation
- **kamadak-exif**: EXIF parsing
- **image**: Thumbnail generation
- **walkdir**: Directory traversal
- **rayon**: Parallel processing
- **opentelemetry**: Distributed tracing
- **bcrypt**: Password hashing
- **infer**: Magic number file type detection

### External binaries (must be on `PATH`)

- **`ffmpeg`** — video thumbnail extraction (`StreamActor`, HLS pipeline) and
  the HEIF/HEIC/NEF/ARW thumbnail fallback in `generate_image_thumbnail_ffmpeg`.
  Required for any deploy that holds video or HEIF files.
- **`exiftool`** — optional but strongly recommended for RAW-heavy libraries.
  The thumbnail pipeline shells out to it as the slow-path fallback for
  embedded preview extraction (Nikon MakerNote `PreviewIFD`, Canon SubIFDs,
  etc. — anything kamadak-exif's IFD0/IFD1 readers can't reach). Without
  exiftool installed, RAWs whose preview lives outside IFD0/IFD1 will fall
  through to ffmpeg, which often produces black thumbnails. Install via
  package manager: `apt install libimage-exiftool-perl`,
  `brew install exiftool`, `winget install OliverBetz.ExifTool`, or
  `choco install exiftool`.

Cargo.lock (generated, 4656 lines; diff suppressed because it is too large)
Cargo.toml (71 lines)
@@ -1,38 +1,63 @@
 [package]
 name = "image-api"
-version = "0.1.0"
+version = "1.1.0"
 authors = ["Cameron Cordes <cameronc.dev@gmail.com>"]
-edition = "2018"
+edition = "2024"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [profile.release]
-lto = true
+lto = "thin"

 [dependencies]
-actix = "0.10"
-actix-web = "3"
-actix-rt = "1"
-actix-files = "0.5"
-actix-multipart = "0.3.0"
+actix = "0.13.1"
+actix-web = "4"
+actix-rt = "2.6"
+tokio = { version = "1.42.0", features = ["default", "process", "sync", "macros", "rt-multi-thread"] }
+actix-files = "0.6"
+actix-cors = "0.7"
+actix-multipart = "0.7.2"
+actix-governor = "0.5"
 futures = "0.3.5"
-jsonwebtoken = "7.2.0"
+jsonwebtoken = "9.3.0"
 serde = "1"
 serde_json = "1"
-diesel = { version = "1.4.5", features = ["sqlite"] }
-hmac = "0.11"
-sha2 = "0.9"
+diesel = { version = "2.2.10", features = ["sqlite"] }
+libsqlite3-sys = { version = "0.35", features = ["bundled"] }
+diesel_migrations = "2.2.0"
 chrono = "0.4"
+clap = { version = "4.5", features = ["derive"] }
 dotenv = "0.15"
-bcrypt = "0.9"
-image = { version = "0.23", default-features = false, features = ["jpeg", "png", "jpeg_rayon"] }
-walkdir = "2"
-rayon = "1.3"
-notify = "4.0"
-path-absolutize = "3.0.6"
-log="0.4"
-env_logger="0.8"
-actix-web-prom = "0.5.1"
-prometheus = "0.11"
-lazy_static = "1.1"
+bcrypt = "0.17.1"
+image = { version = "0.25.5", default-features = false, features = ["jpeg", "png", "rayon", "webp", "tiff", "avif"] }
+infer = "0.16"
+walkdir = "2.4.0"
+rayon = "1.5"
+path-absolutize = "3.1"
+log = "0.4"
+env_logger = "0.11.5"
+actix-web-prom = "0.9.0"
+prometheus = "0.13"
+lazy_static = "1.5"
 anyhow = "1.0"
+rand = "0.8.5"
+opentelemetry = { version = "0.31.0", features = ["default", "metrics", "tracing"] }
+opentelemetry_sdk = { version = "0.31.0", features = ["default", "rt-tokio-current-thread", "metrics"] }
+opentelemetry-otlp = { version = "0.31.0", features = ["default", "metrics", "tracing", "grpc-tonic"] }
+opentelemetry-stdout = "0.31.0"
+opentelemetry-appender-log = "0.31.0"
+tempfile = "3.20.0"
+regex = "1.11.1"
+exif = { package = "kamadak-exif", version = "0.6.1" }
+reqwest = { version = "0.12", features = ["json", "stream", "multipart"] }
+async-stream = "0.3"
+tokio-util = { version = "0.7", features = ["io"] }
+bytes = "1"
+urlencoding = "2.1"
+zerocopy = "0.8"
+ical = "0.11"
+scraper = "0.20"
+base64 = "0.22"
+blake3 = "1.5"
+async-trait = "0.1"
+indicatif = "0.17"

Jenkinsfile (vendored, 2 lines changed)
@@ -1,7 +1,7 @@
 pipeline {
 agent {
 docker {
-image 'rust:1.55'
+image 'rust:1.59'
 args '-v "$PWD":/usr/src/image-api'
 }
 }

179
README.md
179
README.md
@@ -2,14 +2,191 @@
|
|||||||
This is an Actix-web server for serving images and videos from a filesystem.
|
This is an Actix-web server for serving images and videos from a filesystem.
|
||||||
Upon first run it will generate thumbnails for all images and videos at `BASE_PATH`.
|
Upon first run it will generate thumbnails for all images and videos at `BASE_PATH`.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
- Automatic thumbnail generation for images and videos
|
||||||
|
- EXIF data extraction and storage for photos
|
||||||
|
- File watching with NFS support (polling-based)
|
||||||
|
- Video streaming with HLS
|
||||||
|
- Tag-based organization
|
||||||
|
- Memories API for browsing photos by date
|
||||||
|
- **Video Wall** - Auto-generated short preview clips for videos, served via a grid view
|
||||||
|
- **AI-Powered Photo Insights** - Generate contextual insights from photos using LLMs
|
||||||
|
- **RAG-based Context Retrieval** - Semantic search over daily conversation summaries
|
||||||
|
- **Automatic Daily Summaries** - LLM-generated summaries of daily conversations with embeddings
|
||||||
|
|
||||||
|
## External Dependencies
|
||||||
|
|
||||||
|
### ffmpeg (required)
|
||||||
|
`ffmpeg` must be on `PATH`. It is used for:
|
||||||
|
- **HLS video streaming** — transcoding/segmenting source videos into `.m3u8` + `.ts` playlists
|
||||||
|
- **Video thumbnails** — extracting a frame at the 3-second mark
|
||||||
|
- **Video preview clips** — short looping previews for the Video Wall
|
||||||
|
- **HEIC / HEIF thumbnails** — decoding Apple's HEIC format (your ffmpeg build must include
|
||||||
|
`libheif`; most modern builds do)
|
||||||
|
|
||||||
|
Builds used in development: the `gyan.dev` full build on Windows, and distro `ffmpeg`
|
||||||
|
packages on Linux work fine. If HEIC thumbnails silently fail, check
|
||||||
|
`ffmpeg -formats | grep heif` to confirm HEIF support.
|
||||||
|
|
||||||
|
### RAW photo thumbnails
|
||||||
|
RAW formats (ARW, NEF, CR2, CR3, DNG, RAF, ORF, RW2, PEF, SRW, TIFF) are thumbnailed
|
||||||
|
by reading an embedded JPEG preview out of the TIFF container — no external RAW
|
||||||
|
decoder (libraw / dcraw) is involved. The pipeline tries two layers in order and
|
||||||
|
keeps the largest valid JPEG:
|
||||||
|
|
||||||
|
1. **Fast path (no extra dependency)** — `kamadak-exif` reads
|
||||||
|
`JPEGInterchangeFormat` from IFD0 / IFD1 directly. Covers older bodies and
|
||||||
|
most DNGs.
|
||||||
|
2. **`exiftool` fallback (recommended for RAW-heavy libraries)** — shells out
|
||||||
|
to extract `PreviewImage` / `JpgFromRaw` / `OtherImage`, which reaches
|
||||||
|
MakerNote and SubIFD-hosted previews kamadak-exif can't see (e.g. Nikon's
|
||||||
|
`PreviewIFD`, where modern Nikon bodies stash the full-res review JPEG).
|
||||||
|
If `exiftool` isn't on `PATH` this layer is skipped silently and only the
|
||||||
|
fast-path result is used.
|
||||||
|
|
||||||
|
Install `exiftool` via your package manager:
|
||||||
|
- macOS: `brew install exiftool`
|
||||||
|
- Linux (Debian/Ubuntu): `apt install libimage-exiftool-perl`
|
||||||
|
- Windows: `winget install OliverBetz.ExifTool` or `choco install exiftool`
|
||||||
|
|
||||||
|
Files where neither layer produces a valid preview fall back to ffmpeg. Anything
|
||||||
|
that still can't be decoded is marked with a `<thumb>.unsupported` sentinel in
|
||||||
|
the thumbnail directory so we don't retry it every scan. Delete those sentinels
|
||||||
|
(and any cached black thumbnails) to force retries after a tooling upgrade.
|
||||||
|
|
||||||
## Environment
|
## Environment
|
||||||
There are a handful of required environment variables to have the API run.
|
There are a handful of required environment variables to have the API run.
|
||||||
They should be defined where the binary is located or above it in an `.env` file.
|
They should be defined where the binary is located or above it in an `.env` file.
|
||||||
|
|
||||||
- `DATABASE_URL` is a path or url to a database (currently only SQLite is tested)
|
- `DATABASE_URL` is a path or url to a database (currently only SQLite is tested)
|
||||||
- `BASE_PATH` is the root from which you want to serve images and videos
|
- `BASE_PATH` is the root from which you want to serve images and videos
|
||||||
- `THUMBNAILS` is a path where generated thumbnails should be stored
|
- `THUMBNAILS` is a path where generated thumbnails should be stored. Thumbnails
|
||||||
|
mirror the source tree under `BASE_PATH` and keep the source's original
|
||||||
|
extension (e.g. `foo.arw` or `bar.mp4`), though the file contents are always
|
||||||
|
JPEG bytes — browsers content-sniff. Files that can't be thumbnailed by the
|
||||||
|
`image` crate, ffmpeg, or an embedded RAW preview get a zero-byte
|
||||||
|
`<thumb_path>.unsupported` sentinel in this directory so subsequent scans
|
||||||
|
skip them. Delete the `*.unsupported` files to force retries (for example
|
||||||
|
after upgrading ffmpeg or adding libheif)
|
||||||
|
- `VIDEO_PATH` is a path where HLS playlists and video parts should be stored
|
||||||
|
- `GIFS_DIRECTORY` is a path where generated video GIF thumbnails should be stored
|
||||||
- `BIND_URL` is the url and port to bind to (typically your own IP address)
|
- `BIND_URL` is the url and port to bind to (typically your own IP address)
|
||||||
- `SECRET_KEY` is the *hopefully* random string to sign Tokens with
|
- `SECRET_KEY` is the *hopefully* random string to sign Tokens with
|
||||||
- `RUST_LOG` is one of `off, error, warn, info, debug, trace`, from least to most noisy [error is default]
|
- `RUST_LOG` is one of `off, error, warn, info, debug, trace`, from least to most noisy [error is default]
|
||||||
|
- `EXCLUDED_DIRS` is a comma separated list of directories to exclude from the Memories API
|
||||||
|
- `PREVIEW_CLIPS_DIRECTORY` (optional) is a path where generated video preview clips should be stored [default: `preview_clips`]
|
||||||
|
- `WATCH_QUICK_INTERVAL_SECONDS` (optional) is the interval in seconds for quick file scans [default: 60]
|
||||||
|
- `WATCH_FULL_INTERVAL_SECONDS` (optional) is the interval in seconds for full file scans [default: 3600]
|
||||||
|
|
||||||
|
### AI Insights Configuration (Optional)
|
||||||
|
|
||||||
|
The following environment variables configure AI-powered photo insights and daily conversation summaries:
|
||||||
|
|
||||||
|
#### Ollama Configuration
|
||||||
|
- `OLLAMA_PRIMARY_URL` - Primary Ollama server URL [default: `http://localhost:11434`]
|
||||||
|
- Example: `http://desktop:11434` (your main/powerful server)
|
||||||
|
- `OLLAMA_FALLBACK_URL` - Fallback Ollama server URL (optional)
|
||||||
|
- Example: `http://server:11434` (always-on backup server)
|
||||||
|
- `OLLAMA_PRIMARY_MODEL` - Model to use on primary server [default: `nemotron-3-nano:30b`]
|
||||||
|
- Example: `nemotron-3-nano:30b`, `llama3.2:3b`, etc.
|
||||||
|
- `OLLAMA_FALLBACK_MODEL` - Model to use on fallback server (optional)
|
||||||
|
- If not set, uses `OLLAMA_PRIMARY_MODEL` on fallback server
|
||||||
|
|
||||||
|
**Legacy Variables** (still supported):
|
||||||
|
- `OLLAMA_URL` - Used if `OLLAMA_PRIMARY_URL` not set
|
||||||
|
- `OLLAMA_MODEL` - Used if `OLLAMA_PRIMARY_MODEL` not set
|
||||||
|
|
||||||
|
#### OpenRouter Configuration (Hybrid Backend)
|
||||||
|
The hybrid agentic backend keeps embeddings + vision local (Ollama) while routing
|
||||||
|
chat + tool-calling to OpenRouter. Enabled per-request when the client sends
|
||||||
|
`backend=hybrid`.
|
||||||
|
|
||||||
|
- `OPENROUTER_API_KEY` - OpenRouter API key. Required to enable the hybrid backend.
|
||||||
|
- `OPENROUTER_DEFAULT_MODEL` - Model id used when the client doesn't specify one
|
||||||
|
[default: `anthropic/claude-sonnet-4`]
|
||||||
|
- Example: `openai/gpt-4o-mini`, `google/gemini-2.5-flash`
|
||||||
|
- `OPENROUTER_ALLOWED_MODELS` - Comma-separated curated allowlist exposed to
|
||||||
|
clients via `GET /insights/openrouter/models`. The mobile picker shows only
|
||||||
|
these. Empty/unset = no picker, server default is used.
|
||||||
|
- Example: `openai/gpt-4o-mini,anthropic/claude-haiku-4-5,google/gemini-2.5-flash`
|
||||||
|
- `OPENROUTER_BASE_URL` - Override base URL [default: `https://openrouter.ai/api/v1`]
|
||||||
|
- `OPENROUTER_EMBEDDING_MODEL` - Embedding model for OpenRouter
|
||||||
|
[default: `openai/text-embedding-3-small`]. Only used if/when embeddings are
|
||||||
|
routed through OpenRouter (currently embeddings stay local).
|
||||||
|
- `OPENROUTER_HTTP_REFERER` - Optional `HTTP-Referer` for OpenRouter attribution
|
||||||
|
- `OPENROUTER_APP_TITLE` - Optional `X-Title` for OpenRouter attribution
|
||||||
|
|
||||||
|
Capability checks are skipped for the curated allowlist — bad model ids surface
|
||||||
|
as a 4xx from the chat call. Pick tool-capable models.
|
||||||
|
|
||||||
|
#### SMS API Configuration
|
||||||
|
- `SMS_API_URL` - URL to SMS message API [default: `http://localhost:8000`]
|
||||||
|
- Used to fetch conversation data for context in insights
|
||||||
|
- `SMS_API_TOKEN` - Authentication token for SMS API (optional)
|
||||||
|
|
||||||
|
#### Agentic Insight Generation
|
||||||
|
- `AGENTIC_MAX_ITERATIONS` - Maximum tool-call iterations per agentic insight request [default: `10`]
|
||||||
|
- Controls how many times the model can invoke tools before being forced to produce a final answer
|
||||||
|
- Increase for more thorough context gathering; decrease to limit response time
|
||||||
|
|
||||||
|
#### Insight Chat Continuation
|
||||||
|
After an agentic insight is generated, the conversation can be continued. Endpoints:
|
||||||
|
- `POST /insights/chat` — single-turn reply (non-streaming)
|
||||||
|
- `POST /insights/chat/stream` — SSE variant with live `text` deltas and
|
||||||
|
`tool_call` / `tool_result` events. Mobile client uses this.
|
||||||
|
- `GET /insights/chat/history?path=...&library=...` — rendered transcript;
|
||||||
|
each assistant message carries a `tools: [{name, arguments, result}]` array
|
||||||
|
- `POST /insights/chat/rewind` — truncate transcript at a rendered index
|
||||||
|
(drops that message + any preceding tool scaffolding + later turns). Used
|
||||||
|
for "try again from here" flows. The initial user message is protected.
|
||||||
|
|
||||||
|
Amend mode (`amend: true` in the chat request body) regenerates the insight's
|
||||||
|
title and inserts a new row instead of appending to the existing transcript,
|
||||||
|
so you can rewrite the saved summary from within chat.
|
||||||
|
|
||||||
|
- `AGENTIC_CHAT_MAX_ITERATIONS` - Cap on tool-calling iterations per chat turn [default: `6`]
|
||||||
|
- Per-request `max_iterations` (when sent by the client) is clamped to this cap
|
||||||
|
|
||||||
|
#### Fallback Behavior
|
||||||
|
- Primary server is tried first with 5-second connection timeout
|
||||||
|
- On failure, automatically falls back to secondary server (if configured)
|
||||||
|
- Total request timeout is 120 seconds to accommodate LLM inference
|
||||||
|
- Logs indicate which server/model was used and any failover attempts
|
||||||
|
|
||||||
|
#### Daily Summary Generation
|
||||||
|
Daily conversation summaries are generated automatically on server startup. Configure in `src/main.rs`:
|
||||||
|
- Date range for summary generation
|
||||||
|
- Contacts to process
|
||||||
|
- Model version used for embeddings: `nomic-embed-text:v1.5`
|
||||||
|
|
||||||
|
### Apollo + Face Recognition (Optional)

Apollo (sibling project) hosts both the Places API and the local insightface
inference service. Both integrations are optional and degrade gracefully when
unset.

- `APOLLO_API_BASE_URL` - Base URL of the sibling Apollo backend.
  - When set, photo-insight enrichment folds the user's personal place name
    (Home, Work, Cabin, ...) into the location string, and the agentic loop
    gains a `get_personal_place_at` tool. Unset = legacy Nominatim-only path.
- `APOLLO_FACE_API_BASE_URL` - Base URL for the face-detection service.
  - Falls back to `APOLLO_API_BASE_URL` when unset (typical single-Apollo
    deploy). Both unset = face feature disabled (file-watch hook and
    manual-face endpoints short-circuit silently).
- `FACE_AUTOBIND_MIN_COS` (Phase 3) - Cosine-sim floor for auto-binding a
  detected face to an existing same-named person via people-tag bootstrap
  [default: `0.4`]. See the sketch after this list.
- `FACE_DETECT_CONCURRENCY` (Phase 3) - Per-scan-tick concurrent detect
  calls fired by the file watcher [default: `8`]. Apollo serializes them
  via its single-worker GPU pool.
- `FACE_DETECT_TIMEOUT_SEC` - reqwest client timeout per detect call
  [default: `60`]. CPU inference on a backlog can take many seconds.
- `FACE_BACKLOG_MAX_PER_TICK` - Cap on the per-tick backlog drain (photos
  with a content_hash but no face_detections row) [default: `64`]. Runs
  every watcher tick regardless of quick-vs-full scan, so the unscanned
  set drains independently of the file walk.
- `FACE_HASH_BACKFILL_MAX_PER_TICK` - Cap on the per-tick content_hash
  backfill (photos registered before the hash field existed, whose hash is
  populated retroactively) [default: `2000`]. Errors don't burn the cap;
  only successful hashes count.

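The auto-bind decision reduces to a cosine-similarity check against the floor. A sketch, where the choice of reference embedding and the surrounding bootstrap logic are simplified assumptions:

```rust
use std::env;

// Cosine similarity between two face embeddings (512 f32 values each).
fn cosine_sim(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let na: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let nb: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if na == 0.0 || nb == 0.0 { 0.0 } else { dot / (na * nb) }
}

// Auto-bind only when similarity to the candidate person's reference face
// clears FACE_AUTOBIND_MIN_COS (default 0.4).
fn should_autobind(face: &[f32], person_reference: &[f32]) -> bool {
    let floor: f32 = env::var("FACE_AUTOBIND_MIN_COS")
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(0.4);
    cosine_sim(face, person_reference) >= floor
}
```
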
3 migrations/2021-09-02-000740_create_tags/down.sql Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
DROP TABLE tags;
|
||||||
|
DROP TABLE tagged_photo;
|
||||||
|
|
||||||
13 migrations/2021-09-02-000740_create_tags/up.sql Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
CREATE TABLE tags (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
created_time BIGINT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE tagged_photo (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
photo_name TEXT NOT NULL,
|
||||||
|
tag_id INTEGER NOT NULL,
|
||||||
|
created_time BIGINT NOT NULL,
|
||||||
|
CONSTRAINT tagid FOREIGN KEY (tag_id) REFERENCES tags (id) ON DELETE CASCADE ON UPDATE CASCADE
|
||||||
|
);
|
||||||
2 migrations/2025-12-17-000000_create_image_exif/down.sql Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
DROP INDEX IF EXISTS idx_image_exif_file_path;
|
||||||
|
DROP TABLE IF EXISTS image_exif;
|
||||||
32 migrations/2025-12-17-000000_create_image_exif/up.sql Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
CREATE TABLE image_exif (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
|
||||||
|
-- Camera Information
|
||||||
|
camera_make TEXT,
|
||||||
|
camera_model TEXT,
|
||||||
|
lens_model TEXT,
|
||||||
|
|
||||||
|
-- Image Properties
|
||||||
|
width INTEGER,
|
||||||
|
height INTEGER,
|
||||||
|
orientation INTEGER,
|
||||||
|
|
||||||
|
-- GPS Coordinates
|
||||||
|
gps_latitude REAL,
|
||||||
|
gps_longitude REAL,
|
||||||
|
gps_altitude REAL,
|
||||||
|
|
||||||
|
-- Capture Settings
|
||||||
|
focal_length REAL,
|
||||||
|
aperture REAL,
|
||||||
|
shutter_speed TEXT,
|
||||||
|
iso INTEGER,
|
||||||
|
date_taken BIGINT,
|
||||||
|
|
||||||
|
-- Housekeeping
|
||||||
|
created_time BIGINT NOT NULL,
|
||||||
|
last_modified BIGINT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_image_exif_file_path ON image_exif(file_path);
|
||||||
9 migrations/2025-12-17-230000_add_indexes/down.sql Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
-- Rollback indexes
|
||||||
|
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_userid;
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_path;
|
||||||
|
DROP INDEX IF EXISTS idx_tags_name;
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_photo_name;
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_tag_id;
|
||||||
|
DROP INDEX IF EXISTS idx_image_exif_camera;
|
||||||
|
DROP INDEX IF EXISTS idx_image_exif_gps;
|
||||||
17 migrations/2025-12-17-230000_add_indexes/up.sql Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
-- Add indexes for improved query performance
|
||||||
|
|
||||||
|
-- Favorites table indexes
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_favorites_userid ON favorites(userid);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_favorites_path ON favorites(path);
|
||||||
|
|
||||||
|
-- Tags table indexes
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tags_name ON tags(name);
|
||||||
|
|
||||||
|
-- Tagged photos indexes
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tagged_photo_photo_name ON tagged_photo(photo_name);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tagged_photo_tag_id ON tagged_photo(tag_id);
|
||||||
|
|
||||||
|
-- EXIF table indexes (date_taken already has index from previous migration)
|
||||||
|
-- Adding composite index for common EXIF queries
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_image_exif_camera ON image_exif(camera_make, camera_model);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_image_exif_gps ON image_exif(gps_latitude, gps_longitude);
|
||||||
3 migrations/2025-12-17-230100_unique_favorites/down.sql Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
-- Rollback unique constraint on favorites
|
||||||
|
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_unique;
|
||||||
12 migrations/2025-12-17-230100_unique_favorites/up.sql Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
-- Add unique constraint to prevent duplicate favorites per user
|
||||||
|
|
||||||
|
-- First, remove any existing duplicates (keep the oldest one)
|
||||||
|
DELETE FROM favorites
|
||||||
|
WHERE rowid NOT IN (
|
||||||
|
SELECT MIN(rowid)
|
||||||
|
FROM favorites
|
||||||
|
GROUP BY userid, path
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Add unique index to enforce constraint
|
||||||
|
CREATE UNIQUE INDEX idx_favorites_unique ON favorites(userid, path);
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
-- Remove date_taken index
|
||||||
|
DROP INDEX IF EXISTS idx_image_exif_date_taken;
|
||||||
2 migrations/2025-12-18-120000_add_date_taken_index/up.sql Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
-- Add index on date_taken for efficient date range queries
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_image_exif_date_taken ON image_exif(date_taken);
|
||||||
3 migrations/2025-12-31-000000_add_ai_insights/down.sql Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
-- Rollback AI insights table
|
||||||
|
DROP INDEX IF EXISTS idx_photo_insights_path;
|
||||||
|
DROP TABLE IF EXISTS photo_insights;
|
||||||
11 migrations/2025-12-31-000000_add_ai_insights/up.sql Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
-- AI-generated insights for individual photos
|
||||||
|
CREATE TABLE IF NOT EXISTS photo_insights (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE, -- Full path to the photo
|
||||||
|
title TEXT NOT NULL, -- "At the beach with Sarah"
|
||||||
|
summary TEXT NOT NULL, -- 2-3 sentence description
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_photo_insights_path ON photo_insights(file_path);
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE daily_conversation_summaries;
|
||||||
19 migrations/2026-01-04-060000_add_daily_summaries/up.sql Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
-- Daily conversation summaries for improved RAG quality
|
||||||
|
-- Each row = one day's conversation with a contact, summarized by LLM and embedded
|
||||||
|
|
||||||
|
CREATE TABLE daily_conversation_summaries (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
date TEXT NOT NULL, -- ISO date "2024-08-15"
|
||||||
|
contact TEXT NOT NULL, -- Contact name
|
||||||
|
summary TEXT NOT NULL, -- LLM-generated 3-5 sentence summary
|
||||||
|
message_count INTEGER NOT NULL, -- Number of messages in this day
|
||||||
|
embedding BLOB NOT NULL, -- 768-dim vector of the summary
|
||||||
|
created_at BIGINT NOT NULL, -- When this summary was generated
|
||||||
|
model_version TEXT NOT NULL, -- "nomic-embed-text:v1.5"
|
||||||
|
UNIQUE(date, contact)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes for efficient querying
|
||||||
|
CREATE INDEX idx_daily_summaries_date ON daily_conversation_summaries(date);
|
||||||
|
CREATE INDEX idx_daily_summaries_contact ON daily_conversation_summaries(contact);
|
||||||
|
CREATE INDEX idx_daily_summaries_date_contact ON daily_conversation_summaries(date, contact);
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE IF EXISTS calendar_events;
|
||||||
20 migrations/2026-01-05-000000_add_calendar_events/up.sql Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
CREATE TABLE calendar_events (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
event_uid TEXT,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
location TEXT,
|
||||||
|
start_time BIGINT NOT NULL,
|
||||||
|
end_time BIGINT NOT NULL,
|
||||||
|
all_day BOOLEAN NOT NULL DEFAULT 0,
|
||||||
|
organizer TEXT,
|
||||||
|
attendees TEXT,
|
||||||
|
embedding BLOB,
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
source_file TEXT,
|
||||||
|
UNIQUE(event_uid, start_time)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_calendar_start_time ON calendar_events(start_time);
|
||||||
|
CREATE INDEX idx_calendar_end_time ON calendar_events(end_time);
|
||||||
|
CREATE INDEX idx_calendar_time_range ON calendar_events(start_time, end_time);
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE IF EXISTS location_history;
|
||||||
19 migrations/2026-01-05-000100_add_location_history/up.sql Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
CREATE TABLE location_history (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
timestamp BIGINT NOT NULL,
|
||||||
|
latitude REAL NOT NULL,
|
||||||
|
longitude REAL NOT NULL,
|
||||||
|
accuracy INTEGER,
|
||||||
|
activity TEXT,
|
||||||
|
activity_confidence INTEGER,
|
||||||
|
place_name TEXT,
|
||||||
|
place_category TEXT,
|
||||||
|
embedding BLOB,
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
source_file TEXT,
|
||||||
|
UNIQUE(timestamp, latitude, longitude)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_location_timestamp ON location_history(timestamp);
|
||||||
|
CREATE INDEX idx_location_coords ON location_history(latitude, longitude);
|
||||||
|
CREATE INDEX idx_location_activity ON location_history(activity);
|
||||||
1 migrations/2026-01-05-000200_add_search_history/down.sql Normal file
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE IF EXISTS search_history;
|
||||||
13 migrations/2026-01-05-000200_add_search_history/up.sql Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
CREATE TABLE search_history (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
timestamp BIGINT NOT NULL,
|
||||||
|
query TEXT NOT NULL,
|
||||||
|
search_engine TEXT,
|
||||||
|
embedding BLOB NOT NULL,
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
source_file TEXT,
|
||||||
|
UNIQUE(timestamp, query)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_search_timestamp ON search_history(timestamp);
|
||||||
|
CREATE INDEX idx_search_query ON search_history(query);
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
-- Revert search performance optimization indexes
|
||||||
|
|
||||||
|
DROP INDEX IF EXISTS idx_image_exif_date_path;
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_count;
|
||||||
15 migrations/2026-01-18-000000_optimize_photo_search/up.sql Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
-- Add composite indexes for search performance optimization
|
||||||
|
-- This migration addresses N+1 query issues and enables database-level sorting
|
||||||
|
|
||||||
|
-- Covering index for date-sorted queries (supports ORDER BY + pagination)
|
||||||
|
-- Enables efficient date-based sorting without loading all files into memory
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_image_exif_date_path
|
||||||
|
ON image_exif(date_taken DESC, file_path);
|
||||||
|
|
||||||
|
-- Optimize batch tag count queries with GROUP BY
|
||||||
|
-- Reduces N individual queries to a single batch query
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tagged_photo_count
|
||||||
|
ON tagged_photo(photo_name, tag_id);
|
||||||
|
|
||||||
|
-- Update query planner statistics to optimize query execution
|
||||||
|
ANALYZE;
|
||||||
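The comments in the migration above describe the two query shapes these indexes are meant to serve. A hedged illustration follows; the statements are written out as Rust string constants and may differ from what the handlers actually build:

```rust
// Illustrative query shapes only; placeholders use SQLite's ?N syntax.

// idx_image_exif_date_path lets SQLite answer date-sorted pagination from the
// index alone instead of loading and sorting every row in memory.
const DATE_SORTED_PAGE: &str =
    "SELECT file_path FROM image_exif ORDER BY date_taken DESC LIMIT ?1 OFFSET ?2";

// idx_tagged_photo_count supports collapsing N per-photo count lookups into a
// single grouped query.
const TAG_COUNTS_BATCH: &str =
    "SELECT photo_name, COUNT(tag_id) FROM tagged_photo GROUP BY photo_name";
```
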
@@ -0,0 +1 @@
|
|||||||
|
DROP TABLE IF EXISTS video_preview_clips;
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
CREATE TABLE video_preview_clips (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending',
|
||||||
|
duration_seconds REAL,
|
||||||
|
file_size_bytes INTEGER,
|
||||||
|
error_message TEXT,
|
||||||
|
created_at TEXT NOT NULL,
|
||||||
|
updated_at TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_preview_clips_file_path ON video_preview_clips(file_path);
|
||||||
|
CREATE INDEX idx_preview_clips_status ON video_preview_clips(status);
|
||||||
19 migrations/2026-04-02-000000_photo_insights_history/down.sql Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
-- Restore original schema, retaining only the current insight per file.
|
||||||
|
CREATE TABLE photo_insights_old (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO photo_insights_old (id, file_path, title, summary, generated_at, model_version)
|
||||||
|
SELECT id, file_path, title, summary, generated_at, model_version
|
||||||
|
FROM photo_insights
|
||||||
|
WHERE is_current = 1;
|
||||||
|
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
ALTER TABLE photo_insights_old RENAME TO photo_insights;
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_photo_insights_path ON photo_insights(file_path);
|
||||||
25 migrations/2026-04-02-000000_photo_insights_history/up.sql Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
-- Convert photo_insights to an append-only history table.
|
||||||
|
-- SQLite cannot drop a UNIQUE constraint via ALTER TABLE, so we recreate the table.
|
||||||
|
-- This preserves existing insight IDs so that future entity_facts.source_insight_id
|
||||||
|
-- FK references remain valid.
|
||||||
|
|
||||||
|
CREATE TABLE photo_insights_new (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT 0
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Migrate existing rows; mark them all as current (one row per path currently).
|
||||||
|
INSERT INTO photo_insights_new (id, file_path, title, summary, generated_at, model_version, is_current)
|
||||||
|
SELECT id, file_path, title, summary, generated_at, model_version, 1
|
||||||
|
FROM photo_insights;
|
||||||
|
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
ALTER TABLE photo_insights_new RENAME TO photo_insights;
|
||||||
|
|
||||||
|
CREATE INDEX idx_photo_insights_file_path ON photo_insights(file_path);
|
||||||
|
CREATE INDEX idx_photo_insights_current ON photo_insights(file_path, is_current);
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
DROP TABLE IF EXISTS entity_photo_links;
|
||||||
|
DROP TABLE IF EXISTS entity_facts;
|
||||||
|
DROP TABLE IF EXISTS entities;
|
||||||
55 migrations/2026-04-02-000100_add_knowledge_memory/up.sql Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
-- Entity-relationship knowledge memory tables.
|
||||||
|
-- Entities are the nodes (people, places, events, things).
|
||||||
|
-- entity_facts are typed claims about or between entities.
|
||||||
|
-- entity_photo_links connect entities to specific photos.
|
||||||
|
|
||||||
|
CREATE TABLE entities (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
entity_type TEXT NOT NULL, -- 'person' | 'place' | 'event' | 'thing'
|
||||||
|
description TEXT NOT NULL DEFAULT '',
|
||||||
|
embedding BLOB, -- 768-dim f32 vector; nullable if embedding service was unavailable
|
||||||
|
confidence REAL NOT NULL DEFAULT 0.5,
|
||||||
|
status TEXT NOT NULL DEFAULT 'active', -- 'active' | 'reviewed' | 'rejected'
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
updated_at BIGINT NOT NULL,
|
||||||
|
UNIQUE(name, entity_type)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_entities_type ON entities(entity_type);
|
||||||
|
CREATE INDEX idx_entities_status ON entities(status);
|
||||||
|
CREATE INDEX idx_entities_name ON entities(name);
|
||||||
|
|
||||||
|
CREATE TABLE entity_facts (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
subject_entity_id INTEGER NOT NULL,
|
||||||
|
predicate TEXT NOT NULL,
|
||||||
|
object_entity_id INTEGER, -- nullable: entity-to-entity relationship target
|
||||||
|
object_value TEXT, -- nullable: free-text attribute value
|
||||||
|
source_photo TEXT, -- photo path that prompted extraction (injected server-side)
|
||||||
|
source_insight_id INTEGER, -- backfilled after insight is stored
|
||||||
|
confidence REAL NOT NULL DEFAULT 0.6,
|
||||||
|
status TEXT NOT NULL DEFAULT 'active', -- 'active' | 'reviewed' | 'rejected'
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
CONSTRAINT fk_ef_subject FOREIGN KEY (subject_entity_id) REFERENCES entities(id) ON DELETE CASCADE,
|
||||||
|
CONSTRAINT fk_ef_object FOREIGN KEY (object_entity_id) REFERENCES entities(id) ON DELETE SET NULL,
|
||||||
|
CONSTRAINT fk_ef_insight FOREIGN KEY (source_insight_id) REFERENCES photo_insights(id) ON DELETE SET NULL,
|
||||||
|
CHECK (object_entity_id IS NOT NULL OR object_value IS NOT NULL)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_entity_facts_subject ON entity_facts(subject_entity_id);
|
||||||
|
CREATE INDEX idx_entity_facts_predicate ON entity_facts(predicate);
|
||||||
|
CREATE INDEX idx_entity_facts_status ON entity_facts(status);
|
||||||
|
CREATE INDEX idx_entity_facts_source_photo ON entity_facts(source_photo);
|
||||||
|
|
||||||
|
CREATE TABLE entity_photo_links (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
entity_id INTEGER NOT NULL,
|
||||||
|
file_path TEXT NOT NULL,
|
||||||
|
role TEXT NOT NULL, -- 'subject' | 'location' | 'event' | 'thing'
|
||||||
|
CONSTRAINT fk_epl_entity FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE,
|
||||||
|
UNIQUE(entity_id, file_path, role)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_entity_photo_links_entity ON entity_photo_links(entity_id);
|
||||||
|
CREATE INDEX idx_entity_photo_links_photo ON entity_photo_links(file_path);
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
-- SQLite doesn't support DROP COLUMN directly, so we recreate the table
|
||||||
|
CREATE TABLE photo_insights_backup AS SELECT id, file_path, title, summary, generated_at, model_version, is_current FROM photo_insights;
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
CREATE TABLE photo_insights (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
file_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT TRUE
|
||||||
|
);
|
||||||
|
INSERT INTO photo_insights SELECT * FROM photo_insights_backup;
|
||||||
|
DROP TABLE photo_insights_backup;
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
ALTER TABLE photo_insights ADD COLUMN training_messages TEXT;
|
||||||
|
ALTER TABLE photo_insights ADD COLUMN approved BOOLEAN;
|
||||||
155 migrations/2026-04-17-000000_multi_library/down.sql Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
-- Revert multi-library support.
|
||||||
|
-- Drops library_id/content_hash/size_bytes, renames rel_path back to the
|
||||||
|
-- original column names, and drops the libraries table. Rows originally
|
||||||
|
-- from non-primary libraries (id > 1) would be orphaned, so the rollback
|
||||||
|
-- keeps only rows from library_id=1.
|
||||||
|
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
|
||||||
|
-- tagged_photo: rel_path → photo_name.
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_relpath_tag;
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_rel_path;
|
||||||
|
ALTER TABLE tagged_photo RENAME COLUMN rel_path TO photo_name;
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tagged_photo_photo_name ON tagged_photo(photo_name);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_tagged_photo_count ON tagged_photo(photo_name, tag_id);
|
||||||
|
|
||||||
|
-- favorites: rel_path → path.
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_unique;
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_rel_path;
|
||||||
|
ALTER TABLE favorites RENAME COLUMN rel_path TO path;
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_favorites_path ON favorites(path);
|
||||||
|
CREATE UNIQUE INDEX IF NOT EXISTS idx_favorites_unique ON favorites(userid, path);
|
||||||
|
|
||||||
|
-- video_preview_clips: drop library_id, rel_path → file_path.
|
||||||
|
CREATE TABLE video_preview_clips_old (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending',
|
||||||
|
duration_seconds REAL,
|
||||||
|
file_size_bytes INTEGER,
|
||||||
|
error_message TEXT,
|
||||||
|
created_at TEXT NOT NULL,
|
||||||
|
updated_at TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO video_preview_clips_old (
|
||||||
|
id, file_path, status, duration_seconds, file_size_bytes,
|
||||||
|
error_message, created_at, updated_at
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, rel_path, status, duration_seconds, file_size_bytes,
|
||||||
|
error_message, created_at, updated_at
|
||||||
|
FROM video_preview_clips
|
||||||
|
WHERE library_id = 1;
|
||||||
|
|
||||||
|
DROP TABLE video_preview_clips;
|
||||||
|
ALTER TABLE video_preview_clips_old RENAME TO video_preview_clips;
|
||||||
|
|
||||||
|
CREATE INDEX idx_preview_clips_file_path ON video_preview_clips(file_path);
|
||||||
|
CREATE INDEX idx_preview_clips_status ON video_preview_clips(status);
|
||||||
|
|
||||||
|
-- entity_photo_links: drop library_id, rel_path → file_path.
|
||||||
|
CREATE TABLE entity_photo_links_old (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
entity_id INTEGER NOT NULL,
|
||||||
|
file_path TEXT NOT NULL,
|
||||||
|
role TEXT NOT NULL,
|
||||||
|
CONSTRAINT fk_epl_entity FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE,
|
||||||
|
UNIQUE(entity_id, file_path, role)
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO entity_photo_links_old (id, entity_id, file_path, role)
|
||||||
|
SELECT id, entity_id, rel_path, role
|
||||||
|
FROM entity_photo_links
|
||||||
|
WHERE library_id = 1;
|
||||||
|
|
||||||
|
DROP TABLE entity_photo_links;
|
||||||
|
ALTER TABLE entity_photo_links_old RENAME TO entity_photo_links;
|
||||||
|
|
||||||
|
CREATE INDEX idx_entity_photo_links_entity ON entity_photo_links(entity_id);
|
||||||
|
CREATE INDEX idx_entity_photo_links_photo ON entity_photo_links(file_path);
|
||||||
|
|
||||||
|
-- photo_insights: drop library_id, rel_path → file_path.
|
||||||
|
CREATE TABLE photo_insights_old (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT 0,
|
||||||
|
training_messages TEXT,
|
||||||
|
approved BOOLEAN
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO photo_insights_old (
|
||||||
|
id, file_path, title, summary, generated_at, model_version, is_current,
|
||||||
|
training_messages, approved
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, rel_path, title, summary, generated_at, model_version, is_current,
|
||||||
|
training_messages, approved
|
||||||
|
FROM photo_insights
|
||||||
|
WHERE library_id = 1;
|
||||||
|
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
ALTER TABLE photo_insights_old RENAME TO photo_insights;
|
||||||
|
|
||||||
|
CREATE INDEX idx_photo_insights_file_path ON photo_insights(file_path);
|
||||||
|
CREATE INDEX idx_photo_insights_current ON photo_insights(file_path, is_current);
|
||||||
|
|
||||||
|
-- image_exif: drop library_id/content_hash/size_bytes, rel_path → file_path.
|
||||||
|
CREATE TABLE image_exif_old (
|
||||||
|
id INTEGER PRIMARY KEY NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
camera_make TEXT,
|
||||||
|
camera_model TEXT,
|
||||||
|
lens_model TEXT,
|
||||||
|
width INTEGER,
|
||||||
|
height INTEGER,
|
||||||
|
orientation INTEGER,
|
||||||
|
gps_latitude REAL,
|
||||||
|
gps_longitude REAL,
|
||||||
|
gps_altitude REAL,
|
||||||
|
focal_length REAL,
|
||||||
|
aperture REAL,
|
||||||
|
shutter_speed TEXT,
|
||||||
|
iso INTEGER,
|
||||||
|
date_taken BIGINT,
|
||||||
|
created_time BIGINT NOT NULL,
|
||||||
|
last_modified BIGINT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO image_exif_old (
|
||||||
|
id, file_path,
|
||||||
|
camera_make, camera_model, lens_model,
|
||||||
|
width, height, orientation,
|
||||||
|
gps_latitude, gps_longitude, gps_altitude,
|
||||||
|
focal_length, aperture, shutter_speed, iso, date_taken,
|
||||||
|
created_time, last_modified
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, rel_path,
|
||||||
|
camera_make, camera_model, lens_model,
|
||||||
|
width, height, orientation,
|
||||||
|
gps_latitude, gps_longitude, gps_altitude,
|
||||||
|
focal_length, aperture, shutter_speed, iso, date_taken,
|
||||||
|
created_time, last_modified
|
||||||
|
FROM image_exif
|
||||||
|
WHERE library_id = 1;
|
||||||
|
|
||||||
|
DROP TABLE image_exif;
|
||||||
|
ALTER TABLE image_exif_old RENAME TO image_exif;
|
||||||
|
|
||||||
|
CREATE INDEX idx_image_exif_file_path ON image_exif(file_path);
|
||||||
|
CREATE INDEX idx_image_exif_camera ON image_exif(camera_make, camera_model);
|
||||||
|
CREATE INDEX idx_image_exif_gps ON image_exif(gps_latitude, gps_longitude);
|
||||||
|
CREATE INDEX idx_image_exif_date_taken ON image_exif(date_taken);
|
||||||
|
CREATE INDEX idx_image_exif_date_path ON image_exif(date_taken DESC, file_path);
|
||||||
|
|
||||||
|
-- Finally, drop the libraries registry.
|
||||||
|
DROP TABLE libraries;
|
||||||
|
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
|
||||||
|
ANALYZE;
|
||||||
216 migrations/2026-04-17-000000_multi_library/up.sql Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
-- Multi-library support.
|
||||||
|
-- Adds `libraries` registry table and a `library_id` column on per-instance
|
||||||
|
-- metadata tables. Renames `file_path` / `photo_name` to `rel_path` for
|
||||||
|
-- semantic clarity (values already stored relative to BASE_PATH).
|
||||||
|
-- Adds `content_hash` + `size_bytes` to `image_exif` to support
|
||||||
|
-- content-based dedup of thumbnails and HLS output across libraries.
|
||||||
|
--
|
||||||
|
-- SQLite cannot alter column constraints in place, so per-instance tables
|
||||||
|
-- are recreated following the idiom established in
|
||||||
|
-- 2026-04-02-000000_photo_insights_history/up.sql. Existing row `id`s are
|
||||||
|
-- preserved so foreign keys (entity_facts.source_insight_id, etc.) remain
|
||||||
|
-- valid after migration.
|
||||||
|
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 1. Libraries registry.
|
||||||
|
-- Seeded with a placeholder for the primary library; AppState patches
|
||||||
|
-- `root_path` from the BASE_PATH env var on first boot. Subsequent
|
||||||
|
-- prod-to-dev DB syncs update this row via a single SQL UPDATE.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
CREATE TABLE libraries (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
name TEXT NOT NULL UNIQUE,
|
||||||
|
root_path TEXT NOT NULL,
|
||||||
|
created_at BIGINT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO libraries (id, name, root_path, created_at)
|
||||||
|
VALUES (1, 'main', 'BASE_PATH_PLACEHOLDER', strftime('%s','now'));
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 2. image_exif: + library_id, file_path → rel_path, + content_hash/size_bytes.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
CREATE TABLE image_exif_new (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- Camera information
|
||||||
|
camera_make TEXT,
|
||||||
|
camera_model TEXT,
|
||||||
|
lens_model TEXT,
|
||||||
|
|
||||||
|
-- Image properties
|
||||||
|
width INTEGER,
|
||||||
|
height INTEGER,
|
||||||
|
orientation INTEGER,
|
||||||
|
|
||||||
|
-- GPS
|
||||||
|
gps_latitude REAL,
|
||||||
|
gps_longitude REAL,
|
||||||
|
gps_altitude REAL,
|
||||||
|
|
||||||
|
-- Capture settings
|
||||||
|
focal_length REAL,
|
||||||
|
aperture REAL,
|
||||||
|
shutter_speed TEXT,
|
||||||
|
iso INTEGER,
|
||||||
|
date_taken BIGINT,
|
||||||
|
|
||||||
|
-- Housekeeping
|
||||||
|
created_time BIGINT NOT NULL,
|
||||||
|
last_modified BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- Content identity (backfilled by the `backfill_hashes` binary and by the watcher for new files)
|
||||||
|
content_hash TEXT,
|
||||||
|
size_bytes BIGINT,
|
||||||
|
|
||||||
|
UNIQUE(library_id, rel_path)
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO image_exif_new (
|
||||||
|
id, library_id, rel_path,
|
||||||
|
camera_make, camera_model, lens_model,
|
||||||
|
width, height, orientation,
|
||||||
|
gps_latitude, gps_longitude, gps_altitude,
|
||||||
|
focal_length, aperture, shutter_speed, iso, date_taken,
|
||||||
|
created_time, last_modified
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, 1, file_path,
|
||||||
|
camera_make, camera_model, lens_model,
|
||||||
|
width, height, orientation,
|
||||||
|
gps_latitude, gps_longitude, gps_altitude,
|
||||||
|
focal_length, aperture, shutter_speed, iso, date_taken,
|
||||||
|
created_time, last_modified
|
||||||
|
FROM image_exif;
|
||||||
|
|
||||||
|
DROP TABLE image_exif;
|
||||||
|
ALTER TABLE image_exif_new RENAME TO image_exif;
|
||||||
|
|
||||||
|
CREATE INDEX idx_image_exif_rel_path ON image_exif(rel_path);
|
||||||
|
CREATE INDEX idx_image_exif_camera ON image_exif(camera_make, camera_model);
|
||||||
|
CREATE INDEX idx_image_exif_gps ON image_exif(gps_latitude, gps_longitude);
|
||||||
|
CREATE INDEX idx_image_exif_date_taken ON image_exif(date_taken);
|
||||||
|
CREATE INDEX idx_image_exif_date_path ON image_exif(date_taken DESC, rel_path);
|
||||||
|
CREATE INDEX idx_image_exif_lib_date ON image_exif(library_id, date_taken);
|
||||||
|
CREATE INDEX idx_image_exif_content_hash ON image_exif(content_hash);
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 3. photo_insights: + library_id, file_path → rel_path.
|
||||||
|
-- Preserve `id` so entity_facts.source_insight_id FKs remain valid.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
CREATE TABLE photo_insights_new (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT 0,
|
||||||
|
training_messages TEXT,
|
||||||
|
approved BOOLEAN
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO photo_insights_new (
|
||||||
|
id, library_id, rel_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, 1, file_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved
|
||||||
|
FROM photo_insights;
|
||||||
|
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
ALTER TABLE photo_insights_new RENAME TO photo_insights;
|
||||||
|
|
||||||
|
CREATE INDEX idx_photo_insights_rel_path ON photo_insights(rel_path);
|
||||||
|
CREATE INDEX idx_photo_insights_current ON photo_insights(library_id, rel_path, is_current);
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 4. entity_photo_links: + library_id, file_path → rel_path.
|
||||||
|
-- Preserves entity FK; UNIQUE now includes library_id to allow the same
|
||||||
|
-- rel_path to link entities in multiple libraries independently.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
CREATE TABLE entity_photo_links_new (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
entity_id INTEGER NOT NULL,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
role TEXT NOT NULL,
|
||||||
|
CONSTRAINT fk_epl_entity FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE,
|
||||||
|
UNIQUE(entity_id, library_id, rel_path, role)
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO entity_photo_links_new (id, entity_id, library_id, rel_path, role)
|
||||||
|
SELECT id, entity_id, 1, file_path, role FROM entity_photo_links;
|
||||||
|
|
||||||
|
DROP TABLE entity_photo_links;
|
||||||
|
ALTER TABLE entity_photo_links_new RENAME TO entity_photo_links;
|
||||||
|
|
||||||
|
CREATE INDEX idx_entity_photo_links_entity ON entity_photo_links(entity_id);
|
||||||
|
CREATE INDEX idx_entity_photo_links_photo ON entity_photo_links(library_id, rel_path);
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 5. video_preview_clips: + library_id, file_path → rel_path.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
CREATE TABLE video_preview_clips_new (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending',
|
||||||
|
duration_seconds REAL,
|
||||||
|
file_size_bytes INTEGER,
|
||||||
|
error_message TEXT,
|
||||||
|
created_at TEXT NOT NULL,
|
||||||
|
updated_at TEXT NOT NULL,
|
||||||
|
UNIQUE(library_id, rel_path)
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO video_preview_clips_new (
|
||||||
|
id, library_id, rel_path, status, duration_seconds, file_size_bytes,
|
||||||
|
error_message, created_at, updated_at
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, 1, file_path, status, duration_seconds, file_size_bytes,
|
||||||
|
error_message, created_at, updated_at
|
||||||
|
FROM video_preview_clips;
|
||||||
|
|
||||||
|
DROP TABLE video_preview_clips;
|
||||||
|
ALTER TABLE video_preview_clips_new RENAME TO video_preview_clips;
|
||||||
|
|
||||||
|
CREATE INDEX idx_preview_clips_rel_path ON video_preview_clips(rel_path);
|
||||||
|
CREATE INDEX idx_preview_clips_status ON video_preview_clips(status);
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 6. favorites: path → rel_path. Library-agnostic (cross-library sharing).
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
ALTER TABLE favorites RENAME COLUMN path TO rel_path;
|
||||||
|
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_path;
|
||||||
|
DROP INDEX IF EXISTS idx_favorites_unique;
|
||||||
|
CREATE INDEX idx_favorites_rel_path ON favorites(rel_path);
|
||||||
|
CREATE UNIQUE INDEX idx_favorites_unique ON favorites(userid, rel_path);
|
||||||
|
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
-- 7. tagged_photo: photo_name → rel_path. Library-agnostic.
|
||||||
|
-- Dedup first so the (rel_path, tag_id) unique index can be created safely.
|
||||||
|
-- ---------------------------------------------------------------------------
|
||||||
|
ALTER TABLE tagged_photo RENAME COLUMN photo_name TO rel_path;
|
||||||
|
|
||||||
|
DELETE FROM tagged_photo
|
||||||
|
WHERE id NOT IN (
|
||||||
|
SELECT MIN(id) FROM tagged_photo GROUP BY rel_path, tag_id
|
||||||
|
);
|
||||||
|
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_photo_name;
|
||||||
|
DROP INDEX IF EXISTS idx_tagged_photo_count;
|
||||||
|
CREATE INDEX idx_tagged_photo_rel_path ON tagged_photo(rel_path);
|
||||||
|
CREATE UNIQUE INDEX idx_tagged_photo_relpath_tag ON tagged_photo(rel_path, tag_id);
|
||||||
|
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
|
||||||
|
ANALYZE;
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
-- No-op: there's no sensible way to recover which rows originally used
|
||||||
|
-- backslashes, and there's no reason to want backslashes back. The
|
||||||
|
-- deleted duplicates are also gone.
|
||||||
|
SELECT 1;
|
||||||
@@ -0,0 +1,85 @@
|
|||||||
|
-- Normalize `rel_path` columns to forward slashes. Windows ingest
|
||||||
|
-- historically produced a mix of `\` and `/`, which broke lookups and
|
||||||
|
-- caused spurious UNIQUE-constraint violations on re-registration.
|
||||||
|
--
|
||||||
|
-- SQLite enforces UNIQUE per-row during UPDATE, so we have to drop
|
||||||
|
-- losing duplicates BEFORE normalizing. For each table that has a
|
||||||
|
-- UNIQUE on rel_path, we delete rows whose normalized form already
|
||||||
|
-- exists in canonical (forward-slash) form — keeping the existing
|
||||||
|
-- forward-slash row as the survivor. Then a flat UPDATE finishes the
|
||||||
|
-- job for remaining backslash rows.
|
||||||
|
|
||||||
|
-- image_exif: UNIQUE(library_id, rel_path)
|
||||||
|
DELETE FROM image_exif
|
||||||
|
WHERE rel_path LIKE '%\%'
|
||||||
|
AND EXISTS (
|
||||||
|
SELECT 1 FROM image_exif AS other
|
||||||
|
WHERE other.library_id = image_exif.library_id
|
||||||
|
AND other.rel_path = REPLACE(image_exif.rel_path, '\', '/')
|
||||||
|
AND other.id != image_exif.id
|
||||||
|
);
|
||||||
|
UPDATE image_exif
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
-- favorites: UNIQUE(userid, rel_path)
|
||||||
|
DELETE FROM favorites
|
||||||
|
WHERE rel_path LIKE '%\%'
|
||||||
|
AND EXISTS (
|
||||||
|
SELECT 1 FROM favorites AS other
|
||||||
|
WHERE other.userid = favorites.userid
|
||||||
|
AND other.rel_path = REPLACE(favorites.rel_path, '\', '/')
|
||||||
|
AND other.id != favorites.id
|
||||||
|
);
|
||||||
|
UPDATE favorites
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
-- tagged_photo: UNIQUE(rel_path, tag_id)
|
||||||
|
DELETE FROM tagged_photo
|
||||||
|
WHERE rel_path LIKE '%\%'
|
||||||
|
AND EXISTS (
|
||||||
|
SELECT 1 FROM tagged_photo AS other
|
||||||
|
WHERE other.tag_id = tagged_photo.tag_id
|
||||||
|
AND other.rel_path = REPLACE(tagged_photo.rel_path, '\', '/')
|
||||||
|
AND other.id != tagged_photo.id
|
||||||
|
);
|
||||||
|
UPDATE tagged_photo
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
-- entity_photo_links: UNIQUE(entity_id, library_id, rel_path, role)
|
||||||
|
DELETE FROM entity_photo_links
|
||||||
|
WHERE rel_path LIKE '%\%'
|
||||||
|
AND EXISTS (
|
||||||
|
SELECT 1 FROM entity_photo_links AS other
|
||||||
|
WHERE other.entity_id = entity_photo_links.entity_id
|
||||||
|
AND other.library_id = entity_photo_links.library_id
|
||||||
|
AND other.role = entity_photo_links.role
|
||||||
|
AND other.rel_path = REPLACE(entity_photo_links.rel_path, '\', '/')
|
||||||
|
AND other.id != entity_photo_links.id
|
||||||
|
);
|
||||||
|
UPDATE entity_photo_links
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
-- video_preview_clips: UNIQUE(library_id, rel_path)
|
||||||
|
DELETE FROM video_preview_clips
|
||||||
|
WHERE rel_path LIKE '%\%'
|
||||||
|
AND EXISTS (
|
||||||
|
SELECT 1 FROM video_preview_clips AS other
|
||||||
|
WHERE other.library_id = video_preview_clips.library_id
|
||||||
|
AND other.rel_path = REPLACE(video_preview_clips.rel_path, '\', '/')
|
||||||
|
AND other.id != video_preview_clips.id
|
||||||
|
);
|
||||||
|
UPDATE video_preview_clips
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
-- photo_insights has no UNIQUE on rel_path (history table), so a plain
|
||||||
|
-- normalize is safe.
|
||||||
|
UPDATE photo_insights
|
||||||
|
SET rel_path = REPLACE(rel_path, '\', '/')
|
||||||
|
WHERE rel_path LIKE '%\%';
|
||||||
|
|
||||||
|
ANALYZE;
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
-- SQLite can't DROP COLUMN cleanly on older versions; rebuild the table.
|
||||||
|
CREATE TABLE photo_insights_backup AS
|
||||||
|
SELECT id, library_id, rel_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved
|
||||||
|
FROM photo_insights;
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
CREATE TABLE photo_insights (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
training_messages TEXT,
|
||||||
|
approved BOOLEAN
|
||||||
|
);
|
||||||
|
INSERT INTO photo_insights
|
||||||
|
SELECT id, library_id, rel_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved
|
||||||
|
FROM photo_insights_backup;
|
||||||
|
DROP TABLE photo_insights_backup;
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE photo_insights ADD COLUMN backend TEXT NOT NULL DEFAULT 'local';
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
-- SQLite can't DROP COLUMN cleanly on older versions; rebuild the table.
|
||||||
|
CREATE TABLE photo_insights_backup AS
|
||||||
|
SELECT id, library_id, rel_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved, backend
|
||||||
|
FROM photo_insights;
|
||||||
|
DROP TABLE photo_insights;
|
||||||
|
CREATE TABLE photo_insights (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
library_id INTEGER NOT NULL REFERENCES libraries(id),
|
||||||
|
rel_path TEXT NOT NULL,
|
||||||
|
title TEXT NOT NULL,
|
||||||
|
summary TEXT NOT NULL,
|
||||||
|
generated_at BIGINT NOT NULL,
|
||||||
|
model_version TEXT NOT NULL,
|
||||||
|
is_current BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
training_messages TEXT,
|
||||||
|
approved BOOLEAN,
|
||||||
|
backend TEXT NOT NULL DEFAULT 'local'
|
||||||
|
);
|
||||||
|
INSERT INTO photo_insights
|
||||||
|
SELECT id, library_id, rel_path, title, summary, generated_at, model_version,
|
||||||
|
is_current, training_messages, approved, backend
|
||||||
|
FROM photo_insights_backup;
|
||||||
|
DROP TABLE photo_insights_backup;
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
ALTER TABLE photo_insights ADD COLUMN fewshot_source_ids TEXT;
|
||||||
2 migrations/2026-04-29-000000_add_faces/down.sql Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
DROP TABLE IF EXISTS face_detections;
|
||||||
|
DROP TABLE IF EXISTS persons;
|
||||||
67 migrations/2026-04-29-000000_add_faces/up.sql Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
-- Local face recognition tables.
|
||||||
|
--
|
||||||
|
-- `persons` are visual identities (the "who" of a face). The optional
|
||||||
|
-- `entity_id` bridges to the existing knowledge graph `entities` table —
|
||||||
|
-- when set, this person is the visual side of an LLM-extracted entity.
|
||||||
|
-- Don't auto-create entities from persons; the entity table represents
|
||||||
|
-- LLM-extracted knowledge with its own confidence semantics, and silently
|
||||||
|
-- filling it from face detections muddies the provenance.
|
||||||
|
--
|
||||||
|
-- `face_detections` carries one row per detected face on a content_hash,
|
||||||
|
-- plus marker rows with `status='no_faces'` or `status='failed'` so the
|
||||||
|
-- file watcher knows not to re-scan a hash. Keying on `content_hash`
|
||||||
|
-- (cross-library dedup) rather than `(library_id, rel_path)` means the
|
||||||
|
-- same JPEG in two libraries is scanned once. The denormalized `rel_path`
|
||||||
|
-- carries the most-recently-seen path — useful for cluster-thumb URL
|
||||||
|
-- generation; canonical path lookup goes through image_exif.
|
||||||
|
|
||||||
|
CREATE TABLE persons (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
cover_face_id INTEGER, -- backfilled when the first face binds
|
||||||
|
entity_id INTEGER, -- optional bridge to entities(id)
|
||||||
|
created_from_tag BOOLEAN NOT NULL DEFAULT 0,
|
||||||
|
notes TEXT,
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
updated_at BIGINT NOT NULL,
|
||||||
|
CONSTRAINT fk_persons_entity FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE SET NULL,
|
||||||
|
UNIQUE(name COLLATE NOCASE)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_persons_entity ON persons(entity_id);
|
||||||
|
|
||||||
|
CREATE TABLE face_detections (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
library_id INTEGER NOT NULL,
|
||||||
|
content_hash TEXT NOT NULL, -- canonical key (cross-library dedup)
|
||||||
|
rel_path TEXT NOT NULL, -- denormalized; most recently seen
|
||||||
|
bbox_x REAL, -- normalized 0..1; NULL on marker rows
|
||||||
|
bbox_y REAL,
|
||||||
|
bbox_w REAL,
|
||||||
|
bbox_h REAL,
|
||||||
|
embedding BLOB, -- 512×f32 = 2048 bytes; NULL on marker rows
|
||||||
|
confidence REAL, -- detector score
|
||||||
|
source TEXT NOT NULL, -- 'auto' | 'manual'
|
||||||
|
person_id INTEGER,
|
||||||
|
status TEXT NOT NULL DEFAULT 'detected', -- 'detected' | 'no_faces' | 'failed'
|
||||||
|
model_version TEXT NOT NULL, -- e.g. 'buffalo_l'; embedding lineage
|
||||||
|
created_at BIGINT NOT NULL,
|
||||||
|
CONSTRAINT fk_fd_library FOREIGN KEY (library_id) REFERENCES libraries(id),
|
||||||
|
CONSTRAINT fk_fd_person FOREIGN KEY (person_id) REFERENCES persons(id) ON DELETE SET NULL,
|
||||||
|
-- Detected rows carry geometry + embedding; marker rows ('no_faces',
|
||||||
|
-- 'failed') carry neither. CHECK enforces the invariant so manual
|
||||||
|
-- inserts can't slip through with half a row.
|
||||||
|
CONSTRAINT chk_marker CHECK (
|
||||||
|
(status = 'detected' AND bbox_x IS NOT NULL AND embedding IS NOT NULL)
|
||||||
|
OR (status IN ('no_faces','failed') AND bbox_x IS NULL AND embedding IS NULL)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_face_detections_hash ON face_detections(content_hash);
|
||||||
|
CREATE INDEX idx_face_detections_lib_path ON face_detections(library_id, rel_path);
|
||||||
|
CREATE INDEX idx_face_detections_person ON face_detections(person_id);
|
||||||
|
CREATE INDEX idx_face_detections_status ON face_detections(status);
|
||||||
|
-- One marker row per (content_hash, status='no_faces') so the file watcher
|
||||||
|
-- doesn't double-mark when a hash is seen on multiple full-scan passes.
|
||||||
|
CREATE UNIQUE INDEX idx_face_detections_no_faces_unique
|
||||||
|
ON face_detections(content_hash) WHERE status = 'no_faces';
|
||||||
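The CHECK invariant in the migration above (detected rows carry geometry plus embedding, marker rows carry neither) maps naturally onto how a watcher would turn a detect response into rows. A sketch, with the struct and enum names as assumptions rather than the actual watcher types:

```rust
// `DetectedFace` mirrors the detect-service response shape; names are assumptions.
struct DetectedFace {
    bbox: (f32, f32, f32, f32), // normalized x, y, w, h
    embedding: Vec<f32>,        // 512 floats
    confidence: f32,
}

enum FaceRow {
    // status='detected': geometry and embedding are required.
    Detected { bbox: (f32, f32, f32, f32), embedding: Vec<f32>, confidence: f32 },
    // status='no_faces' / 'failed': marker only, no geometry, no embedding.
    NoFaces,
    Failed,
}

fn rows_for_scan(result: Result<Vec<DetectedFace>, String>) -> Vec<FaceRow> {
    match result {
        Err(_) => vec![FaceRow::Failed],
        Ok(faces) if faces.is_empty() => vec![FaceRow::NoFaces],
        Ok(faces) => faces
            .into_iter()
            .map(|f| FaceRow::Detected {
                bbox: f.bbox,
                embedding: f.embedding,
                confidence: f.confidence,
            })
            .collect(),
    }
}
```
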
2 migrations/2026-04-29-000200_add_is_ignored/down.sql Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
DROP INDEX IF EXISTS idx_persons_is_ignored;
|
||||||
|
ALTER TABLE persons DROP COLUMN is_ignored;
|
||||||
20 migrations/2026-04-29-000200_add_is_ignored/up.sql Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
-- IGNORE / junk bucket for the face recognition feature.
|
||||||
|
--
|
||||||
|
-- An "Ignored" person is the destination for strangers, faces the user
|
||||||
|
-- doesn't want tagged, and false detections. It looks like any other
|
||||||
|
-- person row (so face_detections.person_id stays a clean foreign key)
|
||||||
|
-- but `is_ignored=1` flags it for special UI treatment:
|
||||||
|
-- - hidden from the persons list by default
|
||||||
|
-- - excluded from `find_persons_by_names_ci` so a tag-name match
|
||||||
|
-- can never auto-bind a real face to the ignore bucket
|
||||||
|
-- - cluster-suggest already filters by `person_id IS NULL`, so faces
|
||||||
|
-- bound to an ignored person are naturally excluded from future
|
||||||
|
-- re-clustering
|
||||||
|
--
|
||||||
|
-- Partial index because the WHERE-clause is small (typically 1 row),
|
||||||
|
-- and we only ever query for `is_ignored = 1` to find the bucket.
|
||||||
|
|
||||||
|
ALTER TABLE persons ADD COLUMN is_ignored BOOLEAN NOT NULL DEFAULT 0;
|
||||||
|
|
||||||
|
CREATE INDEX idx_persons_is_ignored
|
||||||
|
ON persons(is_ignored) WHERE is_ignored = 1;
|
||||||
36 specs/001-video-wall/checklists/requirements.md Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# Specification Quality Checklist: VideoWall
|
||||||
|
|
||||||
|
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||||
|
**Created**: 2026-02-25
|
||||||
|
**Feature**: [spec.md](../spec.md)
|
||||||
|
|
||||||
|
## Content Quality
|
||||||
|
|
||||||
|
- [x] No implementation details (languages, frameworks, APIs)
|
||||||
|
- [x] Focused on user value and business needs
|
||||||
|
- [x] Written for non-technical stakeholders
|
||||||
|
- [x] All mandatory sections completed
|
||||||
|
|
||||||
|
## Requirement Completeness
|
||||||
|
|
||||||
|
- [x] No [NEEDS CLARIFICATION] markers remain
|
||||||
|
- [x] Requirements are testable and unambiguous
|
||||||
|
- [x] Success criteria are measurable
|
||||||
|
- [x] Success criteria are technology-agnostic (no implementation details)
|
||||||
|
- [x] All acceptance scenarios are defined
|
||||||
|
- [x] Edge cases are identified
|
||||||
|
- [x] Scope is clearly bounded
|
||||||
|
- [x] Dependencies and assumptions identified
|
||||||
|
|
||||||
|
## Feature Readiness
|
||||||
|
|
||||||
|
- [x] All functional requirements have clear acceptance criteria
|
||||||
|
- [x] User scenarios cover primary flows
|
||||||
|
- [x] Feature meets measurable outcomes defined in Success Criteria
|
||||||
|
- [x] No implementation details leak into specification
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- All items pass validation.
|
||||||
|
- Assumptions section documents reasonable defaults for format choice, column layout interpretation, and infrastructure reuse.
|
||||||
|
- No [NEEDS CLARIFICATION] markers were needed — the user description was specific enough to make informed decisions for all requirements.
|
||||||
91 specs/001-video-wall/contracts/api-endpoints.md Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
# API Contracts: VideoWall
|
||||||
|
|
||||||
|
## GET /video/preview
|
||||||
|
|
||||||
|
Retrieve the preview clip MP4 file for a given video. If the preview is not yet generated, triggers on-demand generation and returns 202.
|
||||||
|
|
||||||
|
**Authentication**: Required (Bearer token)
|
||||||
|
|
||||||
|
**Query Parameters**:
|
||||||
|
|
||||||
|
| Parameter | Type | Required | Description |
|
||||||
|
|-----------|------|----------|-------------|
|
||||||
|
| path | string | yes | Relative path of the source video from BASE_PATH |
|
||||||
|
|
||||||
|
**Responses**:
|
||||||
|
|
||||||
|
| Status | Content-Type | Body | Description |
|
||||||
|
|--------|-------------|------|-------------|
|
||||||
|
| 200 | video/mp4 | MP4 file stream | Preview clip is ready and served |
|
||||||
|
| 202 | application/json | `{"status": "processing", "path": "<path>"}` | Preview generation has been triggered; client should retry |
|
||||||
|
| 400 | application/json | `{"error": "Invalid path"}` | Path validation failed |
|
||||||
|
| 404 | application/json | `{"error": "Video not found"}` | Source video does not exist |
|
||||||
|
| 500 | application/json | `{"error": "Generation failed: <detail>"}` | Preview generation failed |
|
||||||
|
|
||||||
|
**Behavior**:
|
||||||
|
1. Validate path with `is_valid_full_path()`
|
||||||
|
2. Check if preview clip exists on disk and status is `complete` → serve MP4 (200)
|
||||||
|
3. If status is `pending` or no record exists → trigger generation, return 202
|
||||||
|
4. If status is `processing` → return 202
|
||||||
|
5. If status is `failed` → return 500 with error detail
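
The branch structure of steps 2-5 can be sketched as a simple match over the stored clip status. The types and the actual serving step are simplified assumptions, not the handler's real signature:

```rust
// `ClipRecord` and `PreviewResponse` stand in for the real handler's types.
struct ClipRecord {
    status: String,                // pending | processing | complete | failed
    error_message: Option<String>,
}

enum PreviewResponse {
    ServeMp4,       // 200: stream the clip from disk
    Processing,     // 202: generation triggered or already running
    Failed(String), // 500: generation failed
}

fn dispatch(record: Option<&ClipRecord>, clip_on_disk: bool) -> PreviewResponse {
    match record {
        Some(r) if r.status == "complete" && clip_on_disk => PreviewResponse::ServeMp4,
        Some(r) if r.status == "failed" => {
            PreviewResponse::Failed(r.error_message.clone().unwrap_or_default())
        }
        // pending, processing, or no record yet: trigger/continue generation.
        _ => PreviewResponse::Processing,
    }
}
```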
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## POST /video/preview/status
|
||||||
|
|
||||||
|
Check the preview generation status for a batch of video paths. Used by the mobile app to determine which previews are ready before requesting them.
|
||||||
|
|
||||||
|
**Authentication**: Required (Bearer token)
|
||||||
|
|
||||||
|
**Request Body** (application/json):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"paths": [
|
||||||
|
"2024/vacation/beach.mov",
|
||||||
|
"2024/vacation/sunset.mp4",
|
||||||
|
"2024/birthday.avi"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Required | Description |
|
||||||
|
|-------|------|----------|-------------|
|
||||||
|
| paths | string[] | yes | Array of relative video paths from BASE_PATH |
|
||||||
|
|
||||||
|
**Response** (200, application/json):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"previews": [
|
||||||
|
{
|
||||||
|
"path": "2024/vacation/beach.mov",
|
||||||
|
"status": "complete",
|
||||||
|
"preview_url": "/video/preview?path=2024/vacation/beach.mov"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "2024/vacation/sunset.mp4",
|
||||||
|
"status": "processing",
|
||||||
|
"preview_url": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "2024/birthday.avi",
|
||||||
|
"status": "pending",
|
||||||
|
"preview_url": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| previews | object[] | Status for each requested path |
|
||||||
|
| previews[].path | string | The requested video path |
|
||||||
|
| previews[].status | string | One of: `pending`, `processing`, `complete`, `failed`, `not_found` |
|
||||||
|
| previews[].preview_url | string? | Relative URL to fetch the preview (only when status is `complete`) |
|
||||||
|
|
||||||
|
**Behavior**:
|
||||||
|
1. Accept up to 200 paths per request
|
||||||
|
2. Batch query the `video_preview_clips` table for all paths
|
||||||
|
3. For paths not in the table, return status `not_found` (video may not exist or hasn't been scanned yet)
|
||||||
|
4. Return results in the same order as the input paths
|
||||||
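A sketch of the order-preserving batch lookup described in the behavior notes above; the types and the source of the looked-up statuses are simplified assumptions:

```rust
use std::collections::HashMap;

// Status for one requested path, mirroring the response schema above.
struct PreviewStatus {
    path: String,
    status: String,              // pending | processing | complete | failed | not_found
    preview_url: Option<String>, // only set when status == "complete"
}

// One batch query fills `by_path`; results are then emitted in request order,
// with `not_found` for paths the table doesn't know about.
fn batch_status(paths: &[String], by_path: &HashMap<String, String>) -> Vec<PreviewStatus> {
    paths
        .iter()
        .map(|p| {
            let status = by_path.get(p).cloned().unwrap_or_else(|| "not_found".into());
            let preview_url =
                (status == "complete").then(|| format!("/video/preview?path={p}"));
            PreviewStatus { path: p.clone(), status, preview_url }
        })
        .collect()
}
```
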
62 specs/001-video-wall/data-model.md Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# Data Model: VideoWall
|
||||||
|
|
||||||
|
## Entities
|
||||||
|
|
||||||
|
### VideoPreviewClip
|
||||||
|
|
||||||
|
Tracks the generation status and metadata of preview clips derived from source videos.
|
||||||
|
|
||||||
|
**Table**: `video_preview_clips`
|
||||||
|
|
||||||
|
| Field | Type | Constraints | Description |
|
||||||
|
|-------|------|-------------|-------------|
|
||||||
|
| id | INTEGER | PRIMARY KEY, AUTOINCREMENT | Unique identifier |
|
||||||
|
| file_path | TEXT | NOT NULL, UNIQUE | Relative path of the source video from BASE_PATH |
|
||||||
|
| status | TEXT | NOT NULL, DEFAULT 'pending' | Generation status: `pending`, `processing`, `complete`, `failed` |
|
||||||
|
| duration_seconds | REAL | NULLABLE | Duration of the generated preview clip (≤10s) |
|
||||||
|
| file_size_bytes | INTEGER | NULLABLE | Size of the generated MP4 file |
|
||||||
|
| error_message | TEXT | NULLABLE | Error details if status is `failed` |
|
||||||
|
| created_at | TEXT | NOT NULL | ISO 8601 timestamp when record was created |
|
||||||
|
| updated_at | TEXT | NOT NULL | ISO 8601 timestamp when record was last updated |
|
||||||
|
|
||||||
|
**Indexes**:
|
||||||
|
- `idx_preview_clips_file_path` on `file_path` (unique, used for lookups and batch queries)
|
||||||
|
- `idx_preview_clips_status` on `status` (used by file watcher to find pending/failed clips)
|
||||||
|
|
||||||
|
### Relationships
|
||||||
|
|
||||||
|
- **VideoPreviewClip → Source Video**: One-to-one via `file_path`. The preview clip file on disk is located under `{PREVIEW_CLIPS_DIRECTORY}/{file_path}` with the source extension replaced by `.mp4` (see Storage Layout below).
|
||||||
|
- **VideoPreviewClip → image_exif**: Implicit relationship via shared `file_path`. No foreign key needed — the EXIF table may not have an entry for every video.
|
||||||
|
|
||||||
|
## State Transitions
|
||||||
|
|
||||||
|
```
|
||||||
|
[new video detected] → pending
|
||||||
|
pending → processing (when generation starts)
|
||||||
|
processing → complete (when ffmpeg succeeds)
|
||||||
|
processing → failed (when ffmpeg fails or times out)
|
||||||
|
failed → pending (on retry / re-scan)
|
||||||
|
```
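
A hedged sketch of how these transitions could be checked in code; the enum and helper are illustrative (the table itself stores `status` as TEXT):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PreviewStatus {
    Pending,
    Processing,
    Complete,
    Failed,
}

/// Returns true when `from -> to` matches one of the transitions above.
fn is_valid_transition(from: PreviewStatus, to: PreviewStatus) -> bool {
    use PreviewStatus::*;
    matches!(
        (from, to),
        (Pending, Processing)
            | (Processing, Complete)
            | (Processing, Failed)
            | (Failed, Pending) // retry / re-scan
    )
}
```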
|
||||||
|
|
||||||
|
## Validation Rules
|
||||||
|
|
||||||
|
- `file_path` must be a valid relative path within BASE_PATH
|
||||||
|
- `status` must be one of: `pending`, `processing`, `complete`, `failed`
|
||||||
|
- `duration_seconds` must be > 0 and ≤ 10.0 when status is `complete`
|
||||||
|
- `file_size_bytes` must be > 0 when status is `complete`
|
||||||
|
- `error_message` should only be non-null when status is `failed`
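
A small sketch of the `complete`-state invariants above; the struct mirrors the table's nullable columns and is illustrative only:

```rust
struct VideoPreviewClip {
    status: String,
    duration_seconds: Option<f64>,
    file_size_bytes: Option<i64>,
    error_message: Option<String>,
}

/// Check the rules that apply once a clip is marked `complete`.
fn complete_clip_is_valid(clip: &VideoPreviewClip) -> bool {
    clip.status == "complete"
        && clip.duration_seconds.map_or(false, |d| d > 0.0 && d <= 10.0)
        && clip.file_size_bytes.map_or(false, |s| s > 0)
        && clip.error_message.is_none()
}
```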
|
||||||
|
|
||||||
|
## Storage Layout (Filesystem)
|
||||||
|
|
||||||
|
```
|
||||||
|
{PREVIEW_CLIPS_DIRECTORY}/
|
||||||
|
├── 2024/
|
||||||
|
│ ├── vacation/
|
||||||
|
│ │ ├── beach.mp4 # Preview for BASE_PATH/2024/vacation/beach.mov
|
||||||
|
│ │ └── sunset.mp4 # Preview for BASE_PATH/2024/vacation/sunset.mp4
|
||||||
|
│ └── birthday.mp4 # Preview for BASE_PATH/2024/birthday.avi
|
||||||
|
└── 2025/
|
||||||
|
└── trip.mp4 # Preview for BASE_PATH/2025/trip.mkv
|
||||||
|
```
|
||||||
|
|
||||||
|
All preview clips use `.mp4` extension regardless of source format.
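
A sketch of the path mapping this layout implies, assuming the source extension is simply swapped for `.mp4`; the helper name is hypothetical:

```rust
use std::path::{Path, PathBuf};

/// Derive the on-disk preview path for a source video's relative path,
/// e.g. "2024/vacation/beach.mov" -> "{preview_dir}/2024/vacation/beach.mp4".
fn preview_path(preview_dir: &Path, relative_video_path: &str) -> PathBuf {
    let mut out = preview_dir.join(relative_video_path);
    out.set_extension("mp4"); // replaces the source extension
    out
}
```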
|
||||||
79
specs/001-video-wall/plan.md
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
# Implementation Plan: VideoWall
|
||||||
|
|
||||||
|
**Branch**: `001-video-wall` | **Date**: 2026-02-25 | **Spec**: [spec.md](./spec.md)
|
||||||
|
**Input**: Feature specification from `/specs/001-video-wall/spec.md`
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Add a VideoWall feature spanning the Rust API backend and React Native mobile app. The backend generates 480p MP4 preview clips (up to 10 seconds, composed of 10 equally spaced 1-second segments) using ffmpeg, extending the existing `OverviewVideo` pattern in `src/video/ffmpeg.rs`. The mobile app adds a VideoWall view using `expo-video` and FlatList to display a responsive 2-3 column grid of simultaneously looping, muted preview clips with audio-on-long-press. Preview clips are cached on disk, served via new API endpoints, and generated proactively by the file watcher.
|
||||||
|
|
||||||
|
## Technical Context
|
||||||
|
|
||||||
|
**Language/Version**: Rust (stable, Cargo) for backend API; TypeScript / React Native (Expo SDK 52) for mobile app
|
||||||
|
**Primary Dependencies**: actix-web 4, Diesel 2.2 (SQLite), ffmpeg/ffprobe (CLI), expo-video 3.0, expo-router 6.0, react-native-reanimated 4.1
|
||||||
|
**Storage**: SQLite (preview clip status tracking), filesystem (MP4 preview clips in `PREVIEW_CLIPS_DIRECTORY`)
|
||||||
|
**Testing**: `cargo test` for backend; manual testing for mobile app
|
||||||
|
**Target Platform**: Linux server (API), iOS/Android (mobile app via Expo)
|
||||||
|
**Project Type**: Mobile app + REST API (two separate repositories)
|
||||||
|
**Performance Goals**: <3s VideoWall load for 50 pre-generated previews; <30s per clip generation; <5MB per clip; smooth simultaneous playback of 6-12 clips
|
||||||
|
**Constraints**: Semaphore-limited concurrent ffmpeg processes (existing pattern); 480p resolution to keep bandwidth/CPU manageable; audio track preserved but muted by default
|
||||||
|
**Scale/Scope**: Hundreds to low thousands of videos per library; single user at a time
|
||||||
|
|
||||||
|
## Constitution Check
|
||||||
|
|
||||||
|
*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*
|
||||||
|
|
||||||
|
Constitution is an unfilled template — no project-specific gates defined. **PASS** (no violations possible).
|
||||||
|
|
||||||
|
Post-Phase 1 re-check: Still PASS — no gates to evaluate.
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
### Documentation (this feature)
|
||||||
|
|
||||||
|
```text
|
||||||
|
specs/001-video-wall/
|
||||||
|
├── plan.md # This file
|
||||||
|
├── research.md # Phase 0 output
|
||||||
|
├── data-model.md # Phase 1 output
|
||||||
|
├── quickstart.md # Phase 1 output
|
||||||
|
├── contracts/ # Phase 1 output
|
||||||
|
│ └── api-endpoints.md
|
||||||
|
└── tasks.md # Phase 2 output (/speckit.tasks command)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Source Code (repository root)
|
||||||
|
|
||||||
|
```text
|
||||||
|
# Backend (ImageApi - Rust)
|
||||||
|
src/
|
||||||
|
├── video/
|
||||||
|
│ ├── ffmpeg.rs # Add generate_preview_clip() using existing pattern
|
||||||
|
│ ├── actors.rs # Add PreviewClipGenerator actor (semaphore-limited)
|
||||||
|
│ └── mod.rs # Add generate_preview_clips() batch function
|
||||||
|
├── main.rs # Add GET /video/preview, POST /video/preview/status endpoints
|
||||||
|
│ # Extend file watcher to trigger preview generation
|
||||||
|
├── database/
|
||||||
|
│ ├── schema.rs # Add video_preview_clips table
|
||||||
|
│ ├── models.rs # Add VideoPreviewClip model
|
||||||
|
│ └── preview_dao.rs # New DAO for preview clip status tracking
|
||||||
|
└── data/
|
||||||
|
└── mod.rs # Add PreviewClipRequest, PreviewStatusRequest types
|
||||||
|
|
||||||
|
# Frontend (SynologyFileViewer - React Native)
|
||||||
|
app/(app)/grid/
|
||||||
|
├── video-wall.tsx # New VideoWall view (FlatList grid)
|
||||||
|
└── _layout.tsx # Add video-wall route to stack
|
||||||
|
|
||||||
|
components/
|
||||||
|
└── VideoWallItem.tsx # Single preview clip cell (expo-video player)
|
||||||
|
|
||||||
|
hooks/
|
||||||
|
└── useVideoWall.ts # Preview clip fetching, status polling, audio state
|
||||||
|
```
|
||||||
|
|
||||||
|
**Structure Decision**: Mobile + API pattern. Backend changes extend existing `src/video/` module and `src/main.rs` handlers following established conventions. Frontend adds a new route under the existing grid stack navigator with a dedicated component and hook.
|
||||||
|
|
||||||
|
## Complexity Tracking
|
||||||
|
|
||||||
|
No constitution violations to justify.
|
||||||
115
specs/001-video-wall/quickstart.md
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
# Quickstart: VideoWall
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Rust toolchain (stable) with `cargo`
|
||||||
|
- `diesel_cli` installed (`cargo install diesel_cli --no-default-features --features sqlite`)
|
||||||
|
- ffmpeg and ffprobe available on PATH
|
||||||
|
- Node.js 18+ and Expo CLI for mobile app
|
||||||
|
- `.env` file configured with existing variables plus `PREVIEW_CLIPS_DIRECTORY`
|
||||||
|
|
||||||
|
## New Environment Variable
|
||||||
|
|
||||||
|
Add to `.env`:
|
||||||
|
```bash
|
||||||
|
PREVIEW_CLIPS_DIRECTORY=/path/to/preview-clips # Directory for generated preview MP4s
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backend Development
|
||||||
|
|
||||||
|
### 1. Create database migration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd C:\Users\ccord\RustroverProjects\ImageApi
|
||||||
|
diesel migration generate create_video_preview_clips
|
||||||
|
```
|
||||||
|
|
||||||
|
Edit the generated `up.sql`:
|
||||||
|
```sql
|
||||||
|
CREATE TABLE video_preview_clips (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
|
||||||
|
file_path TEXT NOT NULL UNIQUE,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending',
|
||||||
|
duration_seconds REAL,
|
||||||
|
file_size_bytes INTEGER,
|
||||||
|
error_message TEXT,
|
||||||
|
created_at TEXT NOT NULL,
|
||||||
|
updated_at TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_preview_clips_file_path ON video_preview_clips(file_path);
|
||||||
|
CREATE INDEX idx_preview_clips_status ON video_preview_clips(status);
|
||||||
|
```
|
||||||
|
|
||||||
|
Edit `down.sql`:
|
||||||
|
```sql
|
||||||
|
DROP TABLE IF EXISTS video_preview_clips;
|
||||||
|
```
|
||||||
|
|
||||||
|
Regenerate schema:
|
||||||
|
```bash
|
||||||
|
diesel migration run
|
||||||
|
diesel print-schema > src/database/schema.rs
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Build and test backend
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo build
|
||||||
|
cargo test
|
||||||
|
cargo run
|
||||||
|
```
|
||||||
|
|
||||||
|
Test preview endpoint:
|
||||||
|
```bash
|
||||||
|
# Check preview status
|
||||||
|
curl -X POST http://localhost:8080/video/preview/status \
|
||||||
|
-H "Authorization: Bearer <token>" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"paths": ["some/video.mp4"]}'
|
||||||
|
|
||||||
|
# Request preview clip
|
||||||
|
curl "http://localhost:8080/video/preview?path=some/video.mp4" \
|
||||||
|
-H "Authorization: Bearer <token>" \
|
||||||
|
-o preview.mp4
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Verify preview clip generation
|
||||||
|
|
||||||
|
Check that preview clips appear in `PREVIEW_CLIPS_DIRECTORY` with the expected directory structure mirroring `BASE_PATH`.
|
||||||
|
|
||||||
|
## Frontend Development
|
||||||
|
|
||||||
|
### 1. Start the mobile app
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd C:\Users\ccord\development\SynologyFileViewer
|
||||||
|
npx expo start
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Navigate to VideoWall
|
||||||
|
|
||||||
|
From the grid view of any folder containing videos, switch to VideoWall mode. The view should display a 2-3 column grid of looping preview clips.
|
||||||
|
|
||||||
|
## Key Files to Modify
|
||||||
|
|
||||||
|
### Backend (ImageApi)
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `src/video/ffmpeg.rs` | Add `generate_preview_clip()` function |
|
||||||
|
| `src/video/actors.rs` | Add `PreviewClipGenerator` actor |
|
||||||
|
| `src/video/mod.rs` | Add `generate_preview_clips()` batch function |
|
||||||
|
| `src/main.rs` | Add endpoints, extend file watcher |
|
||||||
|
| `src/database/schema.rs` | Regenerated by Diesel |
|
||||||
|
| `src/database/models.rs` | Add `VideoPreviewClip` struct |
|
||||||
|
| `src/database/preview_dao.rs` | New DAO file |
|
||||||
|
| `src/data/mod.rs` | Add request/response types |
|
||||||
|
| `src/state.rs` | Add PreviewClipGenerator to AppState |
|
||||||
|
|
||||||
|
### Frontend (SynologyFileViewer)
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `app/(app)/grid/video-wall.tsx` | New VideoWall view |
|
||||||
|
| `app/(app)/grid/_layout.tsx` | Add route |
|
||||||
|
| `components/VideoWallItem.tsx` | New preview clip cell component |
|
||||||
|
| `hooks/useVideoWall.ts` | New hook for preview state management |
|
||||||
91
specs/001-video-wall/research.md
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
# Research: VideoWall
|
||||||
|
|
||||||
|
## R1: FFmpeg Preview Clip Generation Strategy
|
||||||
|
|
||||||
|
**Decision**: Use ffmpeg's `select` filter with segment-based extraction, extending the existing `OverviewVideo` pattern in `src/video/ffmpeg.rs`.
|
||||||
|
|
||||||
|
**Rationale**: The codebase already has a nearly identical pattern in `src/video/ffmpeg.rs` using `select='lt(mod(t,{interval}),1)'`, which keeps one second of frames at each evenly spaced interval across the video duration. That pattern outputs GIF; here we adapt it to output MP4 at 480p with audio.
|
||||||
|
|
||||||
|
**Approach** (a hedged command-construction sketch follows this list):
|
||||||
|
1. Use `ffprobe` to get video duration (existing `get_video_duration()` pattern)
|
||||||
|
2. Calculate interval: `duration / 10` (or fewer segments for short videos)
|
||||||
|
3. Use ffmpeg with:
|
||||||
|
- Video filter: `select='lt(mod(t,{interval}),1)',setpts=N/FRAME_RATE/TB,scale=-2:480`
|
||||||
|
- Audio filter: `aselect='lt(mod(t,{interval}),1)',asetpts=N/SR/TB`
|
||||||
|
- Output: MP4 with H.264 video + AAC audio
|
||||||
|
- CRF 28 (lower quality acceptable for previews, reduces file size)
|
||||||
|
- Preset: `veryfast` (matches existing HLS transcoding pattern)
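
A hedged sketch of the command this approach implies; codec and flag choices follow the bullets above, but the helper names are illustrative and the project's actual `generate_preview_clip()` may differ:

```rust
use std::process::Command;

/// Number of 1-second segments and the spacing between their start points,
/// for a source of `duration` seconds (fewer segments for short videos, FR-009).
/// Caller is expected to guard against a zero/unknown duration.
fn segment_plan(duration: f64) -> (u32, f64) {
    let segments = (duration.floor() as u32).clamp(1, 10);
    (segments, duration / segments as f64)
}

/// Build (but do not run) an ffmpeg invocation for a 480p MP4 preview.
/// `duration` would come from the existing ffprobe helper.
fn build_preview_command(input: &str, output: &str, duration: f64) -> Command {
    let (_segments, interval) = segment_plan(duration);
    let vf = format!("select='lt(mod(t,{interval}),1)',setpts=N/FRAME_RATE/TB,scale=-2:480");
    let af = format!("aselect='lt(mod(t,{interval}),1)',asetpts=N/SR/TB");

    let mut cmd = Command::new("ffmpeg");
    cmd.args(["-y", "-i", input, "-vf", vf.as_str(), "-af", af.as_str()])
        .args(["-c:v", "libx264", "-crf", "28", "-preset", "veryfast", "-c:a", "aac"])
        .arg(output);
    cmd
}
```

The caller would still run the command, check its exit status, and stat the output file to record duration and size.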
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- Generating separate segment files and concatenating: More complex, no benefit over select filter
|
||||||
|
- Using GIF output: Rejected per clarification — MP4 is 5-10x smaller with better quality
|
||||||
|
- Stream copy (no transcode): Not possible since we're extracting non-contiguous segments
|
||||||
|
|
||||||
|
## R2: Preview Clip Storage and Caching
|
||||||
|
|
||||||
|
**Decision**: Store preview clips on filesystem in a dedicated `PREVIEW_CLIPS_DIRECTORY` mirroring the source directory structure (same pattern as `THUMBNAILS` and `GIFS_DIRECTORY`).
|
||||||
|
|
||||||
|
**Rationale**: The project already uses this directory-mirroring pattern for thumbnails and GIF previews. It's simple, requires no database for file lookup (path is deterministic), and integrates naturally with the existing file watcher cleanup logic.
|
||||||
|
|
||||||
|
**Storage path formula**: `{PREVIEW_CLIPS_DIRECTORY}/{relative_path_from_BASE_PATH}` with the source extension replaced by `.mp4`
|
||||||
|
- Example: Video at `BASE_PATH/2024/vacation.mov` → Preview at `PREVIEW_CLIPS_DIRECTORY/2024/vacation.mp4`
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- Database BLOBs: Too large, not suited for binary video files
|
||||||
|
- Content-addressed storage (hash-based): Unnecessary complexity for single-user system
|
||||||
|
- Flat directory with UUID names: Loses the intuitive mapping that thumbnails/GIFs use
|
||||||
|
|
||||||
|
## R3: Preview Generation Status Tracking
|
||||||
|
|
||||||
|
**Decision**: Track generation status in SQLite via a new `video_preview_clips` table with Diesel ORM, following the existing DAO pattern.
|
||||||
|
|
||||||
|
**Rationale**: The batch status endpoint (FR-004) needs to efficiently check which previews are ready for a list of video paths. A database table is the right tool — it supports batch queries (existing `get_exif_batch()` pattern), survives restarts, and tracks failure states. The file watcher already uses batch DB queries to detect unprocessed files.
|
||||||
|
|
||||||
|
**Status values**: `pending`, `processing`, `complete`, `failed`
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- Filesystem-only (check if .mp4 exists): Cannot track `processing` or `failed` states; race conditions on concurrent requests
|
||||||
|
- In-memory HashMap: Lost on restart; doesn't support batch queries efficiently across actor boundaries
|
||||||
|
|
||||||
|
## R4: Concurrent Generation Limits
|
||||||
|
|
||||||
|
**Decision**: Use `Arc<Semaphore>` with a limit of 2 concurrent ffmpeg preview generation processes, matching the existing `PlaylistGenerator` pattern.
|
||||||
|
|
||||||
|
**Rationale**: The `PlaylistGenerator` actor in `src/video/actors.rs` already uses this exact pattern to limit concurrent ffmpeg processes. Preview generation is CPU-intensive (transcoding), so limiting concurrency prevents server overload. The semaphore pattern is proven in this codebase.
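
A minimal sketch of that pattern with Tokio; the job body is a placeholder and the function is illustrative, not the actual actor:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

/// Run `jobs` with at most two generations in flight at once.
async fn run_limited(jobs: Vec<String>) {
    let permits = Arc::new(Semaphore::new(2)); // cap concurrent ffmpeg processes

    let mut handles = Vec::new();
    for video_path in jobs {
        let permits = Arc::clone(&permits);
        handles.push(tokio::spawn(async move {
            // Permit is held for the whole generation and released on drop.
            let _permit = permits.acquire_owned().await.expect("semaphore closed");
            // ... spawn ffmpeg for `video_path` here ...
            println!("generating preview for {video_path}");
        }));
    }

    for handle in handles {
        let _ = handle.await;
    }
}
```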
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- Unbounded concurrency: Would overwhelm the server with many simultaneous ffmpeg processes
|
||||||
|
- Queue with single worker: Too slow for batch generation; 2 concurrent is a good balance
|
||||||
|
- Sharing the existing PlaylistGenerator semaphore: Would cause HLS generation and preview generation to compete for the same slots; better to keep them independent
|
||||||
|
|
||||||
|
## R5: Mobile App Video Playback Strategy
|
||||||
|
|
||||||
|
**Decision**: Use `expo-video` `VideoView` components inside FlatList items, with muted autoplay and viewport-based pause/resume.
|
||||||
|
|
||||||
|
**Rationale**: The app already uses `expo-video` (v3.0.15) for the single video player in `viewer/video.tsx`. The library supports multiple simultaneous players, `loop` mode, and programmatic mute/unmute. FlatList's `viewabilityConfig` callback can be used to pause/resume players based on viewport visibility.
|
||||||
|
|
||||||
|
**Key configuration per cell**:
|
||||||
|
- `player.loop = true`
|
||||||
|
- `player.muted = true` (default)
|
||||||
|
- `player.play()` when visible, `player.pause()` when offscreen
|
||||||
|
- `nativeControls={false}` (no controls needed in grid)
|
||||||
|
|
||||||
|
**Audio-on-focus**: On long-press, unmute the pressed player and mute all others. Track the "focused" player ID in hook state.
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- HLS streaming for previews: Overkill for <10s clips; direct MP4 download is simpler and faster
|
||||||
|
- Animated GIF display via Image component: Rejected per clarification — MP4 with expo-video is better
|
||||||
|
- WebView-based player: Poor performance, no native gesture integration
|
||||||
|
|
||||||
|
## R6: API Endpoint Design
|
||||||
|
|
||||||
|
**Decision**: Two new endpoints — one to serve preview clips, one for batch status checking.
|
||||||
|
|
||||||
|
**Rationale**:
|
||||||
|
- `GET /video/preview?path=...` serves the MP4 file directly (or triggers on-demand generation and returns 202 Accepted). Follows the pattern of `GET /image?path=...` for serving files.
|
||||||
|
- `POST /video/preview/status` accepts a JSON body with an array of video paths and returns their preview generation status. This allows the mobile app to efficiently determine which previews are ready in a single request (batch pattern from `get_exif_batch()`).
|
||||||
|
|
||||||
|
**Alternatives considered**:
|
||||||
|
- Single endpoint that blocks until generation completes: Bad UX — generation takes up to 30s
|
||||||
|
- WebSocket for real-time status: Overkill for this use case; polling with batch status is simpler
|
||||||
|
- Including preview URL in the existing `/photos` response: Would couple the photo listing endpoint to preview generation; better to keep separate
|
||||||
136
specs/001-video-wall/spec.md
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
# Feature Specification: VideoWall
|
||||||
|
|
||||||
|
**Feature Branch**: `001-video-wall`
|
||||||
|
**Created**: 2026-02-25
|
||||||
|
**Status**: Draft
|
||||||
|
**Input**: User description: "I would like to implement a new View 'VideoWall' in the React native mobile app, with supporting API/tasks to generate at most 10 second long GIF/Videos that are 10 equally spaced 1 second clips of the original video. This view will display a grid 2/3 columns wide of all these clips playing simultaneously. It should let the user view all videos in the current folder/search results."
|
||||||
|
|
||||||
|
## Clarifications
|
||||||
|
|
||||||
|
### Session 2026-02-25
|
||||||
|
|
||||||
|
- Q: What format should preview clips be generated in (GIF vs video)? → A: MP4 video clips (small files, hardware-accelerated playback, best quality-to-size ratio).
|
||||||
|
- Q: What resolution should preview clips be generated at? → A: 480p scaled down (sharp in grid cells, small files, smooth simultaneous playback).
|
||||||
|
- Q: How should audio be handled in preview clips? → A: Audio on focus — muted by default, audio plays when user long-presses on a clip. Audio track is preserved during generation.
|
||||||
|
|
||||||
|
## User Scenarios & Testing *(mandatory)*
|
||||||
|
|
||||||
|
### User Story 1 - Browse Videos as a Visual Wall (Priority: P1)
|
||||||
|
|
||||||
|
A user navigates to a folder containing videos and switches to the VideoWall view. The screen fills with a grid of video previews — short looping clips that give a visual summary of each video. All previews play simultaneously, creating an immersive "wall of motion" that lets the user quickly scan and identify videos of interest without opening each one individually.
|
||||||
|
|
||||||
|
**Why this priority**: This is the core experience. Without the visual grid of simultaneously playing previews, the feature has no value. This story delivers the primary browsing capability.
|
||||||
|
|
||||||
|
**Independent Test**: Can be fully tested by navigating to any folder with videos, switching to VideoWall view, and confirming that preview clips display in a grid and play simultaneously. Delivers immediate visual browsing value.
|
||||||
|
|
||||||
|
**Acceptance Scenarios**:
|
||||||
|
|
||||||
|
1. **Given** a user is viewing a folder containing 6 videos, **When** they switch to VideoWall view, **Then** they see a grid of 6 video previews arranged in 2-3 columns, all playing simultaneously in a loop.
|
||||||
|
2. **Given** a user is viewing a folder containing 20 videos, **When** they switch to VideoWall view, **Then** the grid is scrollable and loads previews progressively as they scroll.
|
||||||
|
3. **Given** a user is in VideoWall view, **When** they tap on a video preview, **Then** they navigate to the full video player for that video.
|
||||||
|
4. **Given** a user is in VideoWall view with all clips muted, **When** they long-press on a preview clip, **Then** that clip's audio unmutes and all other clips remain muted.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### User Story 2 - Server Generates Preview Clips (Priority: P1)
|
||||||
|
|
||||||
|
When preview clips are requested for a video that has not yet been processed, the server generates a short preview clip. The preview is composed of 10 equally spaced 1-second segments extracted from the original video, concatenated into a single clip of at most 10 seconds. Once generated, the preview is cached so subsequent requests are served instantly.
|
||||||
|
|
||||||
|
**Why this priority**: The VideoWall view depends entirely on having preview clips available. Without server-side generation, there is nothing to display. This is co-priority with Story 1 as they are interdependent.
|
||||||
|
|
||||||
|
**Independent Test**: Can be tested by requesting a preview clip for any video via the API and confirming the response is a playable clip of at most 10 seconds composed of segments from different parts of the original video.
|
||||||
|
|
||||||
|
**Acceptance Scenarios**:
|
||||||
|
|
||||||
|
1. **Given** a video exists that has no preview clip yet, **When** a preview is requested, **Then** the system generates a clip of at most 10 seconds composed of 10 equally spaced 1-second segments from the original video.
|
||||||
|
2. **Given** a video is shorter than 10 seconds, **When** a preview is requested, **Then** the system generates a preview using fewer segments (as many 1-second clips as the video duration allows), resulting in a shorter preview.
|
||||||
|
3. **Given** a preview clip was previously generated for a video, **When** it is requested again, **Then** the cached version is served without re-processing.
|
||||||
|
4. **Given** a video file no longer exists, **When** a preview is requested, **Then** the system returns an appropriate error indicating the source video is missing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### User Story 3 - VideoWall from Search Results (Priority: P2)
|
||||||
|
|
||||||
|
A user performs a search or applies filters (tags, date range, camera, location) and the results include videos. They switch to VideoWall view to see preview clips of all matching videos displayed in the same grid layout, allowing visual browsing of search results.
|
||||||
|
|
||||||
|
**Why this priority**: Extends the core VideoWall browsing to work with filtered/search result sets. Important for discoverability but depends on Story 1 and 2 being functional first.
|
||||||
|
|
||||||
|
**Independent Test**: Can be tested by performing a search that returns videos, switching to VideoWall view, and confirming that only matching videos appear as previews in the grid.
|
||||||
|
|
||||||
|
**Acceptance Scenarios**:
|
||||||
|
|
||||||
|
1. **Given** a user has search results containing 8 videos and 12 photos, **When** they switch to VideoWall view, **Then** only the 8 video previews are displayed in the grid.
|
||||||
|
2. **Given** a user applies a tag filter that matches 3 videos, **When** they view the VideoWall, **Then** exactly 3 video previews are shown.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### User Story 4 - Background Preview Generation (Priority: P3)
|
||||||
|
|
||||||
|
Preview clips are generated proactively in the background for videos discovered during file watching, so that when a user opens VideoWall, most previews are already available and the experience feels instant.
|
||||||
|
|
||||||
|
**Why this priority**: Enhances performance and perceived responsiveness. The feature works without this (on-demand generation), but background processing greatly improves the user experience for large libraries.
|
||||||
|
|
||||||
|
**Independent Test**: Can be tested by adding new video files to a monitored folder and confirming that preview clips are generated automatically within the next scan cycle, before any user requests them.
|
||||||
|
|
||||||
|
**Acceptance Scenarios**:
|
||||||
|
|
||||||
|
1. **Given** a new video is added to the media library, **When** the file watcher detects it, **Then** a preview clip is generated in the background without user intervention.
|
||||||
|
2. **Given** the system is generating previews in the background, **When** a user opens VideoWall, **Then** already-generated previews display immediately while pending ones show a placeholder.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Edge Cases
|
||||||
|
|
||||||
|
- What happens when a video is corrupted or cannot be processed? The system shows a placeholder/error state for that video and does not block other previews from loading.
|
||||||
|
- What happens when the user scrolls quickly through a large library? Previews outside the visible viewport should pause or not load to conserve resources, and resume when scrolled back into view.
|
||||||
|
- What happens when a video is extremely long (e.g., 4+ hours)? The same algorithm applies — 10 equally spaced 1-second clips — ensuring the preview still represents the full video.
|
||||||
|
- What happens when a video is exactly 10 seconds long? Each 1-second segment starts at second 0, 1, 2, ... 9, effectively previewing the entire video.
|
||||||
|
- What happens when storage for preview clips runs low? Preview clips should be reasonably compressed and sized to minimize storage impact.
|
||||||
|
- What happens when many previews are requested simultaneously (e.g., opening a folder with 100 videos)? The system should queue generation and serve already-cached previews immediately while others are processed.
|
||||||
|
|
||||||
|
## Requirements *(mandatory)*
|
||||||
|
|
||||||
|
### Functional Requirements
|
||||||
|
|
||||||
|
- **FR-001**: System MUST generate preview clips for videos as MP4 files scaled to 480p resolution, where each preview is composed of up to 10 equally spaced 1-second segments from the original video, resulting in a clip of at most 10 seconds.
|
||||||
|
- **FR-002**: System MUST cache generated preview clips so they are only generated once per source video.
|
||||||
|
- **FR-003**: System MUST provide an endpoint to retrieve a preview clip for a given video path.
|
||||||
|
- **FR-004**: System MUST provide an endpoint to retrieve preview availability status for a batch of video paths so the client knows which previews are ready.
|
||||||
|
- **FR-005**: The mobile app MUST display a VideoWall view showing video previews in a grid of 2 columns on smaller screens and 3 columns on larger screens.
|
||||||
|
- **FR-006**: All visible preview clips in the VideoWall MUST play simultaneously, muted, and loop continuously.
|
||||||
|
- **FR-006a**: When a user long-presses on a preview clip, the app MUST unmute that clip's audio. Only one clip may have audio at a time.
|
||||||
|
- **FR-006b**: Preview clips MUST retain their audio track during generation (not stripped) to support audio-on-focus playback.
|
||||||
|
- **FR-007**: The VideoWall MUST support browsing videos from both folder navigation and search/filter results.
|
||||||
|
- **FR-008**: Tapping a preview clip in the VideoWall MUST navigate the user to the full video.
|
||||||
|
- **FR-009**: For videos shorter than 10 seconds, the system MUST generate a preview using as many full 1-second segments as the video duration allows.
|
||||||
|
- **FR-010**: The system MUST display a placeholder for videos whose preview clips are not yet generated.
|
||||||
|
- **FR-011**: The system MUST handle unprocessable videos gracefully by showing an error state rather than failing the entire wall.
|
||||||
|
- **FR-012**: The VideoWall MUST support scrolling through large numbers of videos, loading previews progressively.
|
||||||
|
- **FR-013**: Preview clips outside the visible viewport SHOULD pause playback to conserve device resources.
|
||||||
|
|
||||||
|
### Key Entities
|
||||||
|
|
||||||
|
- **Video Preview Clip**: A short looping MP4 video (at most 10 seconds) scaled to 480p resolution, derived from a source video. Composed of up to 10 equally spaced 1-second segments. Associated with exactly one source video by file path. Has a generation status (pending, processing, complete, failed).
|
||||||
|
- **VideoWall View**: A scrollable grid layout displaying video preview clips. Adapts column count based on screen size (2 or 3 columns). Operates on a set of videos from a folder or search result context.
|
||||||
|
|
||||||
|
## Success Criteria *(mandatory)*
|
||||||
|
|
||||||
|
### Measurable Outcomes
|
||||||
|
|
||||||
|
- **SC-001**: Users can visually browse all videos in a folder within 3 seconds of opening VideoWall (for folders with up to 50 videos with pre-generated previews).
|
||||||
|
- **SC-002**: Preview clips accurately represent the source video by sampling from evenly distributed points across the full duration.
|
||||||
|
- **SC-003**: All visible previews play simultaneously without noticeable stuttering on standard mobile devices.
|
||||||
|
- **SC-004**: Generated preview clips are each under 5 MB in size to keep storage and bandwidth manageable.
|
||||||
|
- **SC-005**: The VideoWall view correctly filters to show only videos (not photos) from the current folder or search results.
|
||||||
|
- **SC-006**: Users can identify and select a video of interest from the VideoWall and navigate to it in a single tap.
|
||||||
|
- **SC-007**: Preview generation for a single video completes within 30 seconds on typical hardware.
|
||||||
|
|
||||||
|
## Assumptions
|
||||||
|
|
||||||
|
- The existing file watcher and thumbnail generation infrastructure will be extended to also trigger preview clip generation.
|
||||||
|
- Preview clips will be stored alongside existing thumbnails/GIFs in a designated directory on the server.
|
||||||
|
- The React Native mobile app already has folder navigation and search/filter capabilities that provide the video list context for VideoWall.
|
||||||
|
- The server already has ffmpeg available for video processing (used for existing HLS and GIF generation).
|
||||||
|
- Authentication and authorization follow the existing JWT-based pattern; no new auth requirements.
|
||||||
|
- "2/3 columns" means a responsive layout: 2 columns on phones (portrait), 3 columns on tablets or landscape orientation.
|
||||||
|
- Preview clips are generated as MP4 video files for optimal quality-to-size ratio and hardware-accelerated mobile playback.
|
||||||
234
specs/001-video-wall/tasks.md
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
# Tasks: VideoWall
|
||||||
|
|
||||||
|
**Input**: Design documents from `/specs/001-video-wall/`
|
||||||
|
**Prerequisites**: plan.md (required), spec.md (required), research.md, data-model.md, contracts/
|
||||||
|
|
||||||
|
**Tests**: Not explicitly requested — test tasks omitted.
|
||||||
|
|
||||||
|
**Organization**: Tasks grouped by user story. US2 (server generation) comes before US1 (mobile view) because the mobile app depends on the API endpoints existing.
|
||||||
|
|
||||||
|
## Format: `[ID] [P?] [Story] Description`
|
||||||
|
|
||||||
|
- **[P]**: Can run in parallel (different files, no dependencies)
|
||||||
|
- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
|
||||||
|
- Include exact file paths in descriptions
|
||||||
|
|
||||||
|
## Path Conventions
|
||||||
|
|
||||||
|
- **Backend (ImageApi)**: `src/` at `C:\Users\ccord\RustroverProjects\ImageApi`
|
||||||
|
- **Frontend (SynologyFileViewer)**: `app/`, `components/`, `hooks/` at `C:\Users\ccord\development\SynologyFileViewer`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 1: Setup (Shared Infrastructure)
|
||||||
|
|
||||||
|
**Purpose**: Database migration, new environment variable, shared types
|
||||||
|
|
||||||
|
- [x] T001 Create Diesel migration for `video_preview_clips` table: run `diesel migration generate create_video_preview_clips`, write `up.sql` with table definition (id, file_path UNIQUE, status DEFAULT 'pending', duration_seconds, file_size_bytes, error_message, created_at, updated_at) and indexes (idx_preview_clips_file_path, idx_preview_clips_status), write `down.sql` with DROP TABLE. See `data-model.md` for full schema.
|
||||||
|
- [x] T002 Run migration and regenerate schema: execute `diesel migration run` then `diesel print-schema > src/database/schema.rs` to add the `video_preview_clips` table to `src/database/schema.rs`
|
||||||
|
- [x] T003 Add `PREVIEW_CLIPS_DIRECTORY` environment variable: read it in `src/main.rs` startup (alongside existing `GIFS_DIRECTORY`), create the directory if it doesn't exist, and add it to `AppState` or pass it where needed. Follow the pattern used for `GIFS_DIRECTORY` and `THUMBNAILS`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Foundational (Blocking Prerequisites)
|
||||||
|
|
||||||
|
**Purpose**: Diesel model, DAO, and request/response types that all user stories depend on
|
||||||
|
|
||||||
|
**CRITICAL**: No user story work can begin until this phase is complete
|
||||||
|
|
||||||
|
- [x] T004 [P] Add `VideoPreviewClip` Diesel model struct in `src/database/models.rs` with fields matching the `video_preview_clips` schema table (Queryable, Insertable derives). Add a `NewVideoPreviewClip` struct for inserts.
|
||||||
|
- [x] T005 [P] Add `PreviewClipRequest` and `PreviewStatusRequest`/`PreviewStatusResponse` types in `src/data/mod.rs`. `PreviewClipRequest` has `path: String`. `PreviewStatusRequest` has `paths: Vec<String>`. `PreviewStatusResponse` has `previews: Vec<PreviewStatusItem>` where each item has `path`, `status`, `preview_url: Option<String>`. All with Serialize/Deserialize derives.
|
||||||
|
- [x] T006 Create `PreviewDao` trait and `SqlitePreviewDao` implementation in `src/database/preview_dao.rs`. Methods: `insert_preview(file_path, status) -> Result`, `update_status(file_path, status, duration_seconds?, file_size_bytes?, error_message?) -> Result`, `get_preview(file_path) -> Result<Option<VideoPreviewClip>>`, `get_previews_batch(file_paths: &[String]) -> Result<Vec<VideoPreviewClip>>`, `get_by_status(status) -> Result<Vec<VideoPreviewClip>>`. Follow the `ExifDao`/`SqliteExifDao` pattern with `Arc<Mutex<SqliteConnection>>` and OpenTelemetry tracing spans.
|
||||||
|
- [x] T007 Register `preview_dao` module in `src/database/mod.rs` and add `PreviewDao` to the database module exports. Wire `SqlitePreviewDao` into `AppState` in `src/state.rs` following the existing DAO pattern (e.g., how `ExifDao` is added).
|
||||||
|
|
||||||
|
**Checkpoint**: Foundation ready — DAO, models, and types available for all stories
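
For orientation, a hedged sketch of the DAO surface T006 describes, with signatures simplified and the Diesel plumbing, tracing spans, and error types omitted:

```rust
use anyhow::Result;

/// Illustrative row type; see data-model.md for the full column list.
pub struct VideoPreviewClip {
    pub file_path: String,
    pub status: String,
    pub duration_seconds: Option<f64>,
    pub file_size_bytes: Option<i64>,
    pub error_message: Option<String>,
}

/// Simplified version of the trait described in T006.
pub trait PreviewDao: Send + Sync {
    fn insert_preview(&self, file_path: &str, status: &str) -> Result<()>;
    fn update_status(
        &self,
        file_path: &str,
        status: &str,
        duration_seconds: Option<f64>,
        file_size_bytes: Option<i64>,
        error_message: Option<&str>,
    ) -> Result<()>;
    fn get_preview(&self, file_path: &str) -> Result<Option<VideoPreviewClip>>;
    fn get_previews_batch(&self, file_paths: &[String]) -> Result<Vec<VideoPreviewClip>>;
    fn get_by_status(&self, status: &str) -> Result<Vec<VideoPreviewClip>>;
}
```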
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 3: User Story 2 - Server Generates Preview Clips (Priority: P1) MVP
|
||||||
|
|
||||||
|
**Goal**: Backend can generate 480p MP4 preview clips (10 equally spaced 1-second segments) and serve them via API endpoints with on-demand generation and batch status checking.
|
||||||
|
|
||||||
|
**Independent Test**: Request `GET /video/preview?path=<video>` for any video — should return an MP4 file of at most 10 seconds. Request `POST /video/preview/status` with video paths — should return status for each.
|
||||||
|
|
||||||
|
### Implementation for User Story 2
|
||||||
|
|
||||||
|
- [x] T008 [P] [US2] Add `generate_preview_clip()` function in `src/video/ffmpeg.rs`. Takes input video path, output MP4 path, and video duration. Uses ffprobe to get duration (existing pattern). Calculates interval = `duration / 10` (or fewer for short videos per FR-009). Builds ffmpeg command with: video filter `select='lt(mod(t,{interval}),1)',setpts=N/FRAME_RATE/TB,scale=-2:480`, audio filter `aselect='lt(mod(t,{interval}),1)',asetpts=N/SR/TB`, codec H.264 CRF 28 preset veryfast, AAC audio. Output path uses `.mp4` extension. Creates parent directories for output. Returns `Result<(f64, u64)>` with (duration_seconds, file_size_bytes). See `research.md` R1 for full ffmpeg strategy.
|
||||||
|
- [x] T009 [P] [US2] Create `PreviewClipGenerator` actor in `src/video/actors.rs`. Struct holds `Arc<Semaphore>` (limit 2 concurrent), preview clips directory path, base path, and `Arc<dyn PreviewDao>`. Handles `GeneratePreviewMessage { video_path: String }`: acquires semaphore permit, updates DB status to `processing`, calls `generate_preview_clip()`, updates DB to `complete` with duration/size on success or `failed` with error on failure. Follow the `PlaylistGenerator` actor pattern with `tokio::spawn` for async processing.
|
||||||
|
- [x] T010 [US2] Add `PreviewClipGenerator` actor to `AppState` in `src/state.rs`. Initialize it during server startup in `src/main.rs` with the `PREVIEW_CLIPS_DIRECTORY`, `BASE_PATH`, and preview DAO reference. Start the actor with `PreviewClipGenerator::new(...).start()`.
|
||||||
|
- [x] T011 [US2] Implement `GET /video/preview` handler in `src/main.rs`. Validate path with `is_valid_full_path()`. Check preview DAO for status: if `complete` → serve MP4 file with `NamedFile::open()` (200); if `processing` → return 202 JSON; if `pending`/not found → insert/update record as `pending`, send `GeneratePreviewMessage` to actor, return 202 JSON; if `failed` → return 500 with error. See `contracts/api-endpoints.md` for full response contract.
|
||||||
|
- [x] T012 [US2] Implement `POST /video/preview/status` handler in `src/main.rs`. Accept `PreviewStatusRequest` JSON body. Call `preview_dao.get_previews_batch()` for all paths. Map results: for each path, return status and `preview_url` (only when `complete`). Paths not in DB get status `not_found`. Limit to 200 paths per request. Return `PreviewStatusResponse` JSON.
|
||||||
|
- [x] T013 [US2] Register both new endpoints in route configuration in `src/main.rs`. Add `web::resource("/video/preview").route(web::get().to(get_video_preview))` and `web::resource("/video/preview/status").route(web::post().to(get_preview_status))`. Both require authentication (Claims extraction).
|
||||||
|
- [x] T014 [US2] Handle short videos (< 10 seconds) in `generate_preview_clip()` in `src/video/ffmpeg.rs`. When duration < 10s, calculate segment count as `floor(duration)` and interval as `duration / segment_count`. When duration < 1s, use the entire video as the preview (just transcode to 480p MP4). Add this logic to the interval calculation in T008.
|
||||||
|
|
||||||
|
**Checkpoint**: Backend fully functional — preview clips can be generated, cached, and served via API
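
A rough sketch of the status flow T009 and T011 describe around a single generation job; the stub functions stand in for the real DAO and ffmpeg helpers:

```rust
use anyhow::Result;

/// Stand-ins for the real DAO and ffmpeg helpers (see T006 / T008).
fn update_status(_path: &str, _status: &str, _err: Option<&str>) -> Result<()> { Ok(()) }
fn generate_preview_clip(_input: &str, _output: &str) -> Result<(f64, u64)> { Ok((10.0, 1_000_000)) }

/// One generation job: mark processing, run ffmpeg, record the outcome.
fn handle_generate(video_path: &str, output_path: &str) -> Result<()> {
    update_status(video_path, "processing", None)?;
    match generate_preview_clip(video_path, output_path) {
        Ok((_duration, _size)) => update_status(video_path, "complete", None),
        Err(err) => update_status(video_path, "failed", Some(&err.to_string())),
    }
}
```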
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 4: User Story 1 - Browse Videos as a Visual Wall (Priority: P1) MVP
|
||||||
|
|
||||||
|
**Goal**: Mobile app displays a responsive 2-3 column grid of simultaneously looping, muted video previews with long-press audio and tap-to-navigate.
|
||||||
|
|
||||||
|
**Independent Test**: Navigate to a folder with videos in the app, switch to VideoWall view, confirm grid displays with playing previews. Long-press to hear audio. Tap to open full video.
|
||||||
|
|
||||||
|
### Implementation for User Story 1
|
||||||
|
|
||||||
|
- [x] T015 [P] [US1] Create `useVideoWall` hook in `hooks/useVideoWall.ts` (SynologyFileViewer). Accepts array of `GridItem[]` (video items only, filtered from current files context). Calls `POST /video/preview/status` with video paths on mount to get availability. Returns `{ previewStatuses: Map<string, PreviewStatus>, focusedVideoPath: string | null, setFocusedVideo: (path) => void, refreshStatuses: () => void }`. Uses `authenticatedFetch()` from auth hook. Polls status every 5 seconds for any items still in `pending`/`processing` state, stops polling when all are `complete` or `failed`.
|
||||||
|
- [x] T016 [P] [US1] Create `VideoWallItem` component in `components/VideoWallItem.tsx` (SynologyFileViewer). Renders an `expo-video` `VideoView` for a single preview clip. Props: `videoPath: string, previewStatus: string, isFocused: boolean, onTap: () => void, onLongPress: () => void, isVisible: boolean`. When `previewStatus === 'complete'`: create `useVideoPlayer` with source URL `${baseUrl}/video/preview?path=${videoPath}` and auth headers, set `player.loop = true`, `player.muted = !isFocused`. When `isVisible` is true → `player.play()`, false → `player.pause()`. When status is not complete: show placeholder (thumbnail image from existing `/image?path=&size=thumb` endpoint with a loading indicator overlay). When `failed`: show error icon overlay. Aspect ratio 16:9 with `nativeControls={false}`.
|
||||||
|
- [x] T017 [US1] Create VideoWall view in `app/(app)/grid/video-wall.tsx` (SynologyFileViewer). Use `FlatList` with `numColumns` calculated as `Math.floor(dimensions.width / 180)` (targeting 2-3 columns). Get video items from `FilesContext` — filter `allItems` or `filteredItems` to only include video extensions (use same detection as existing `isVideo()` check). Pass items to `useVideoWall` hook. Use `viewabilityConfig` with `viewAreaCoveragePercentThreshold: 50` and `onViewableItemsChanged` callback to track visible items, passing `isVisible` to each `VideoWallItem`. Implement `keyExtractor` using video path. Add scroll-to-top FAB button following existing grid pattern.
|
||||||
|
- [x] T018 [US1] Add `video-wall` route to stack navigator in `app/(app)/grid/_layout.tsx` (SynologyFileViewer). Add `<Stack.Screen name="video-wall" options={{ title: "Video Wall" }} />` to the existing Stack navigator.
|
||||||
|
- [x] T019 [US1] Add navigation entry point to switch to VideoWall from the grid view. In `app/(app)/grid/[path].tsx` (SynologyFileViewer), add a header button (e.g., a grid/video icon from `@expo/vector-icons`) that calls `router.push("/grid/video-wall")`. Only show the button when the current folder contains at least one video file.
|
||||||
|
- [x] T020 [US1] Implement long-press audio-on-focus behavior. In `VideoWallItem`, wrap the VideoView in a `Pressable` with `onLongPress` calling `onLongPress` prop. In `video-wall.tsx`, when `onLongPress` fires for an item: call `setFocusedVideo(path)` if different from current, or `setFocusedVideo(null)` to toggle off. The `isFocused` prop drives `player.muted` in `VideoWallItem` — when focused, unmute; all others stay muted.
|
||||||
|
- [x] T021 [US1] Implement tap-to-navigate to full video player. In `VideoWallItem`, the `onTap` prop triggers navigation. In `video-wall.tsx`, the `onTap` handler sets the `currentIndex` in `FilesContext` to the tapped video's index and calls `router.push("/grid/viewer/video")` following the existing pattern from `[path].tsx` grid item press.
|
||||||
|
|
||||||
|
**Checkpoint**: Full VideoWall experience works for folder browsing with simultaneous playback, audio-on-focus, and tap-to-view
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 5: User Story 3 - VideoWall from Search Results (Priority: P2)
|
||||||
|
|
||||||
|
**Goal**: VideoWall works with search/filter results, showing only matching videos.
|
||||||
|
|
||||||
|
**Independent Test**: Perform a search with filters that returns videos, switch to VideoWall, confirm only matching videos appear.
|
||||||
|
|
||||||
|
### Implementation for User Story 3
|
||||||
|
|
||||||
|
- [x] T022 [US3] Ensure VideoWall uses `filteredItems` when available. In `app/(app)/grid/video-wall.tsx` (SynologyFileViewer), check if `filteredItems` from `FilesContext` is non-empty — if so, use `filteredItems` filtered to videos only; otherwise use `allItems` filtered to videos. This should already work if T017 reads from the context correctly, but verify the logic handles both folder browsing and search result modes.
|
||||||
|
- [x] T023 [US3] Add VideoWall toggle from search results. In `app/(app)/search.tsx` (SynologyFileViewer), add a button (same icon as T019) that navigates to `/grid/video-wall` when search results contain at least one video. The `filteredItems` in `FilesContext` should already be populated by the search, so VideoWall will pick them up automatically.
|
||||||
|
|
||||||
|
**Checkpoint**: VideoWall works with both folder navigation and search/filter results
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 6: User Story 4 - Background Preview Generation (Priority: P3)
|
||||||
|
|
||||||
|
**Goal**: Preview clips are generated proactively during file watching so most are ready before users open VideoWall.
|
||||||
|
|
||||||
|
**Independent Test**: Add new video files to a monitored folder, wait for file watcher scan cycle, confirm preview clips appear in `PREVIEW_CLIPS_DIRECTORY` without any user request.
|
||||||
|
|
||||||
|
### Implementation for User Story 4
|
||||||
|
|
||||||
|
- [x] T024 [US4] Extend `process_new_files()` in `src/main.rs` to detect videos missing preview clips. After the existing EXIF batch query, add a batch query via `preview_dao.get_previews_batch()` for all discovered video paths. Collect videos that have no record or have `failed` status (for retry).
|
||||||
|
- [x] T025 [US4] Queue preview generation for new/unprocessed videos in `process_new_files()` in `src/main.rs`. For each video missing a preview, insert a `pending` record via `preview_dao.insert_preview()` (skip if already exists), then send `GeneratePreviewMessage` to the `PreviewClipGenerator` actor. Follow the existing pattern of sending `QueueVideosMessage` to `VideoPlaylistManager`.
|
||||||
|
- [x] T026 [US4] Add preview clip directory creation to startup scan in `src/main.rs`. During the initial startup thumbnail generation phase, also check for videos missing preview clips and queue them for generation (same logic as T024/T025 but for the initial full scan). Ensure the `PREVIEW_CLIPS_DIRECTORY` is created at startup if it doesn't exist.
|
||||||
|
|
||||||
|
**Checkpoint**: New videos automatically get preview clips generated during file watcher scans
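
A small sketch of the set-difference step in T024/T025, assuming the batch lookup has been collected into a path → status map; names are illustrative:

```rust
use std::collections::HashMap;

/// Given discovered video paths and existing preview rows (path -> status),
/// return the paths that need a `pending` record and a generation message:
/// anything with no record at all, plus previous failures (for retry).
fn previews_to_queue(discovered: &[String], existing: &HashMap<String, String>) -> Vec<String> {
    discovered
        .iter()
        .filter(|path| match existing.get(*path) {
            None => true,                       // never seen: queue it
            Some(status) => status == "failed", // retry failures on re-scan
        })
        .cloned()
        .collect()
}
```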
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 7: Polish & Cross-Cutting Concerns
|
||||||
|
|
||||||
|
**Purpose**: Error handling, loading states, observability
|
||||||
|
|
||||||
|
- [x] T027 [P] Add loading/placeholder state for pending previews in `components/VideoWallItem.tsx` (SynologyFileViewer). Show the existing thumbnail from `/image?path=&size=thumb` with a semi-transparent overlay and a loading spinner when preview status is `pending` or `processing`.
|
||||||
|
- [x] T028 [P] Add error state for failed previews in `components/VideoWallItem.tsx` (SynologyFileViewer). Show the existing thumbnail with an error icon overlay and optional "Retry" text when preview status is `failed`.
|
||||||
|
- [x] T029 [P] Add OpenTelemetry tracing spans for preview generation in `src/video/actors.rs` and `src/main.rs` endpoints. Follow the existing pattern of `global_tracer().start("preview_clip_generate")` with status and duration attributes.
|
||||||
|
- [x] T030 Verify cargo build and cargo clippy pass with all backend changes. Fix any warnings or errors.
|
||||||
|
- [x] T031 Run quickstart.md validation: test both API endpoints manually with curl, verify preview clip file is generated in correct directory structure, confirm mobile app connects and displays VideoWall.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Dependencies & Execution Order
|
||||||
|
|
||||||
|
### Phase Dependencies
|
||||||
|
|
||||||
|
- **Setup (Phase 1)**: No dependencies — start immediately
|
||||||
|
- **Foundational (Phase 2)**: Depends on Phase 1 (migration must run first)
|
||||||
|
- **US2 - Server Generation (Phase 3)**: Depends on Phase 2 (needs DAO, models, types)
|
||||||
|
- **US1 - Mobile VideoWall (Phase 4)**: Depends on Phase 3 (needs API endpoints to exist)
|
||||||
|
- **US3 - Search Results (Phase 5)**: Depends on Phase 4 (extends VideoWall view)
|
||||||
|
- **US4 - Background Generation (Phase 6)**: Depends on Phase 3 only (backend only, no mobile dependency)
|
||||||
|
- **Polish (Phase 7)**: Depends on Phases 4 and 6
|
||||||
|
|
||||||
|
### User Story Dependencies
|
||||||
|
|
||||||
|
- **US2 (P1)**: Can start after Foundational — no other story dependencies
|
||||||
|
- **US1 (P1)**: Depends on US2 (needs preview API endpoints)
|
||||||
|
- **US3 (P2)**: Depends on US1 (extends the VideoWall view)
|
||||||
|
- **US4 (P3)**: Depends on US2 only (extends file watcher with preview generation; independent of mobile app)
|
||||||
|
|
||||||
|
### Within Each User Story
|
||||||
|
|
||||||
|
- Models/types before services/DAO
|
||||||
|
- DAO before actors
|
||||||
|
- Actors before endpoints
|
||||||
|
- Backend endpoints before mobile app views
|
||||||
|
- Core view before navigation integration
|
||||||
|
|
||||||
|
### Parallel Opportunities
|
||||||
|
|
||||||
|
**Phase 2**: T004, T005 can run in parallel (different files)
|
||||||
|
**Phase 3**: T008, T009 can run in parallel (ffmpeg.rs vs actors.rs)
|
||||||
|
**Phase 4**: T015, T016 can run in parallel (hook vs component, different files)
|
||||||
|
**Phase 6**: T024, T025 are sequential (same file) but Phase 6 can run in parallel with Phase 4/5
|
||||||
|
**Phase 7**: T027, T028, T029 can all run in parallel (different files)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Parallel Example: User Story 2
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Launch parallelizable tasks together:
|
||||||
|
Task T008: "Add generate_preview_clip() function in src/video/ffmpeg.rs"
|
||||||
|
Task T009: "Create PreviewClipGenerator actor in src/video/actors.rs"
|
||||||
|
|
||||||
|
# Then sequential tasks (depend on T008+T009):
|
||||||
|
Task T010: "Add PreviewClipGenerator to AppState in src/state.rs"
|
||||||
|
Task T011: "Implement GET /video/preview handler in src/main.rs"
|
||||||
|
Task T012: "Implement POST /video/preview/status handler in src/main.rs"
|
||||||
|
Task T013: "Register endpoints in route configuration in src/main.rs"
|
||||||
|
Task T014: "Handle short videos in generate_preview_clip() in src/video/ffmpeg.rs"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Parallel Example: User Story 1
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Launch parallelizable tasks together:
|
||||||
|
Task T015: "Create useVideoWall hook in hooks/useVideoWall.ts"
|
||||||
|
Task T016: "Create VideoWallItem component in components/VideoWallItem.tsx"
|
||||||
|
|
||||||
|
# Then sequential tasks (depend on T015+T016):
|
||||||
|
Task T017: "Create VideoWall view in app/(app)/grid/video-wall.tsx"
|
||||||
|
Task T018: "Add video-wall route to stack navigator"
|
||||||
|
Task T019: "Add navigation entry point from grid view"
|
||||||
|
Task T020: "Implement long-press audio-on-focus"
|
||||||
|
Task T021: "Implement tap-to-navigate to full video player"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Strategy
|
||||||
|
|
||||||
|
### MVP First (US2 + US1)
|
||||||
|
|
||||||
|
1. Complete Phase 1: Setup (migration, env var)
|
||||||
|
2. Complete Phase 2: Foundational (model, DAO, types)
|
||||||
|
3. Complete Phase 3: US2 — Server generates preview clips
|
||||||
|
4. **STOP and VALIDATE**: Test API with curl per quickstart.md
|
||||||
|
5. Complete Phase 4: US1 — Mobile VideoWall view
|
||||||
|
6. **STOP and VALIDATE**: Test end-to-end on device
|
||||||
|
7. Deploy/demo — this is the MVP!
|
||||||
|
|
||||||
|
### Incremental Delivery
|
||||||
|
|
||||||
|
1. Setup + Foundational → Foundation ready
|
||||||
|
2. US2 (Server Generation) → Backend API working (testable with curl)
|
||||||
|
3. US1 (Mobile VideoWall) → Full end-to-end MVP (testable on device)
|
||||||
|
4. US3 (Search Results) → Extended browsing from search (incremental value)
|
||||||
|
5. US4 (Background Generation) → Performance enhancement (clips pre-generated)
|
||||||
|
6. Polish → Error states, tracing, validation
|
||||||
|
|
||||||
|
### Note on US4 Parallelism
|
||||||
|
|
||||||
|
US4 (Background Generation) only depends on US2 (backend), not on the mobile app. It can be developed in parallel with US1 by a second developer, or deferred to after MVP is validated.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- [P] tasks = different files, no dependencies
|
||||||
|
- [Story] label maps task to specific user story
|
||||||
|
- Backend work is in `C:\Users\ccord\RustroverProjects\ImageApi`
|
||||||
|
- Frontend work is in `C:\Users\ccord\development\SynologyFileViewer`
|
||||||
|
- Commit after each task or logical group
|
||||||
|
- Stop at any checkpoint to validate story independently
|
||||||
110
src/ai/apollo_client.rs
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
//! Thin async HTTP client for Apollo's `/api/places/*` endpoints.
|
||||||
|
//!
|
||||||
|
//! Apollo (the personal location-history viewer at the sibling repo) owns
|
||||||
|
//! user-defined Places: `name + lat/lon + radius_m + description (+ optional
|
||||||
|
//! category)`. We consume them in two places:
|
||||||
|
//!
|
||||||
|
//! 1. Automatic enrichment in [`crate::ai::insight_generator`] — the always-on
|
||||||
|
//! path that folds the most-specific containing Place into the location
|
||||||
|
//! string fed to the LLM.
|
||||||
|
//! 2. The agentic `get_personal_place_at` tool — lets the LLM ask "what
|
||||||
|
//! user-defined place contains this lat/lon" during chat continuation.
|
||||||
|
//!
|
||||||
|
//! Apollo does the haversine. This client is plumbing only — no geometry,
|
||||||
|
//! no caching at the moment. If insight throughput ever makes per-photo
|
||||||
|
//! HTTP latency a problem, swap to a small `Mutex<HashMap>` TTL cache here.
|
||||||
|
//!
|
||||||
|
//! Configured via `APOLLO_API_BASE_URL`. When unset, the client constructs
|
||||||
|
//! to a no-op shell: every method returns empty / `None`, the enrichment
|
||||||
|
//! path silently falls through to the legacy Nominatim-only output, and the
|
||||||
|
//! tool registration in `insight_generator` reports "integration disabled."
|
||||||
|
|
||||||
|
use anyhow::Result;
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde::Deserialize;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
// Public fields — `id`, `lat`, `lon` aren't read from the current tool
|
||||||
|
// output but are part of the wire model and useful for future tool
|
||||||
|
// extensions / debugging.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
pub struct ApolloPlace {
|
||||||
|
pub id: i32,
|
||||||
|
pub name: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub description: String,
|
||||||
|
pub lat: f64,
|
||||||
|
pub lon: f64,
|
||||||
|
pub radius_m: i32,
|
||||||
|
#[serde(default)]
|
||||||
|
pub category: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct PlacesResponse {
|
||||||
|
places: Vec<ApolloPlace>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ApolloClient {
|
||||||
|
client: Client,
|
||||||
|
/// `None` means the integration is disabled — every method returns
|
||||||
|
/// empty so the rest of insight generation runs unchanged.
|
||||||
|
base_url: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApolloClient {
|
||||||
|
pub fn new(base_url: Option<String>) -> Self {
|
||||||
|
// 5 s timeout: Apollo runs on the LAN. If it doesn't answer in
|
||||||
|
// five seconds, treat the call as failed and fall back to the
|
||||||
|
// legacy Nominatim path rather than block the whole insight.
|
||||||
|
let client = Client::builder()
|
||||||
|
.timeout(Duration::from_secs(5))
|
||||||
|
.build()
|
||||||
|
.expect("reqwest client build");
|
||||||
|
Self { client, base_url }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience for callers that need to know whether to register the
|
||||||
|
/// `get_personal_place_at` tool (or to short-circuit enrichment).
|
||||||
|
pub fn is_enabled(&self) -> bool {
|
||||||
|
self.base_url.is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Server-side haversine: returns places whose radius contains
|
||||||
|
/// (lat, lon), already sorted smallest-radius-first by Apollo. The
|
||||||
|
/// caller can take `[0]` for the most-specific match (matches
|
||||||
|
/// Apollo's `primaryPlaceFor` rule on the frontend, so the carousel
|
||||||
|
/// badge and the LLM prompt always agree).
|
||||||
|
pub async fn places_containing(&self, lat: f64, lon: f64) -> Vec<ApolloPlace> {
|
||||||
|
let Some(base) = self.base_url.as_deref() else {
|
||||||
|
return Vec::new();
|
||||||
|
};
|
||||||
|
match self.fetch_places_containing(base, lat, lon).await {
|
||||||
|
Ok(places) => places,
|
||||||
|
Err(err) => {
|
||||||
|
log::warn!("apollo_client: places_containing({lat:.4}, {lon:.4}) failed: {err}");
|
||||||
|
Vec::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_places_containing(
|
||||||
|
&self,
|
||||||
|
base: &str,
|
||||||
|
lat: f64,
|
||||||
|
lon: f64,
|
||||||
|
) -> Result<Vec<ApolloPlace>> {
|
||||||
|
let url = format!("{}/api/places/contains", base.trim_end_matches('/'));
|
||||||
|
let resp = self
|
||||||
|
.client
|
||||||
|
.get(&url)
|
||||||
|
.query(&[("lat", lat), ("lon", lon)])
|
||||||
|
.send()
|
||||||
|
.await?
|
||||||
|
.error_for_status()?;
|
||||||
|
let body: PlacesResponse = resp.json().await?;
|
||||||
|
Ok(body.places)
|
||||||
|
}
|
||||||
|
}
|
||||||
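The enrichment path described in the module doc boils down to "take the first (most-specific) containing place, otherwise keep the Nominatim string". A minimal sketch of that fold, assuming a hypothetical `enrich_location` helper rather than the real `insight_generator` code:

// Hedged sketch only — `enrich_location` is illustrative, not the actual
// insight_generator API. It shows the intended use of `places_containing`:
// the list is already smallest-radius-first, so `.first()` is the
// most-specific match, mirroring Apollo's `primaryPlaceFor` rule.
async fn enrich_location(
    apollo: &ApolloClient,
    nominatim_location: &str,
    lat: f64,
    lon: f64,
) -> String {
    let places = apollo.places_containing(lat, lon).await;
    match places.first() {
        Some(place) => format!("{} ({})", place.name, nominatim_location),
        // Disabled client or no containing place: fall through to the
        // legacy Nominatim-only string unchanged.
        None => nominatim_location.to_string(),
    }
}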
418
src/ai/daily_summary_job.rs
Normal file
@@ -0,0 +1,418 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use chrono::{NaiveDate, Utc};
|
||||||
|
use opentelemetry::KeyValue;
|
||||||
|
use opentelemetry::trace::{Span, Status, TraceContextExt, Tracer};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use tokio::time::sleep;
|
||||||
|
|
||||||
|
use crate::ai::{EMBEDDING_MODEL, OllamaClient, SmsApiClient, SmsMessage, user_display_name};
|
||||||
|
use crate::database::{DailySummaryDao, InsertDailySummary};
|
||||||
|
use crate::otel::global_tracer;
|
||||||
|
|
||||||
|
/// Maximum number of messages passed to the summarizer for a single day.
|
||||||
|
/// Tuned to avoid token overflow on typical chat models; shared between
|
||||||
|
/// the production job and the test binary so they can't drift.
|
||||||
|
pub const DAILY_SUMMARY_MESSAGE_LIMIT: usize = 300;
|
||||||
|
|
||||||
|
/// System prompt used when generating daily conversation summaries.
|
||||||
|
pub const DAILY_SUMMARY_SYSTEM_PROMPT: &str = "You are a conversation summarizer. Create clear, factual summaries with \
|
||||||
|
precise subject attribution AND extract distinctive keywords. Focus on \
|
||||||
|
specific, unique terms that differentiate this conversation from others.";
|
||||||
|
|
||||||
|
/// Build the prompt for a single day's conversation summary. Shared by the
|
||||||
|
/// production job and the test binary so prompt tweaks land in both places.
|
||||||
|
/// Returns `(prompt, system_prompt)`.
|
||||||
|
pub fn build_daily_summary_prompt(
|
||||||
|
contact: &str,
|
||||||
|
date: &NaiveDate,
|
||||||
|
messages: &[SmsMessage],
|
||||||
|
) -> (String, &'static str) {
|
||||||
|
let user_name = user_display_name();
|
||||||
|
let messages_text: String = messages
|
||||||
|
.iter()
|
||||||
|
.take(DAILY_SUMMARY_MESSAGE_LIMIT)
|
||||||
|
.map(|m| {
|
||||||
|
if m.is_sent {
|
||||||
|
format!("{}: {}", user_name, m.body)
|
||||||
|
} else {
|
||||||
|
format!("{}: {}", m.contact, m.body)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n");
|
||||||
|
|
||||||
|
let prompt = format!(
|
||||||
|
r#"Summarize this day's conversation between {user_name} and {contact}.
|
||||||
|
|
||||||
|
CRITICAL FORMAT RULES:
|
||||||
|
- Do NOT start with "Based on the conversation..." or "Here is a summary..." or similar preambles
|
||||||
|
- Do NOT repeat the date at the beginning
|
||||||
|
- Start DIRECTLY with the content - begin with a person's name or action
|
||||||
|
- Write in past tense, as if recording what happened
|
||||||
|
|
||||||
|
NARRATIVE (4-8 sentences):
|
||||||
|
- What specific topics, activities, or events were discussed?
|
||||||
|
- What places, people, or organizations were mentioned?
|
||||||
|
- What plans were made or decisions discussed?
|
||||||
|
- Clearly distinguish between what {user_name} did versus what {contact} did
|
||||||
|
|
||||||
|
KEYWORDS (comma-separated):
|
||||||
|
5-10 specific keywords that capture this conversation's unique content:
|
||||||
|
- Proper nouns (people, places, brands)
|
||||||
|
- Specific activities ("drum corps audition" not just "music")
|
||||||
|
- Distinctive terms that make this day unique
|
||||||
|
|
||||||
|
Date: {month_day_year} ({weekday})
|
||||||
|
Messages:
|
||||||
|
{messages_text}
|
||||||
|
|
||||||
|
YOUR RESPONSE (follow this format EXACTLY):
|
||||||
|
Summary: [Start directly with content, NO preamble]
|
||||||
|
|
||||||
|
Keywords: [specific, unique terms]"#,
|
||||||
|
user_name = user_name,
|
||||||
|
contact = contact,
|
||||||
|
month_day_year = date.format("%B %d, %Y"),
|
||||||
|
weekday = date.format("%A"),
|
||||||
|
messages_text = messages_text,
|
||||||
|
);
|
||||||
|
|
||||||
|
(prompt, DAILY_SUMMARY_SYSTEM_PROMPT)
|
||||||
|
}
|
||||||
|
|
||||||
|
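/// Strip boilerplate prefixes and common phrases from summaries before embedding.
/// This improves embedding diversity by removing structural similarity.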
pub fn strip_summary_boilerplate(summary: &str) -> String {
|
||||||
|
let mut text = summary.trim().to_string();
|
||||||
|
|
||||||
|
// Remove markdown headers
|
||||||
|
while text.starts_with('#') {
|
||||||
|
if let Some(pos) = text.find('\n') {
|
||||||
|
text = text[pos..].trim_start().to_string();
|
||||||
|
} else {
|
||||||
|
// Single line with just headers, try to extract content after #s
|
||||||
|
text = text.trim_start_matches('#').trim().to_string();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove "Summary:" prefix variations (with optional markdown bold)
|
||||||
|
let prefixes = [
|
||||||
|
"**Summary:**",
|
||||||
|
"**Summary**:",
|
||||||
|
"*Summary:*",
|
||||||
|
"Summary:",
|
||||||
|
"**summary:**",
|
||||||
|
"summary:",
|
||||||
|
];
|
||||||
|
for prefix in prefixes {
|
||||||
|
if text.to_lowercase().starts_with(&prefix.to_lowercase()) {
|
||||||
|
text = text[prefix.len()..].trim_start().to_string();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove common opening phrases that add no semantic value
|
||||||
|
let opening_phrases = [
|
||||||
|
"Today, Melissa and I discussed",
|
||||||
|
"Today, Amanda and I discussed",
|
||||||
|
"Today Melissa and I discussed",
|
||||||
|
"Today Amanda and I discussed",
|
||||||
|
"Melissa and I discussed",
|
||||||
|
"Amanda and I discussed",
|
||||||
|
"Today, I discussed",
|
||||||
|
"Today I discussed",
|
||||||
|
"The conversation covered",
|
||||||
|
"This conversation covered",
|
||||||
|
"In this conversation,",
|
||||||
|
"During this conversation,",
|
||||||
|
];
|
||||||
|
|
||||||
|
for phrase in opening_phrases {
|
||||||
|
if text.to_lowercase().starts_with(&phrase.to_lowercase()) {
|
||||||
|
text = text[phrase.len()..].trim_start().to_string();
|
||||||
|
// Remove leading punctuation/articles after stripping phrase
|
||||||
|
text = text
|
||||||
|
.trim_start_matches([',', ':', '-'])
|
||||||
|
.trim_start()
|
||||||
|
.to_string();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove any remaining leading markdown bold markers
|
||||||
|
if text.starts_with("**")
|
||||||
|
&& let Some(end) = text[2..].find("**")
|
||||||
|
{
|
||||||
|
// Keep the content between ** but remove the markers
|
||||||
|
let bold_content = &text[2..2 + end];
|
||||||
|
text = format!("{}{}", bold_content, &text[4 + end..]);
|
||||||
|
}
|
||||||
|
|
||||||
|
text.trim().to_string()
|
||||||
|
}
|
||||||
|
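As an illustration of what the stripping buys, a typical LLM reply loses both the `Summary:` prefix and the generic opening phrase. The test module below is an added sketch, not part of the original commit:

// Added illustration only — not part of the original commit.
#[cfg(test)]
mod strip_summary_boilerplate_example {
    use super::strip_summary_boilerplate;

    #[test]
    fn strips_prefix_and_opening_phrase() {
        let raw = "**Summary:** Melissa and I discussed the drum corps audition.";
        // The markdown-bold "Summary:" prefix and the boilerplate opening
        // phrase are removed; only the distinctive content remains.
        assert_eq!(strip_summary_boilerplate(raw), "the drum corps audition.");
    }
}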
|
||||||
|
/// Generate and embed daily conversation summaries for a date range
|
||||||
|
/// Default: August 2024 ±30 days (July 1 - September 30, 2024)
|
||||||
|
pub async fn generate_daily_summaries(
|
||||||
|
contact: &str,
|
||||||
|
start_date: Option<NaiveDate>,
|
||||||
|
end_date: Option<NaiveDate>,
|
||||||
|
ollama: &OllamaClient,
|
||||||
|
sms_client: &SmsApiClient,
|
||||||
|
summary_dao: Arc<Mutex<Box<dyn DailySummaryDao>>>,
|
||||||
|
) -> Result<()> {
|
||||||
|
let tracer = global_tracer();
|
||||||
|
|
||||||
|
// Get current context (empty in background task) and start span with it
|
||||||
|
let current_cx = opentelemetry::Context::current();
|
||||||
|
let mut span = tracer.start_with_context("ai.daily_summary.generate_batch", &current_cx);
|
||||||
|
span.set_attribute(KeyValue::new("contact", contact.to_string()));
|
||||||
|
|
||||||
|
// Create context with this span for child operations
|
||||||
|
let parent_cx = current_cx.with_span(span);
|
||||||
|
|
||||||
|
// Default to August 2024 ±30 days
|
||||||
|
let start = start_date.unwrap_or_else(|| NaiveDate::from_ymd_opt(2024, 7, 1).unwrap());
|
||||||
|
let end = end_date.unwrap_or_else(|| NaiveDate::from_ymd_opt(2024, 9, 30).unwrap());
|
||||||
|
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("start_date", start.to_string()));
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("end_date", end.to_string()));
|
||||||
|
parent_cx.span().set_attribute(KeyValue::new(
|
||||||
|
"date_range_days",
|
||||||
|
(end - start).num_days() + 1,
|
||||||
|
));
|
||||||
|
|
||||||
|
log::info!("========================================");
|
||||||
|
log::info!("Starting daily summary generation for {}", contact);
|
||||||
|
log::info!(
|
||||||
|
"Date range: {} to {} ({} days)",
|
||||||
|
start,
|
||||||
|
end,
|
||||||
|
(end - start).num_days() + 1
|
||||||
|
);
|
||||||
|
log::info!("========================================");
|
||||||
|
|
||||||
|
// Fetch all messages for the contact in the date range
|
||||||
|
log::info!("Fetching messages for date range...");
|
||||||
|
let _start_timestamp = start.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp();
|
||||||
|
let _end_timestamp = end.and_hms_opt(23, 59, 59).unwrap().and_utc().timestamp();
|
||||||
|
|
||||||
|
let all_messages = sms_client.fetch_all_messages_for_contact(contact).await?;
|
||||||
|
|
||||||
|
// Filter to date range and group by date
|
||||||
|
let mut messages_by_date: HashMap<NaiveDate, Vec<SmsMessage>> = HashMap::new();
|
||||||
|
|
||||||
|
for msg in all_messages {
|
||||||
|
let msg_dt = chrono::DateTime::from_timestamp(msg.timestamp, 0);
|
||||||
|
if let Some(dt) = msg_dt {
|
||||||
|
let date = dt.date_naive();
|
||||||
|
if date >= start && date <= end {
|
||||||
|
messages_by_date.entry(date).or_default().push(msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Grouped messages into {} days with activity",
|
||||||
|
messages_by_date.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
if messages_by_date.is_empty() {
|
||||||
|
log::warn!("No messages found in date range");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort dates for ordered processing
|
||||||
|
let mut dates: Vec<NaiveDate> = messages_by_date.keys().cloned().collect();
|
||||||
|
dates.sort();
|
||||||
|
|
||||||
|
let total_days = dates.len();
|
||||||
|
let mut processed = 0;
|
||||||
|
let mut skipped = 0;
|
||||||
|
let mut failed = 0;
|
||||||
|
|
||||||
|
log::info!("Processing {} days with messages...", total_days);
|
||||||
|
|
||||||
|
for (idx, date) in dates.iter().enumerate() {
|
||||||
|
let messages = messages_by_date.get(date).unwrap();
|
||||||
|
let date_str = date.format("%Y-%m-%d").to_string();
|
||||||
|
|
||||||
|
// Check if summary already exists
|
||||||
|
{
|
||||||
|
let mut dao = summary_dao.lock().expect("Unable to lock DailySummaryDao");
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
|
||||||
|
if dao
|
||||||
|
.summary_exists(&otel_context, &date_str, contact)
|
||||||
|
.unwrap_or(false)
|
||||||
|
{
|
||||||
|
skipped += 1;
|
||||||
|
if idx % 10 == 0 {
|
||||||
|
log::info!(
|
||||||
|
"Progress: {}/{} ({} processed, {} skipped)",
|
||||||
|
idx + 1,
|
||||||
|
total_days,
|
||||||
|
processed,
|
||||||
|
skipped
|
||||||
|
);
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate summary for this day
|
||||||
|
match generate_and_store_daily_summary(
|
||||||
|
&parent_cx,
|
||||||
|
date,
|
||||||
|
contact,
|
||||||
|
messages,
|
||||||
|
ollama,
|
||||||
|
summary_dao.clone(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => {
|
||||||
|
processed += 1;
|
||||||
|
log::info!(
|
||||||
|
"✓ {}/{}: {} ({} messages)",
|
||||||
|
idx + 1,
|
||||||
|
total_days,
|
||||||
|
date_str,
|
||||||
|
messages.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
failed += 1;
|
||||||
|
log::error!("✗ Failed to process {}: {:?}", date_str, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate limiting: sleep 500ms between summaries
|
||||||
|
if idx < total_days - 1 {
|
||||||
|
sleep(std::time::Duration::from_millis(500)).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Progress logging every 10 days
|
||||||
|
if idx % 10 == 0 && idx > 0 {
|
||||||
|
log::info!(
|
||||||
|
"Progress: {}/{} ({} processed, {} skipped, {} failed)",
|
||||||
|
idx + 1,
|
||||||
|
total_days,
|
||||||
|
processed,
|
||||||
|
skipped,
|
||||||
|
failed
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!("========================================");
|
||||||
|
log::info!("Daily summary generation complete!");
|
||||||
|
log::info!(
|
||||||
|
"Processed: {}, Skipped: {}, Failed: {}",
|
||||||
|
processed,
|
||||||
|
skipped,
|
||||||
|
failed
|
||||||
|
);
|
||||||
|
log::info!("========================================");
|
||||||
|
|
||||||
|
// Record final metrics in span
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("days_processed", processed as i64));
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("days_skipped", skipped as i64));
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("days_failed", failed as i64));
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_attribute(KeyValue::new("total_days", total_days as i64));
|
||||||
|
|
||||||
|
if failed > 0 {
|
||||||
|
parent_cx
|
||||||
|
.span()
|
||||||
|
.set_status(Status::error(format!("{} days failed to process", failed)));
|
||||||
|
} else {
|
||||||
|
parent_cx.span().set_status(Status::Ok);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate and store a single day's summary
|
||||||
|
async fn generate_and_store_daily_summary(
|
||||||
|
parent_cx: &opentelemetry::Context,
|
||||||
|
date: &NaiveDate,
|
||||||
|
contact: &str,
|
||||||
|
messages: &[SmsMessage],
|
||||||
|
ollama: &OllamaClient,
|
||||||
|
summary_dao: Arc<Mutex<Box<dyn DailySummaryDao>>>,
|
||||||
|
) -> Result<()> {
|
||||||
|
let tracer = global_tracer();
|
||||||
|
let mut span = tracer.start_with_context("ai.daily_summary.generate_single", parent_cx);
|
||||||
|
span.set_attribute(KeyValue::new("date", date.to_string()));
|
||||||
|
span.set_attribute(KeyValue::new("contact", contact.to_string()));
|
||||||
|
span.set_attribute(KeyValue::new("message_count", messages.len() as i64));
|
||||||
|
|
||||||
|
let (prompt, system_prompt) = build_daily_summary_prompt(contact, date, messages);
|
||||||
|
|
||||||
|
// Generate summary with LLM
|
||||||
|
let summary = ollama.generate(&prompt, Some(system_prompt)).await?;
|
||||||
|
|
||||||
|
log::debug!(
|
||||||
|
"Generated summary for {}: {}",
|
||||||
|
date,
|
||||||
|
summary.chars().take(100).collect::<String>()
|
||||||
|
);
|
||||||
|
|
||||||
|
span.set_attribute(KeyValue::new("summary_length", summary.len() as i64));
|
||||||
|
|
||||||
|
// Strip boilerplate before embedding to improve vector diversity
|
||||||
|
let stripped_summary = strip_summary_boilerplate(&summary);
|
||||||
|
log::debug!(
|
||||||
|
"Stripped summary for embedding: {}",
|
||||||
|
stripped_summary.chars().take(100).collect::<String>()
|
||||||
|
);
|
||||||
|
|
||||||
|
// Embed the stripped summary (store original summary in DB)
|
||||||
|
let embedding = ollama.generate_embedding(&stripped_summary).await?;
|
||||||
|
|
||||||
|
span.set_attribute(KeyValue::new(
|
||||||
|
"embedding_dimensions",
|
||||||
|
embedding.len() as i64,
|
||||||
|
));
|
||||||
|
|
||||||
|
// Store in database
|
||||||
|
let insert = InsertDailySummary {
|
||||||
|
date: date.format("%Y-%m-%d").to_string(),
|
||||||
|
contact: contact.to_string(),
|
||||||
|
summary: summary.trim().to_string(),
|
||||||
|
message_count: messages.len() as i32,
|
||||||
|
embedding,
|
||||||
|
created_at: Utc::now().timestamp(),
|
||||||
|
model_version: EMBEDDING_MODEL.to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create context from current span for DB operation
|
||||||
|
let child_cx = opentelemetry::Context::current_with_span(span);
|
||||||
|
|
||||||
|
let mut dao = summary_dao.lock().expect("Unable to lock DailySummaryDao");
|
||||||
|
let result = dao
|
||||||
|
.store_summary(&child_cx, insert)
|
||||||
|
.map_err(|e| anyhow::anyhow!("Failed to store summary: {:?}", e));
|
||||||
|
|
||||||
|
match &result {
|
||||||
|
Ok(_) => child_cx.span().set_status(Status::Ok),
|
||||||
|
Err(e) => child_cx.span().set_status(Status::error(e.to_string())),
|
||||||
|
}
|
||||||
|
|
||||||
|
result?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
397
src/ai/face_client.rs
Normal file
@@ -0,0 +1,397 @@
|
|||||||
|
//! Thin async HTTP client for Apollo's `/api/internal/faces/*` endpoints.
|
||||||
|
//!
|
||||||
|
//! Apollo (the personal location-history viewer at the sibling repo) hosts the
|
||||||
|
//! insightface inference service. This client is the ImageApi side of the
|
||||||
|
//! contract — it shoves image bytes through `/detect` and returns boxes +
|
||||||
|
//! 512-d ArcFace embeddings, plus a single-embedding `/embed` for the manual
|
||||||
|
//! face-create flow.
|
||||||
|
//!
|
||||||
|
//! Mirrors `apollo_client.rs` shape: optional base URL (None = disabled, the
|
||||||
|
//! file watcher and manual-create handlers no-op), reqwest client with a
|
||||||
|
//! generous timeout because CPU inference on a backlog can take many seconds
|
||||||
|
//! per photo.
|
||||||
|
//!
|
||||||
|
//! Configured via `APOLLO_FACE_API_BASE_URL`, falling back to
|
||||||
|
//! `APOLLO_API_BASE_URL` when the dedicated var is unset (single-Apollo
|
||||||
|
//! deploys are the common case). Both unset → `is_enabled()` returns false.
|
||||||
|
//!
|
||||||
|
//! Wire format: multipart/form-data with `file=<bytes>` and `meta=<json>`.
|
||||||
|
//! `meta` carries `{content_hash, library_id, rel_path, orientation?,
|
||||||
|
//! model_version?}` — useful for Apollo-side logging and idempotency, ignored
|
||||||
|
//! by Apollo today but part of the stable wire contract so future versions
|
||||||
|
//! can act on it without a client change.
|
||||||
|
//!
|
||||||
|
//! Error mapping (reflected in [`FaceDetectError`]):
|
||||||
|
//! - 422 `decode_failed` → permanent: ImageApi marks `status='failed'` and
|
||||||
|
//! doesn't retry until manual rerun.
|
||||||
|
//! - 200 with `faces:[]` → `status='no_faces'` marker row.
|
||||||
|
//! - 503 `cuda_oom` / `engine_unavailable` → defer-and-retry: no marker
|
||||||
|
//! written.
|
||||||
|
//! - Any other 5xx / network error → defer.
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use base64::Engine;
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct DetectMeta {
|
||||||
|
pub content_hash: String,
|
||||||
|
pub library_id: i32,
|
||||||
|
pub rel_path: String,
|
||||||
|
/// EXIF orientation int (1..8). Apollo applies `exif_transpose` on the
|
||||||
|
/// bytes before inference, so this is informational only — supply when
|
||||||
|
/// the bytes were extracted from a RAW preview that lost the tag.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub orientation: Option<i32>,
|
||||||
|
/// Echoed back in the response. ImageApi stores it in
|
||||||
|
/// `face_detections.model_version`.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub model_version: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wire shape for the bbox sub-object Apollo returns. Read by Phase 3's
|
||||||
|
// file-watch hook; silence the dead-code lint until then.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
pub struct DetectedBbox {
|
||||||
|
pub x: f32,
|
||||||
|
pub y: f32,
|
||||||
|
pub w: f32,
|
||||||
|
pub h: f32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)] // bbox consumed by Phase 3 file-watch hook
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
pub struct DetectedFace {
|
||||||
|
pub bbox: DetectedBbox,
|
||||||
|
pub confidence: f32,
|
||||||
|
/// base64 of 2048 bytes (512×f32 LE). ImageApi stores the raw bytes
|
||||||
|
/// verbatim as a BLOB — see `decode_embedding` for the unpack.
|
||||||
|
pub embedding: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DetectedFace {
|
||||||
|
/// Decode the wire-format embedding back into raw bytes for storage.
|
||||||
|
/// Returns the 2048-byte little-endian f32 buffer or an error if the
|
||||||
|
/// base64 is malformed or the wrong length.
|
||||||
|
pub fn decode_embedding(&self) -> Result<Vec<u8>> {
|
||||||
|
let bytes = base64::engine::general_purpose::STANDARD
|
||||||
|
.decode(self.embedding.as_bytes())
|
||||||
|
.context("face embedding base64 decode")?;
|
||||||
|
if bytes.len() != 2048 {
|
||||||
|
anyhow::bail!(
|
||||||
|
"face embedding wrong size: got {} bytes, expected 2048",
|
||||||
|
bytes.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(bytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
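The wire embedding is 2048 opaque bytes and ImageApi stores them verbatim, but comparing two faces eventually needs the 512 `f32` values back. A hedged, illustration-only unpack (not part of the original file):

// Illustration only — not in the original commit. Unpacks the 2048-byte
// little-endian blob from `decode_embedding` into 512 f32 values, e.g.
// for cosine similarity between two stored faces.
fn embedding_to_f32(bytes: &[u8]) -> Vec<f32> {
    bytes
        .chunks_exact(4)
        .map(|c| f32::from_le_bytes([c[0], c[1], c[2], c[3]]))
        .collect()
}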
|
||||||
|
#[allow(dead_code)] // duration_ms logged by Phase 3 file-watch hook
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
pub struct DetectResponse {
|
||||||
|
pub model_version: String,
|
||||||
|
pub duration_ms: i64,
|
||||||
|
pub faces: Vec<DetectedFace>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
#[allow(dead_code)] // Reported by Apollo; useful for future health-driven backoff
|
||||||
|
pub struct FaceHealth {
|
||||||
|
pub loaded: bool,
|
||||||
|
pub providers: Vec<String>,
|
||||||
|
pub model_version: String,
|
||||||
|
pub det_size: i32,
|
||||||
|
#[serde(default)]
|
||||||
|
pub load_error: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Distinguishes permanent failures (don't retry) from transient ones
|
||||||
|
/// (defer and retry on next scan tick). The file-watch hook keys its
|
||||||
|
/// marker-row decision on this — a `Permanent` outcome writes
|
||||||
|
/// `status='failed'`, a `Transient` outcome writes nothing so the next
|
||||||
|
/// pass tries again.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum FaceDetectError {
|
||||||
|
/// Apollo refused the bytes for a reason that won't change on retry
|
||||||
|
/// (decode failure, zero-dim image). Mark `status='failed'`.
|
||||||
|
Permanent(anyhow::Error),
|
||||||
|
/// Apollo couldn't process this turn but might next time (CUDA OOM,
|
||||||
|
/// engine not loaded yet, network hiccup). Don't mark anything.
|
||||||
|
Transient(anyhow::Error),
|
||||||
|
/// Feature is disabled (no `APOLLO_FACE_API_BASE_URL`). Caller should
|
||||||
|
/// silently no-op — same shape as `apollo_client::is_enabled()` false.
|
||||||
|
Disabled,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for FaceDetectError {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
FaceDetectError::Permanent(e) => write!(f, "permanent: {e}"),
|
||||||
|
FaceDetectError::Transient(e) => write!(f, "transient: {e}"),
|
||||||
|
FaceDetectError::Disabled => write!(f, "face client disabled"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::error::Error for FaceDetectError {}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct FaceClient {
|
||||||
|
client: Client,
|
||||||
|
/// `None` → disabled. Trim trailing slash at construction so url
|
||||||
|
/// building doesn't double up.
|
||||||
|
base_url: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FaceClient {
|
||||||
|
pub fn new(base_url: Option<String>) -> Self {
|
||||||
|
// 60 s timeout: CPU inference on a backlog can take many seconds
|
||||||
|
// per photo, especially the first call into a cold GPU. Apollo's
|
||||||
|
// bounded threadpool (1 worker on CUDA) means concurrent calls
|
||||||
|
// queue server-side; 60 s is enough headroom for a few items in
|
||||||
|
// the queue without surfacing a false transient.
|
||||||
|
let timeout_secs = std::env::var("FACE_DETECT_TIMEOUT_SEC")
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| s.parse::<u64>().ok())
|
||||||
|
.unwrap_or(60);
|
||||||
|
let client = Client::builder()
|
||||||
|
.timeout(Duration::from_secs(timeout_secs))
|
||||||
|
.build()
|
||||||
|
.expect("reqwest client build");
|
||||||
|
Self {
|
||||||
|
client,
|
||||||
|
base_url: base_url.map(|u| u.trim_end_matches('/').to_string()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_enabled(&self) -> bool {
|
||||||
|
self.base_url.is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Detect every face in `bytes`. ImageApi calls this from the file-watch
|
||||||
|
/// hook (Phase 3) and from the manual rerun handler. Empty `faces[]` in
|
||||||
|
/// the response is the no-faces signal — caller writes a marker row.
|
||||||
|
#[allow(dead_code)] // Phase 3 file-watch hook + rerun handler
|
||||||
|
pub async fn detect(
|
||||||
|
&self,
|
||||||
|
bytes: Vec<u8>,
|
||||||
|
meta: DetectMeta,
|
||||||
|
) -> std::result::Result<DetectResponse, FaceDetectError> {
|
||||||
|
let Some(base) = self.base_url.as_deref() else {
|
||||||
|
return Err(FaceDetectError::Disabled);
|
||||||
|
};
|
||||||
|
let url = format!("{}/api/internal/faces/detect", base);
|
||||||
|
self.post_multipart(&url, bytes, &meta).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Single-embedding endpoint for the manual face-create flow. Caller
|
||||||
|
/// crops the image to the user-drawn bbox and passes those bytes; we
|
||||||
|
/// run detection inside the crop and return the highest-confidence
|
||||||
|
/// face's embedding. Apollo returns 422 `no_face_in_crop` when the
|
||||||
|
/// box missed — surfaced here as `Permanent`.
|
||||||
|
pub async fn embed(
|
||||||
|
&self,
|
||||||
|
bytes: Vec<u8>,
|
||||||
|
meta: DetectMeta,
|
||||||
|
) -> std::result::Result<DetectResponse, FaceDetectError> {
|
||||||
|
let Some(base) = self.base_url.as_deref() else {
|
||||||
|
return Err(FaceDetectError::Disabled);
|
||||||
|
};
|
||||||
|
let url = format!("{}/api/internal/faces/embed", base);
|
||||||
|
self.post_multipart(&url, bytes, &meta).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Engine reachability + provider/model report. Used by ImageApi for a
|
||||||
|
/// startup sanity check; not on the hot path.
|
||||||
|
#[allow(dead_code)] // Phase 3 startup probe
|
||||||
|
pub async fn health(&self) -> Result<FaceHealth> {
|
||||||
|
let base = self.base_url.as_deref().context("face client disabled")?;
|
||||||
|
let url = format!("{}/api/internal/faces/health", base);
|
||||||
|
let resp = self.client.get(&url).send().await?.error_for_status()?;
|
||||||
|
let body: FaceHealth = resp.json().await?;
|
||||||
|
Ok(body)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_multipart(
|
||||||
|
&self,
|
||||||
|
url: &str,
|
||||||
|
bytes: Vec<u8>,
|
||||||
|
meta: &DetectMeta,
|
||||||
|
) -> std::result::Result<DetectResponse, FaceDetectError> {
|
||||||
|
let meta_json = serde_json::to_string(meta)
|
||||||
|
.map_err(|e| FaceDetectError::Permanent(anyhow::anyhow!("meta serialize: {e}")))?;
|
||||||
|
let form = reqwest::multipart::Form::new()
|
||||||
|
.text("meta", meta_json)
|
||||||
|
.part(
|
||||||
|
"file",
|
||||||
|
reqwest::multipart::Part::bytes(bytes)
|
||||||
|
.file_name(meta.rel_path.clone())
|
||||||
|
.mime_str("application/octet-stream")
|
||||||
|
.unwrap_or_else(|_| reqwest::multipart::Part::bytes(Vec::new())),
|
||||||
|
);
|
||||||
|
|
||||||
|
let resp = match self.client.post(url).multipart(form).send().await {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) if e.is_timeout() || e.is_connect() => {
|
||||||
|
return Err(FaceDetectError::Transient(anyhow::anyhow!(
|
||||||
|
"face client network: {e}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
return Err(FaceDetectError::Transient(anyhow::anyhow!(
|
||||||
|
"face client request: {e}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let status = resp.status();
|
||||||
|
if status.is_success() {
|
||||||
|
let body: DetectResponse = resp.json().await.map_err(|e| {
|
||||||
|
FaceDetectError::Transient(anyhow::anyhow!("face response decode: {e}"))
|
||||||
|
})?;
|
||||||
|
return Ok(body);
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_text = resp.text().await.unwrap_or_default();
|
||||||
|
Err(classify_error_response(status.as_u16(), &body_text))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Map an Apollo HTTP error response to a FaceDetectError. Pulled out as a
|
||||||
|
/// pure function so the marker-row contract (422 → Permanent, 503 →
|
||||||
|
/// Transient) is unit-testable without spinning up an HTTP server.
|
||||||
|
fn classify_error_response(status: u16, body_text: &str) -> FaceDetectError {
|
||||||
|
// Apollo encodes its error class in the JSON body's `detail`. Try to
|
||||||
|
// parse it; fall back to status-only classification.
|
||||||
|
let detail_code = serde_json::from_str::<serde_json::Value>(body_text)
|
||||||
|
.ok()
|
||||||
|
.and_then(|v| {
|
||||||
|
// detail can be a string ("decode_failed") or an object
|
||||||
|
// ({"code": "cuda_oom", ...}) depending on the endpoint and
|
||||||
|
// Apollo's response shape — handle both.
|
||||||
|
v.get("detail")
|
||||||
|
.and_then(|d| d.as_str().map(str::to_string))
|
||||||
|
.or_else(|| {
|
||||||
|
v.get("detail")
|
||||||
|
.and_then(|d| d.get("code"))
|
||||||
|
.and_then(|c| c.as_str())
|
||||||
|
.map(str::to_string)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if status == 422 {
|
||||||
|
return FaceDetectError::Permanent(anyhow::anyhow!(
|
||||||
|
"face detect 422 {}: {}",
|
||||||
|
detail_code,
|
||||||
|
body_text
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if status == 503 {
|
||||||
|
return FaceDetectError::Transient(anyhow::anyhow!(
|
||||||
|
"face detect 503 {}: {}",
|
||||||
|
detail_code,
|
||||||
|
body_text
|
||||||
|
));
|
||||||
|
}
|
||||||
|
// Infra-level 4xx that an operator can fix without re-encoding the
|
||||||
|
// bytes: 408 (proxy timeout), 413 (request too large — reverse-proxy
|
||||||
|
// body cap), 429 (rate limit). Treating these as Permanent poisons
|
||||||
|
// every photo that hit the misconfig with `status='failed'` and
|
||||||
|
// requires a manual DELETE to recover. Defer instead so the next
|
||||||
|
// scan tick retries naturally once the proxy is fixed.
|
||||||
|
if matches!(status, 408 | 413 | 429) {
|
||||||
|
return FaceDetectError::Transient(anyhow::anyhow!(
|
||||||
|
"face detect {} {}: {}",
|
||||||
|
status,
|
||||||
|
detail_code,
|
||||||
|
body_text
|
||||||
|
));
|
||||||
|
}
|
||||||
|
// Any other 4xx: be conservative and treat as Permanent so we don't
|
||||||
|
// loop forever on a stable rejection. Any other 5xx: Transient —
|
||||||
|
// likely intermittent.
|
||||||
|
if (400..500).contains(&status) {
|
||||||
|
FaceDetectError::Permanent(anyhow::anyhow!(
|
||||||
|
"face detect {} {}: {}",
|
||||||
|
status,
|
||||||
|
detail_code,
|
||||||
|
body_text
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
FaceDetectError::Transient(anyhow::anyhow!(
|
||||||
|
"face detect {} {}: {}",
|
||||||
|
status,
|
||||||
|
detail_code,
|
||||||
|
body_text
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
fn is_permanent(e: &FaceDetectError) -> bool {
|
||||||
|
matches!(e, FaceDetectError::Permanent(_))
|
||||||
|
}
|
||||||
|
fn is_transient(e: &FaceDetectError) -> bool {
|
||||||
|
matches!(e, FaceDetectError::Transient(_))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn classify_422_decode_failed_is_permanent() {
|
||||||
|
// Permanent → ImageApi marks status='failed' and stops retrying.
|
||||||
|
let e = classify_error_response(422, r#"{"detail":"decode_failed: bad bytes"}"#);
|
||||||
|
assert!(is_permanent(&e), "422 decode_failed must be Permanent");
|
||||||
|
assert!(format!("{e}").contains("decode_failed"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn classify_503_cuda_oom_is_transient() {
|
||||||
|
// Transient → ImageApi must NOT write a marker so the next scan
|
||||||
|
// retries. The detail.code is nested in an object rather than a
|
||||||
|
// bare string; the parser handles both.
|
||||||
|
let e = classify_error_response(
|
||||||
|
503,
|
||||||
|
r#"{"detail":{"code":"cuda_oom","error":"out of memory"}}"#,
|
||||||
|
);
|
||||||
|
assert!(is_transient(&e), "503 cuda_oom must be Transient");
|
||||||
|
assert!(format!("{e}").contains("cuda_oom"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn classify_500_is_transient_other_4xx_is_permanent() {
|
||||||
|
// Conservative split: 5xx defers (intermittent), other 4xx
|
||||||
|
// is treated as a stable rejection so we don't loop forever.
|
||||||
|
assert!(is_transient(&classify_error_response(500, "")));
|
||||||
|
assert!(is_transient(&classify_error_response(502, "{}")));
|
||||||
|
assert!(is_permanent(&classify_error_response(400, "{}")));
|
||||||
|
assert!(is_permanent(&classify_error_response(404, "{}")));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn classify_infra_4xx_is_transient() {
|
||||||
|
// 408 / 413 / 429 are operator-fixable proxy/infra errors.
|
||||||
|
// Marking them Permanent poisons every affected photo with
|
||||||
|
// status='failed' and requires manual SQL to recover. The
|
||||||
|
// 413 path specifically bit us when nginx defaulted to a 1 MB
|
||||||
|
// body cap and rejected normal-size photos before they reached
|
||||||
|
// the backend.
|
||||||
|
assert!(is_transient(&classify_error_response(408, "")));
|
||||||
|
assert!(is_transient(&classify_error_response(413, "<html>nginx</html>")));
|
||||||
|
assert!(is_transient(&classify_error_response(429, "{}")));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn classify_handles_unparseable_body() {
|
||||||
|
// Apollo can return non-JSON on misroute / proxy errors; the
|
||||||
|
// classifier must still produce a useful variant.
|
||||||
|
let e = classify_error_response(503, "<html>nginx</html>");
|
||||||
|
assert!(is_transient(&e));
|
||||||
|
}
|
||||||
|
}
|
||||||
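The config fallback described in the module doc (dedicated var first, then the shared Apollo base URL, else disabled) is simple enough to sketch. The function name here is assumed, not the literal startup code:

// Assumed wiring sketch, not the actual startup code in this commit.
fn face_client_from_env() -> FaceClient {
    let base = std::env::var("APOLLO_FACE_API_BASE_URL")
        .or_else(|_| std::env::var("APOLLO_API_BASE_URL"))
        .ok();
    // Both vars unset → base is None → is_enabled() returns false and the
    // file watcher / manual-create handlers no-op.
    FaceClient::new(base)
}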
986
src/ai/handlers.rs
Normal file
@@ -0,0 +1,986 @@
|
|||||||
|
use actix_web::{HttpRequest, HttpResponse, Responder, delete, get, post, web};
|
||||||
|
use opentelemetry::KeyValue;
|
||||||
|
use opentelemetry::trace::{Span, Status, Tracer};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::ai::insight_chat::{ChatStreamEvent, ChatTurnRequest};
|
||||||
|
use crate::ai::ollama::ChatMessage;
|
||||||
|
use crate::ai::{InsightGenerator, ModelCapabilities, OllamaClient};
|
||||||
|
use crate::data::Claims;
|
||||||
|
use crate::database::{ExifDao, InsightDao};
|
||||||
|
use crate::libraries;
|
||||||
|
use crate::otel::{extract_context_from_request, global_tracer};
|
||||||
|
use crate::state::AppState;
|
||||||
|
use crate::utils::normalize_path;
|
||||||
|
|
||||||
|
/// Hardcoded few-shot exemplars for the agentic endpoint. Populate with the
|
||||||
|
/// ids of approved insights whose `training_messages` should be compressed
|
||||||
|
/// into trajectory form and injected into the system prompt. Empty = no
|
||||||
|
/// change in behavior. Request-level `fewshot_insight_ids` overrides this
|
||||||
|
/// when non-empty.
|
||||||
|
// const DEFAULT_FEWSHOT_INSIGHT_IDS: &[i32] = &[2918, 2908];
|
||||||
|
const DEFAULT_FEWSHOT_INSIGHT_IDS: &[i32] = &[];
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
pub struct GeneratePhotoInsightRequest {
|
||||||
|
pub file_path: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub model: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub system_prompt: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub num_ctx: Option<i32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub temperature: Option<f32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub top_p: Option<f32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub top_k: Option<i32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub min_p: Option<f32>,
|
||||||
|
/// `"local"` (default, Ollama with images) | `"hybrid"` (local vision +
|
||||||
|
/// OpenRouter chat). Only respected by the agentic endpoint.
|
||||||
|
#[serde(default)]
|
||||||
|
pub backend: Option<String>,
|
||||||
|
/// Insight ids whose stored `training_messages` should be compressed
|
||||||
|
/// into few-shot trajectories and injected into the system prompt.
|
||||||
|
/// Silently truncated to the first 2. When absent/empty, the handler
|
||||||
|
/// falls back to `DEFAULT_FEWSHOT_INSIGHT_IDS`.
|
||||||
|
#[serde(default)]
|
||||||
|
pub fewshot_insight_ids: Option<Vec<i32>>,
|
||||||
|
}
|
||||||
|
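For reference, a request body this struct accepts might look like the following; the values are illustrative only, and `backend` plus `fewshot_insight_ids` only take effect on the agentic endpoint.

// Illustrative request body for POST /insights/generate/agentic — the field
// values below are examples, not taken from the original commit:
// {
//   "file_path": "/photos/2024/08/IMG_1234.jpg",
//   "model": "qwen2.5",
//   "num_ctx": 8192,
//   "backend": "hybrid",
//   "fewshot_insight_ids": [2918, 2908]
// }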
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
pub struct GetPhotoInsightQuery {
|
||||||
|
pub path: String,
|
||||||
|
/// Library context for this lookup. Used to pick the right content
|
||||||
|
/// hash when the same rel_path exists under multiple roots.
|
||||||
|
#[serde(default)]
|
||||||
|
pub library: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
pub struct RateInsightRequest {
|
||||||
|
pub file_path: String,
|
||||||
|
pub approved: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
pub struct ExportTrainingDataQuery {
|
||||||
|
#[serde(default)]
|
||||||
|
pub approved_only: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct PhotoInsightResponse {
|
||||||
|
pub id: i32,
|
||||||
|
pub file_path: String,
|
||||||
|
pub title: String,
|
||||||
|
pub summary: String,
|
||||||
|
pub generated_at: i64,
|
||||||
|
pub model_version: String,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub prompt_eval_count: Option<i32>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub eval_count: Option<i32>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub approved: Option<bool>,
|
||||||
|
pub backend: String,
|
||||||
|
/// True when the insight was generated agentically and a chat
|
||||||
|
/// continuation can be started against it. Drives the mobile chat button.
|
||||||
|
pub has_training_messages: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct AvailableModelsResponse {
|
||||||
|
pub primary: ServerModels,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub fallback: Option<ServerModels>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct ServerModels {
|
||||||
|
pub url: String,
|
||||||
|
pub models: Vec<ModelCapabilities>,
|
||||||
|
pub default_model: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// POST /insights/generate - Generate insight for a specific photo
|
||||||
|
#[post("/insights/generate")]
|
||||||
|
pub async fn generate_insight_handler(
|
||||||
|
http_request: HttpRequest,
|
||||||
|
_claims: Claims,
|
||||||
|
request: web::Json<GeneratePhotoInsightRequest>,
|
||||||
|
insight_generator: web::Data<InsightGenerator>,
|
||||||
|
) -> impl Responder {
|
||||||
|
let parent_context = extract_context_from_request(&http_request);
|
||||||
|
let tracer = global_tracer();
|
||||||
|
let mut span = tracer.start_with_context("http.insights.generate", &parent_context);
|
||||||
|
|
||||||
|
let normalized_path = normalize_path(&request.file_path);
|
||||||
|
|
||||||
|
span.set_attribute(KeyValue::new("file_path", normalized_path.clone()));
|
||||||
|
if let Some(ref model) = request.model {
|
||||||
|
span.set_attribute(KeyValue::new("model", model.clone()));
|
||||||
|
}
|
||||||
|
if let Some(ref prompt) = request.system_prompt {
|
||||||
|
span.set_attribute(KeyValue::new("has_custom_prompt", true));
|
||||||
|
span.set_attribute(KeyValue::new("prompt_length", prompt.len() as i64));
|
||||||
|
}
|
||||||
|
if let Some(ctx) = request.num_ctx {
|
||||||
|
span.set_attribute(KeyValue::new("num_ctx", ctx as i64));
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Manual insight generation triggered for photo: {} with model: {:?}, custom_prompt: {}, num_ctx: {:?}",
|
||||||
|
normalized_path,
|
||||||
|
request.model,
|
||||||
|
request.system_prompt.is_some(),
|
||||||
|
request.num_ctx
|
||||||
|
);
|
||||||
|
|
||||||
|
// Generate insight with optional custom model, system prompt, and context size
|
||||||
|
let result = insight_generator
|
||||||
|
.generate_insight_for_photo_with_config(
|
||||||
|
&normalized_path,
|
||||||
|
request.model.clone(),
|
||||||
|
request.system_prompt.clone(),
|
||||||
|
request.num_ctx,
|
||||||
|
request.temperature,
|
||||||
|
request.top_p,
|
||||||
|
request.top_k,
|
||||||
|
request.min_p,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(()) => {
|
||||||
|
span.set_status(Status::Ok);
|
||||||
|
HttpResponse::Ok().json(serde_json::json!({
|
||||||
|
"success": true,
|
||||||
|
"message": "Insight generated successfully"
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to generate insight: {:?}", e);
|
||||||
|
span.set_status(Status::error(e.to_string()));
|
||||||
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to generate insight: {:?}", e)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// GET /insights?path=/path/to/photo.jpg - Fetch insight for specific photo
|
||||||
|
#[get("/insights")]
|
||||||
|
pub async fn get_insight_handler(
|
||||||
|
_claims: Claims,
|
||||||
|
query: web::Query<GetPhotoInsightQuery>,
|
||||||
|
app_state: web::Data<AppState>,
|
||||||
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
||||||
|
exif_dao: web::Data<std::sync::Mutex<Box<dyn ExifDao>>>,
|
||||||
|
) -> impl Responder {
|
||||||
|
let normalized_path = normalize_path(&query.path);
|
||||||
|
log::debug!("Fetching insight for {}", normalized_path);
|
||||||
|
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
|
||||||
|
// Expand to rel_paths sharing content so an insight generated under
|
||||||
|
// library 1 still shows when the same photo is viewed from library 2.
|
||||||
|
let library = libraries::resolve_library_param(&app_state, query.library.as_deref())
|
||||||
|
.ok()
|
||||||
|
.flatten()
|
||||||
|
.unwrap_or_else(|| app_state.primary_library());
|
||||||
|
let sibling_paths = {
|
||||||
|
let mut exif = exif_dao.lock().expect("Unable to lock ExifDao");
|
||||||
|
exif.get_rel_paths_sharing_content(&otel_context, library.id, &normalized_path)
|
||||||
|
.unwrap_or_else(|_| vec![normalized_path.clone()])
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
||||||
|
|
||||||
|
match dao.get_insight_for_paths(&otel_context, &sibling_paths) {
|
||||||
|
Ok(Some(insight)) => {
|
||||||
|
let response = PhotoInsightResponse {
|
||||||
|
id: insight.id,
|
||||||
|
file_path: insight.file_path,
|
||||||
|
title: insight.title,
|
||||||
|
summary: insight.summary,
|
||||||
|
generated_at: insight.generated_at,
|
||||||
|
model_version: insight.model_version,
|
||||||
|
prompt_eval_count: None,
|
||||||
|
eval_count: None,
|
||||||
|
approved: insight.approved,
|
||||||
|
has_training_messages: insight.training_messages.is_some(),
|
||||||
|
backend: insight.backend,
|
||||||
|
};
|
||||||
|
HttpResponse::Ok().json(response)
|
||||||
|
}
|
||||||
|
Ok(None) => HttpResponse::NotFound().json(serde_json::json!({
|
||||||
|
"error": "Insight not found"
|
||||||
|
})),
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to fetch insight ({}): {:?}", &query.path, e);
|
||||||
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to fetch insight: {:?}", e)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DELETE /insights?path=/path/to/photo.jpg - Remove insight (will regenerate on next request)
|
||||||
|
#[delete("/insights")]
|
||||||
|
pub async fn delete_insight_handler(
|
||||||
|
_claims: Claims,
|
||||||
|
query: web::Query<GetPhotoInsightQuery>,
|
||||||
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
||||||
|
) -> impl Responder {
|
||||||
|
let normalized_path = normalize_path(&query.path);
|
||||||
|
log::info!("Deleting insight for {}", normalized_path);
|
||||||
|
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
||||||
|
|
||||||
|
match dao.delete_insight(&otel_context, &normalized_path) {
|
||||||
|
Ok(()) => HttpResponse::Ok().json(serde_json::json!({
|
||||||
|
"success": true,
|
||||||
|
"message": "Insight deleted successfully"
|
||||||
|
})),
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to delete insight: {:?}", e);
|
||||||
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to delete insight: {:?}", e)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// GET /insights/all - Get all insights
|
||||||
|
#[get("/insights/all")]
|
||||||
|
pub async fn get_all_insights_handler(
|
||||||
|
_claims: Claims,
|
||||||
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
||||||
|
) -> impl Responder {
|
||||||
|
log::debug!("Fetching all insights");
|
||||||
|
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
||||||
|
|
||||||
|
match dao.get_all_insights(&otel_context) {
|
||||||
|
Ok(insights) => {
|
||||||
|
let responses: Vec<PhotoInsightResponse> = insights
|
||||||
|
.into_iter()
|
||||||
|
.map(|insight| PhotoInsightResponse {
|
||||||
|
id: insight.id,
|
||||||
|
file_path: insight.file_path,
|
||||||
|
title: insight.title,
|
||||||
|
summary: insight.summary,
|
||||||
|
generated_at: insight.generated_at,
|
||||||
|
model_version: insight.model_version,
|
||||||
|
prompt_eval_count: None,
|
||||||
|
eval_count: None,
|
||||||
|
approved: insight.approved,
|
||||||
|
has_training_messages: insight.training_messages.is_some(),
|
||||||
|
backend: insight.backend,
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
HttpResponse::Ok().json(responses)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to fetch all insights: {:?}", e);
|
||||||
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to fetch insights: {:?}", e)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// POST /insights/generate/agentic - Generate insight using agentic tool-calling loop
|
||||||
|
#[post("/insights/generate/agentic")]
|
||||||
|
pub async fn generate_agentic_insight_handler(
|
||||||
|
http_request: HttpRequest,
|
||||||
|
_claims: Claims,
|
||||||
|
request: web::Json<GeneratePhotoInsightRequest>,
|
||||||
|
insight_generator: web::Data<InsightGenerator>,
|
||||||
|
insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
|
||||||
|
) -> impl Responder {
|
||||||
|
let parent_context = extract_context_from_request(&http_request);
|
||||||
|
let tracer = global_tracer();
|
||||||
|
let mut span = tracer.start_with_context("http.insights.generate_agentic", &parent_context);
|
||||||
|
|
||||||
|
let normalized_path = normalize_path(&request.file_path);
|
||||||
|
|
||||||
|
span.set_attribute(KeyValue::new("file_path", normalized_path.clone()));
|
||||||
|
if let Some(ref model) = request.model {
|
||||||
|
span.set_attribute(KeyValue::new("model", model.clone()));
|
||||||
|
}
|
||||||
|
if let Some(ref prompt) = request.system_prompt {
|
||||||
|
span.set_attribute(KeyValue::new("has_custom_prompt", true));
|
||||||
|
span.set_attribute(KeyValue::new("prompt_length", prompt.len() as i64));
|
||||||
|
}
|
||||||
|
if let Some(ctx) = request.num_ctx {
|
||||||
|
span.set_attribute(KeyValue::new("num_ctx", ctx as i64));
|
||||||
|
}
|
||||||
|
|
||||||
|
let max_iterations: usize = std::env::var("AGENTIC_MAX_ITERATIONS")
|
||||||
|
.ok()
|
||||||
|
.and_then(|v| v.parse().ok())
|
||||||
|
.unwrap_or(12);
|
||||||
|
|
||||||
|
span.set_attribute(KeyValue::new("max_iterations", max_iterations as i64));
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Agentic insight generation triggered for photo: {} with model: {:?}, max_iterations: {}",
|
||||||
|
normalized_path,
|
||||||
|
request.model,
|
||||||
|
max_iterations
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some(ref b) = request.backend {
|
||||||
|
span.set_attribute(KeyValue::new("backend", b.clone()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve few-shot ids: request-provided ids take precedence when
|
||||||
|
// non-empty; otherwise fall back to the hardcoded defaults.
|
||||||
|
let fewshot_ids: Vec<i32> = match request.fewshot_insight_ids.as_deref() {
|
||||||
|
Some(ids) if !ids.is_empty() => ids.iter().take(2).copied().collect(),
|
||||||
|
_ => DEFAULT_FEWSHOT_INSIGHT_IDS
|
||||||
|
.iter()
|
||||||
|
.take(2)
|
||||||
|
.copied()
|
||||||
|
.collect(),
|
||||||
|
};
|
||||||
|
span.set_attribute(KeyValue::new("fewshot_count", fewshot_ids.len() as i64));
|
||||||
|
|
||||||
|
let fewshot_examples: Vec<Vec<ChatMessage>> = {
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
||||||
|
fewshot_ids
|
||||||
|
.iter()
|
||||||
|
.filter_map(|id| {
|
||||||
|
let insight = dao.get_insight_by_id(&otel_context, *id).ok().flatten()?;
|
||||||
|
let json = insight.training_messages?;
|
||||||
|
match serde_json::from_str::<Vec<ChatMessage>>(&json) {
|
||||||
|
Ok(msgs) => Some(msgs),
|
||||||
|
Err(e) => {
|
||||||
|
log::warn!(
|
||||||
|
"Few-shot insight {} has malformed training_messages: {}",
|
||||||
|
id,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = insight_generator
|
||||||
|
.generate_agentic_insight_for_photo(
|
||||||
|
&normalized_path,
|
||||||
|
request.model.clone(),
|
||||||
|
request.system_prompt.clone(),
|
||||||
|
request.num_ctx,
|
||||||
|
request.temperature,
|
||||||
|
request.top_p,
|
||||||
|
request.top_k,
|
||||||
|
request.min_p,
|
||||||
|
max_iterations,
|
||||||
|
request.backend.clone(),
|
||||||
|
fewshot_examples,
|
||||||
|
fewshot_ids,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok((prompt_eval_count, eval_count)) => {
|
||||||
|
span.set_status(Status::Ok);
|
||||||
|
// Fetch the stored insight to return it
|
||||||
|
let otel_context = opentelemetry::Context::new();
|
||||||
|
let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");
|
||||||
|
match dao.get_insight(&otel_context, &normalized_path) {
|
||||||
|
Ok(Some(insight)) => {
|
||||||
|
let response = PhotoInsightResponse {
|
||||||
|
id: insight.id,
|
||||||
|
file_path: insight.file_path,
|
||||||
|
title: insight.title,
|
||||||
|
summary: insight.summary,
|
||||||
|
generated_at: insight.generated_at,
|
||||||
|
model_version: insight.model_version,
|
||||||
|
prompt_eval_count,
|
||||||
|
eval_count,
|
||||||
|
approved: insight.approved,
|
||||||
|
has_training_messages: insight.training_messages.is_some(),
|
||||||
|
backend: insight.backend,
|
||||||
|
};
|
||||||
|
HttpResponse::Ok().json(response)
|
||||||
|
}
|
||||||
|
Ok(None) => HttpResponse::Ok().json(serde_json::json!({
|
||||||
|
"success": true,
|
||||||
|
"message": "Agentic insight generated successfully"
|
||||||
|
})),
|
||||||
|
Err(e) => {
|
||||||
|
log::warn!("Insight stored but failed to retrieve: {:?}", e);
|
||||||
|
HttpResponse::Ok().json(serde_json::json!({
|
||||||
|
"success": true,
|
||||||
|
"message": "Agentic insight generated successfully"
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let error_msg = format!("{:?}", e);
|
||||||
|
log::error!("Failed to generate agentic insight: {}", error_msg);
|
||||||
|
span.set_status(Status::error(error_msg.clone()));
|
||||||
|
|
||||||
|
if error_msg.contains("tool calling not supported")
|
||||||
|
|| error_msg.contains("model not available")
|
||||||
|
{
|
||||||
|
HttpResponse::BadRequest().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to generate agentic insight: {}", error_msg)
|
||||||
|
}))
|
||||||
|
} else if error_msg.contains("error parsing tool call") {
|
||||||
|
HttpResponse::BadRequest().json(serde_json::json!({
|
||||||
|
"error": "Model is not compatible with Ollama's tool calling protocol. Try a model known to support native tool calling (e.g. llama3.1, llama3.2, qwen2.5, mistral-nemo)."
|
||||||
|
}))
|
||||||
|
} else {
|
||||||
|
HttpResponse::InternalServerError().json(serde_json::json!({
|
||||||
|
"error": format!("Failed to generate agentic insight: {}", error_msg)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// GET /insights/models - List available models from both servers with capabilities
#[get("/insights/models")]
pub async fn get_available_models_handler(
    _claims: Claims,
    app_state: web::Data<crate::state::AppState>,
) -> impl Responder {
    log::debug!("Fetching available models with capabilities");

    let ollama_client = &app_state.ollama;

    // Fetch models with capabilities from primary server
    let primary_models =
        match OllamaClient::list_models_with_capabilities(&ollama_client.primary_url).await {
            Ok(models) => models,
            Err(e) => {
                log::warn!("Failed to fetch models from primary server: {:?}", e);
                vec![]
            }
        };

    let primary = ServerModels {
        url: ollama_client.primary_url.clone(),
        models: primary_models,
        default_model: ollama_client.primary_model.clone(),
    };

    // Fetch models with capabilities from fallback server if configured
    let fallback = if let Some(fallback_url) = &ollama_client.fallback_url {
        match OllamaClient::list_models_with_capabilities(fallback_url).await {
            Ok(models) => Some(ServerModels {
                url: fallback_url.clone(),
                models,
                default_model: ollama_client
                    .fallback_model
                    .clone()
                    .unwrap_or_else(|| ollama_client.primary_model.clone()),
            }),
            Err(e) => {
                log::warn!("Failed to fetch models from fallback server: {:?}", e);
                None
            }
        }
    } else {
        None
    };

    let response = AvailableModelsResponse { primary, fallback };

    HttpResponse::Ok().json(response)
}

#[derive(Debug, Serialize)]
pub struct OpenRouterModelsResponse {
    pub models: Vec<String>,
    pub default_model: Option<String>,
    pub configured: bool,
}

/// GET /insights/openrouter/models - Curated OpenRouter model ids exposed
/// to clients for the hybrid backend. Returned verbatim from
/// `OPENROUTER_ALLOWED_MODELS`; no live call to OpenRouter.
#[get("/insights/openrouter/models")]
pub async fn get_openrouter_models_handler(
    _claims: Claims,
    app_state: web::Data<crate::state::AppState>,
) -> impl Responder {
    let configured = app_state.openrouter.is_some();
    let default_model = app_state
        .openrouter
        .as_ref()
        .map(|c| c.primary_model.clone());
    let response = OpenRouterModelsResponse {
        models: app_state.openrouter_allowed_models.clone(),
        default_model,
        configured,
    };
    HttpResponse::Ok().json(response)
}

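// Hedged sketch, not part of the original change: the JSON shape this endpoint
// returns when OpenRouter is not configured. Model ids and values are invented
// purely for illustration.
#[cfg(test)]
mod openrouter_models_response_shape {
    use super::*;

    #[test]
    fn unconfigured_response_serializes_with_empty_model_list() {
        let response = OpenRouterModelsResponse {
            models: vec![],
            default_model: None,
            configured: false,
        };
        let json = serde_json::to_value(&response).unwrap();
        assert_eq!(json["configured"], false);
        assert!(json["models"].as_array().unwrap().is_empty());
    }
}
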
/// POST /insights/rate - Rate an insight (thumbs up/down for training data)
#[post("/insights/rate")]
pub async fn rate_insight_handler(
    _claims: Claims,
    request: web::Json<RateInsightRequest>,
    insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
) -> impl Responder {
    let normalized_path = normalize_path(&request.file_path);
    log::info!(
        "Rating insight for {}: approved={}",
        normalized_path,
        request.approved
    );

    let otel_context = opentelemetry::Context::new();
    let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");

    match dao.rate_insight(&otel_context, &normalized_path, request.approved) {
        Ok(()) => HttpResponse::Ok().json(serde_json::json!({
            "success": true,
            "message": "Insight rated successfully"
        })),
        Err(e) => {
            log::error!("Failed to rate insight: {:?}", e);
            HttpResponse::InternalServerError().json(serde_json::json!({
                "error": format!("Failed to rate insight: {:?}", e)
            }))
        }
    }
}

/// GET /insights/training-data - Export approved training data as JSONL
#[get("/insights/training-data")]
pub async fn export_training_data_handler(
    _claims: Claims,
    query: web::Query<ExportTrainingDataQuery>,
    insight_dao: web::Data<std::sync::Mutex<Box<dyn InsightDao>>>,
) -> impl Responder {
    let approved_only = query.approved_only.unwrap_or(true);
    log::info!("Exporting training data (approved_only={})", approved_only);

    let otel_context = opentelemetry::Context::new();
    let mut dao = insight_dao.lock().expect("Unable to lock InsightDao");

    let insights = if approved_only {
        dao.get_approved_insights(&otel_context)
    } else {
        dao.get_all_insights(&otel_context)
    };

    match insights {
        Ok(insights) => {
            let mut jsonl = String::new();
            for insight in &insights {
                if let Some(ref messages) = insight.training_messages {
                    let entry = serde_json::json!({
                        "file_path": insight.file_path,
                        "model_version": insight.model_version,
                        "generated_at": insight.generated_at,
                        "title": insight.title,
                        "summary": insight.summary,
                        "messages": serde_json::from_str::<serde_json::Value>(messages)
                            .unwrap_or(serde_json::Value::Null),
                    });
                    jsonl.push_str(&entry.to_string());
                    jsonl.push('\n');
                }
            }

            HttpResponse::Ok()
                .content_type("application/jsonl")
                .insert_header((
                    "Content-Disposition",
                    "attachment; filename=\"training_data.jsonl\"",
                ))
                .body(jsonl)
        }
        Err(e) => {
            log::error!("Failed to export training data: {:?}", e);
            HttpResponse::InternalServerError().json(serde_json::json!({
                "error": format!("Failed to export training data: {:?}", e)
            }))
        }
    }
}

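// Hedged illustration, not part of the original change: each exported record is
// a single JSON object on its own line, so consumers can split the file on '\n'.
// The field values below are invented purely for the example.
#[cfg(test)]
mod training_data_jsonl_shape {
    #[test]
    fn one_record_serializes_to_a_single_line() {
        let entry = serde_json::json!({
            "file_path": "2024/05/beach.jpg",
            "model_version": "llama3.1:8b",
            "messages": [{ "role": "user", "content": "Describe this photo" }],
        });
        let line = entry.to_string();
        assert!(!line.contains('\n'));
    }
}
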
#[derive(Debug, Deserialize)]
pub struct ChatTurnHttpRequest {
    pub file_path: String,
    #[serde(default)]
    pub library: Option<String>,
    pub user_message: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub backend: Option<String>,
    #[serde(default)]
    pub num_ctx: Option<i32>,
    #[serde(default)]
    pub temperature: Option<f32>,
    #[serde(default)]
    pub top_p: Option<f32>,
    #[serde(default)]
    pub top_k: Option<i32>,
    #[serde(default)]
    pub min_p: Option<f32>,
    #[serde(default)]
    pub max_iterations: Option<usize>,
    #[serde(default)]
    pub amend: bool,
}

#[derive(Debug, Serialize)]
pub struct ChatTurnHttpResponse {
    pub assistant_message: String,
    pub tool_calls_made: usize,
    pub iterations_used: usize,
    pub truncated: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_eval_count: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eval_count: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub amended_insight_id: Option<i32>,
    pub backend: String,
    pub model: String,
}

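// Hedged sketch, not part of the original change: the minimal JSON body the chat
// endpoint accepts. Only file_path and user_message are required; every other
// field falls back to its serde default. The example path is invented.
#[cfg(test)]
mod chat_turn_request_shape {
    use super::*;

    #[test]
    fn minimal_body_deserializes_with_defaults() {
        let body = r#"{"file_path":"2024/05/beach.jpg","user_message":"Who is in this photo?"}"#;
        let req: ChatTurnHttpRequest = serde_json::from_str(body).expect("valid request body");
        assert_eq!(req.file_path, "2024/05/beach.jpg");
        assert!(req.model.is_none() && req.backend.is_none());
        assert!(!req.amend);
    }
}
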
/// POST /insights/chat — submit a follow-up turn against an existing insight.
#[post("/insights/chat")]
pub async fn chat_turn_handler(
    http_request: HttpRequest,
    _claims: Claims,
    request: web::Json<ChatTurnHttpRequest>,
    app_state: web::Data<AppState>,
) -> impl Responder {
    let parent_context = extract_context_from_request(&http_request);
    let tracer = global_tracer();
    let mut span = tracer.start_with_context("http.insights.chat", &parent_context);
    span.set_attribute(KeyValue::new("file_path", request.file_path.clone()));

    let library = match libraries::resolve_library_param(&app_state, request.library.as_deref()) {
        Ok(Some(lib)) => lib,
        Ok(None) => app_state.primary_library(),
        Err(e) => {
            return HttpResponse::BadRequest().json(serde_json::json!({
                "error": format!("invalid library: {}", e)
            }));
        }
    };

    let chat_req = ChatTurnRequest {
        library_id: library.id,
        file_path: request.file_path.clone(),
        user_message: request.user_message.clone(),
        model: request.model.clone(),
        backend: request.backend.clone(),
        num_ctx: request.num_ctx,
        temperature: request.temperature,
        top_p: request.top_p,
        top_k: request.top_k,
        min_p: request.min_p,
        max_iterations: request.max_iterations,
        amend: request.amend,
    };

    match app_state.insight_chat.chat_turn(chat_req).await {
        Ok(result) => {
            span.set_status(Status::Ok);
            HttpResponse::Ok().json(ChatTurnHttpResponse {
                assistant_message: result.assistant_message,
                tool_calls_made: result.tool_calls_made,
                iterations_used: result.iterations_used,
                truncated: result.truncated,
                prompt_eval_count: result.prompt_eval_count,
                eval_count: result.eval_count,
                amended_insight_id: result.amended_insight_id,
                backend: result.backend_used,
                model: result.model_used,
            })
        }
        Err(e) => {
            let msg = format!("{}", e);
            log::error!("Chat turn failed: {}", msg);
            span.set_status(Status::error(msg.clone()));

            // Map well-known errors to client-facing 4xx codes.
            if msg.contains("no insight found") {
                HttpResponse::NotFound().json(serde_json::json!({ "error": msg }))
            } else if msg.contains("no chat history") {
                HttpResponse::Conflict().json(serde_json::json!({ "error": msg }))
            } else if msg.contains("user_message")
                || msg.contains("unknown backend")
                || msg.contains("switching from local to hybrid")
                || msg.contains("hybrid backend unavailable")
            {
                HttpResponse::BadRequest().json(serde_json::json!({ "error": msg }))
            } else {
                HttpResponse::InternalServerError().json(serde_json::json!({ "error": msg }))
            }
        }
    }
}

#[derive(Debug, Deserialize)]
pub struct ChatHistoryQuery {
    pub path: String,
    #[serde(default)]
    pub library: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct ChatHistoryHttpResponse {
    pub messages: Vec<RenderedHistoryMessage>,
    pub turn_count: usize,
    pub model_version: String,
    pub backend: String,
}

#[derive(Debug, Serialize)]
pub struct RenderedHistoryMessage {
    pub role: String,
    pub content: String,
    pub is_initial: bool,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<HistoryToolInvocation>,
}

#[derive(Debug, Serialize)]
pub struct HistoryToolInvocation {
    pub name: String,
    pub arguments: serde_json::Value,
    pub result: String,
    #[serde(skip_serializing_if = "std::ops::Not::not")]
    pub result_truncated: bool,
}

#[derive(Debug, Deserialize)]
pub struct ChatRewindHttpRequest {
    pub file_path: String,
    #[serde(default)]
    pub library: Option<String>,
    /// 0-based index into the rendered transcript. The message at this
    /// index, and everything after it, is discarded. Must be > 0 — the
    /// initial user message is protected.
    pub discard_from_rendered_index: usize,
}

/// POST /insights/chat/rewind — truncate the stored conversation so the
/// rendered message at `discard_from_rendered_index` (and everything after)
/// is removed. Use when a user wants to retry a turn with a different
/// prompt without prior replies poisoning context.
#[post("/insights/chat/rewind")]
pub async fn chat_rewind_handler(
    _claims: Claims,
    request: web::Json<ChatRewindHttpRequest>,
    app_state: web::Data<AppState>,
) -> impl Responder {
    let library = match libraries::resolve_library_param(&app_state, request.library.as_deref()) {
        Ok(Some(lib)) => lib,
        Ok(None) => app_state.primary_library(),
        Err(e) => {
            return HttpResponse::BadRequest().json(serde_json::json!({
                "error": format!("invalid library: {}", e)
            }));
        }
    };

    match app_state
        .insight_chat
        .rewind_history(
            library.id,
            &request.file_path,
            request.discard_from_rendered_index,
        )
        .await
    {
        Ok(()) => HttpResponse::Ok().json(serde_json::json!({ "success": true })),
        Err(e) => {
            let msg = format!("{}", e);
            log::error!("Chat rewind failed: {}", msg);
            if msg.contains("no insight found") {
                HttpResponse::NotFound().json(serde_json::json!({ "error": msg }))
            } else if msg.contains("no chat history") {
                HttpResponse::Conflict().json(serde_json::json!({ "error": msg }))
            } else if msg.contains("cannot discard the initial") || msg.contains("out of range") {
                HttpResponse::BadRequest().json(serde_json::json!({ "error": msg }))
            } else {
                HttpResponse::InternalServerError().json(serde_json::json!({ "error": msg }))
            }
        }
    }
}

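// Illustrative sketch only (not in the original diff): what the rewind index
// means for a rendered transcript. With four rendered messages, a
// discard_from_rendered_index of 2 keeps just the first exchange; index 0 is
// rejected by the service because the initial user message is protected.
#[cfg(test)]
mod rewind_index_semantics {
    #[test]
    fn discard_index_keeps_everything_before_it() {
        let rendered = ["user (initial)", "assistant", "user", "assistant"];
        let discard_from_rendered_index = 2;
        let kept = &rendered[..discard_from_rendered_index];
        assert_eq!(kept, &["user (initial)", "assistant"]);
    }
}
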
/// GET /insights/chat/history — return the rendered transcript for a photo.
#[get("/insights/chat/history")]
pub async fn chat_history_handler(
    _claims: Claims,
    query: web::Query<ChatHistoryQuery>,
    app_state: web::Data<AppState>,
) -> impl Responder {
    // library param parsed for parity with other insight endpoints, even
    // though load_history currently keys on file_path alone (matches the
    // existing get_insight DAO contract).
    let _library = libraries::resolve_library_param(&app_state, query.library.as_deref())
        .ok()
        .flatten()
        .unwrap_or_else(|| app_state.primary_library());

    match app_state.insight_chat.load_history(&query.path) {
        Ok(view) => HttpResponse::Ok().json(ChatHistoryHttpResponse {
            messages: view
                .messages
                .into_iter()
                .map(|m| RenderedHistoryMessage {
                    role: m.role,
                    content: m.content,
                    is_initial: m.is_initial,
                    tools: m
                        .tools
                        .into_iter()
                        .map(|t| HistoryToolInvocation {
                            name: t.name,
                            arguments: t.arguments,
                            result: t.result,
                            result_truncated: t.result_truncated,
                        })
                        .collect(),
                })
                .collect(),
            turn_count: view.turn_count,
            model_version: view.model_version,
            backend: view.backend,
        }),
        Err(e) => {
            let msg = format!("{}", e);
            if msg.contains("no insight found") {
                HttpResponse::NotFound().json(serde_json::json!({ "error": msg }))
            } else if msg.contains("no chat history") {
                HttpResponse::Conflict().json(serde_json::json!({ "error": msg }))
            } else {
                HttpResponse::InternalServerError().json(serde_json::json!({ "error": msg }))
            }
        }
    }
}

/// POST /insights/chat/stream — streaming variant of /insights/chat.
/// Returns `text/event-stream` with one event per chat stream event.
#[post("/insights/chat/stream")]
pub async fn chat_stream_handler(
    _claims: Claims,
    request: web::Json<ChatTurnHttpRequest>,
    app_state: web::Data<AppState>,
) -> HttpResponse {
    let library = match libraries::resolve_library_param(&app_state, request.library.as_deref()) {
        Ok(Some(lib)) => lib,
        Ok(None) => app_state.primary_library(),
        Err(e) => {
            return HttpResponse::BadRequest().json(serde_json::json!({
                "error": format!("invalid library: {}", e)
            }));
        }
    };

    let chat_req = ChatTurnRequest {
        library_id: library.id,
        file_path: request.file_path.clone(),
        user_message: request.user_message.clone(),
        model: request.model.clone(),
        backend: request.backend.clone(),
        num_ctx: request.num_ctx,
        temperature: request.temperature,
        top_p: request.top_p,
        top_k: request.top_k,
        min_p: request.min_p,
        max_iterations: request.max_iterations,
        amend: request.amend,
    };

    let service = app_state.insight_chat.clone();
    let events = service.chat_turn_stream(chat_req);

    // Map ChatStreamEvent → SSE frame bytes.
    let sse_stream = futures::stream::StreamExt::map(events, |ev| {
        let frame = render_sse_frame(&ev);
        Ok::<_, actix_web::Error>(actix_web::web::Bytes::from(frame))
    });

    HttpResponse::Ok()
        .content_type("text/event-stream")
        .insert_header(("Cache-Control", "no-cache"))
        .insert_header(("X-Accel-Buffering", "no")) // nginx: disable response buffering
        .streaming(sse_stream)
}

fn render_sse_frame(ev: &ChatStreamEvent) -> String {
    let (event_name, payload) = match ev {
        ChatStreamEvent::IterationStart { n, max } => {
            ("iteration_start", serde_json::json!({ "n": n, "max": max }))
        }
        ChatStreamEvent::Truncated => ("truncated", serde_json::json!({})),
        ChatStreamEvent::TextDelta(delta) => ("text", serde_json::json!({ "delta": delta })),
        ChatStreamEvent::ToolCall {
            index,
            name,
            arguments,
        } => (
            "tool_call",
            serde_json::json!({ "index": index, "name": name, "arguments": arguments }),
        ),
        ChatStreamEvent::ToolResult {
            index,
            name,
            result,
            result_truncated,
        } => (
            "tool_result",
            serde_json::json!({
                "index": index,
                "name": name,
                "result": result,
                "result_truncated": result_truncated,
            }),
        ),
        ChatStreamEvent::Done {
            tool_calls_made,
            iterations_used,
            truncated,
            prompt_eval_count,
            eval_count,
            amended_insight_id,
            backend_used,
            model_used,
        } => (
            "done",
            serde_json::json!({
                "tool_calls_made": tool_calls_made,
                "iterations_used": iterations_used,
                "truncated": truncated,
                "prompt_eval_count": prompt_eval_count,
                "eval_count": eval_count,
                "amended_insight_id": amended_insight_id,
                "backend": backend_used,
                "model": model_used,
            }),
        ),
        ChatStreamEvent::Error(msg) => ("error", serde_json::json!({ "message": msg })),
    };
    let data = serde_json::to_string(&payload).unwrap_or_else(|_| "{}".to_string());
    format!("event: {}\ndata: {}\n\n", event_name, data)
}
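
// Hedged sketch, not part of the original change: the exact bytes one frame of
// the stream above carries for a text delta. Assumes ChatStreamEvent is the
// enum already imported at the top of this module, as the match in
// render_sse_frame implies.
#[cfg(test)]
mod sse_frame_shape {
    use super::*;

    #[test]
    fn text_delta_becomes_an_event_and_data_pair() {
        let frame = render_sse_frame(&ChatStreamEvent::TextDelta("Hel".to_string()));
        assert!(frame.starts_with("event: text\n"));
        assert!(frame.contains("data: {\"delta\":\"Hel\"}"));
        assert!(frame.ends_with("\n\n"));
    }
}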
1387  src/ai/insight_chat.rs (new file, diff suppressed because it is too large)
3961  src/ai/insight_generator.rs (new file, diff suppressed because it is too large)
 172  src/ai/llm_client.rs (new file)
@@ -0,0 +1,172 @@
use anyhow::Result;
use async_trait::async_trait;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};

/// Provider-agnostic surface for LLM backends (Ollama, OpenRouter, …).
///
/// Impls translate these canonical shapes at the wire boundary: tool-call
/// arguments stay as `serde_json::Value` in memory and are stringified only
/// when a provider requires it (OpenAI-compatible APIs do), and `images`
/// stays as base64 strings here and is rewritten into content-parts where
/// needed.
// First consumer lands in a later PR (OpenRouter impl + hybrid mode routing).
#[allow(dead_code)]
#[async_trait]
pub trait LlmClient: Send + Sync {
    /// Single-shot text generation. Optional system prompt and optional
    /// base64 images (ignored by providers without vision support).
    async fn generate(
        &self,
        prompt: &str,
        system: Option<&str>,
        images: Option<Vec<String>>,
    ) -> Result<String>;

    /// Multi-turn chat with tool definitions. Returns the assistant message
    /// (which may contain tool_calls) plus optional prompt/eval token counts.
    async fn chat_with_tools(
        &self,
        messages: Vec<ChatMessage>,
        tools: Vec<Tool>,
    ) -> Result<(ChatMessage, Option<i32>, Option<i32>)>;

    /// Streaming variant of `chat_with_tools`. The returned stream yields
    /// `TextDelta` items as content is produced, then a single terminal
    /// `Done` carrying the complete assembled message (with tool_calls, if
    /// any) plus token usage counts. Implementations that can't stream may
    /// fall back to calling `chat_with_tools` and emitting the full reply
    /// as one `Done` event.
    async fn chat_with_tools_stream(
        &self,
        messages: Vec<ChatMessage>,
        tools: Vec<Tool>,
    ) -> Result<BoxStream<'static, Result<LlmStreamEvent>>>;

    /// Batch embedding generation. Dimensionality is provider/model specific.
    async fn generate_embeddings(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>>;

    /// One-shot vision description of an image. Used to convert images into
    /// plain text for the hybrid-mode conversation flow.
    async fn describe_image(&self, image_base64: &str) -> Result<String>;

    /// Enumerate available models with their capabilities.
    async fn list_models(&self) -> Result<Vec<ModelCapabilities>>;

    /// Look up capabilities for a single model.
    async fn model_capabilities(&self, model: &str) -> Result<ModelCapabilities>;

    /// Primary model identifier this client was constructed with.
    fn primary_model(&self) -> &str;
}

/// Events emitted by streaming `chat_with_tools_stream`. A stream is a
/// sequence of zero or more `TextDelta` events followed by exactly one
/// `Done`. Callers should treat `Done` as terminal — further items (if any
/// slip through due to upstream misbehavior) are safe to ignore.
#[derive(Debug, Clone)]
pub enum LlmStreamEvent {
    /// Incremental content token(s) from the model. Concatenate in order to
    /// reconstruct the assistant's final text.
    TextDelta(String),
    /// Terminal event with the full assembled message (content + any
    /// tool_calls). `message.content` equals the concatenation of every
    /// preceding `TextDelta.0`.
    Done {
        message: ChatMessage,
        prompt_eval_count: Option<i32>,
        eval_count: Option<i32>,
    },
}

/// Tool definition sent to the model (OpenAI-compatible function schema).
#[derive(Serialize, Clone, Debug)]
pub struct Tool {
    #[serde(rename = "type")]
    pub tool_type: String, // always "function"
    pub function: ToolFunction,
}

#[derive(Serialize, Clone, Debug)]
pub struct ToolFunction {
    pub name: String,
    pub description: String,
    pub parameters: serde_json::Value,
}

impl Tool {
    pub fn function(name: &str, description: &str, parameters: serde_json::Value) -> Self {
        Self {
            tool_type: "function".to_string(),
            function: ToolFunction {
                name: name.to_string(),
                description: description.to_string(),
                parameters,
            },
        }
    }
}

/// A message in the chat conversation history.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ChatMessage {
    pub role: String, // "system" | "user" | "assistant" | "tool"
    /// Empty string (not null) when tool_calls is present — Ollama quirk.
    #[serde(default)]
    pub content: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
    /// Base64 images — only on user messages to vision-capable models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub images: Option<Vec<String>>,
}

impl ChatMessage {
    pub fn system(content: impl Into<String>) -> Self {
        Self {
            role: "system".to_string(),
            content: content.into(),
            tool_calls: None,
            images: None,
        }
    }
    pub fn user(content: impl Into<String>) -> Self {
        Self {
            role: "user".to_string(),
            content: content.into(),
            tool_calls: None,
            images: None,
        }
    }
    pub fn tool_result(content: impl Into<String>) -> Self {
        Self {
            role: "tool".to_string(),
            content: content.into(),
            tool_calls: None,
            images: None,
        }
    }
}

/// Tool call returned by the model in an assistant message.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ToolCall {
    pub function: ToolCallFunction,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ToolCallFunction {
    pub name: String,
    /// Canonical shape: native JSON. Providers that use JSON-encoded-string
    /// arguments (OpenAI-compatible) translate at their wire boundary.
    pub arguments: serde_json::Value,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ModelCapabilities {
    pub name: String,
    pub has_vision: bool,
    pub has_tool_calling: bool,
}
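
// Hedged sketch, not part of this change: one way a caller might drain
// chat_with_tools_stream for any LlmClient implementation, concatenating
// TextDelta events for live display and returning the terminal Done message.
// The helper name and the choice to drop token counts are illustrative only.
#[allow(dead_code)]
pub async fn collect_streamed_reply(
    client: &dyn LlmClient,
    messages: Vec<ChatMessage>,
) -> Result<ChatMessage> {
    use futures::StreamExt;

    let mut stream = client.chat_with_tools_stream(messages, Vec::new()).await?;
    let mut live_text = String::new();
    while let Some(event) = stream.next().await {
        match event? {
            // Deltas arrive in order; append them as they stream in.
            LlmStreamEvent::TextDelta(delta) => live_text.push_str(&delta),
            // Done carries the fully assembled message, including any tool_calls.
            LlmStreamEvent::Done { message, .. } => return Ok(message),
        }
    }
    anyhow::bail!(
        "stream ended without a Done event ({} chars of partial text)",
        live_text.len()
    )
}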
  39  src/ai/mod.rs (new file)
@@ -0,0 +1,39 @@
pub mod apollo_client;
pub mod daily_summary_job;
pub mod face_client;
pub mod handlers;
pub mod insight_chat;
pub mod insight_generator;
pub mod llm_client;
pub mod ollama;
pub mod openrouter;
pub mod sms_client;

// strip_summary_boilerplate is used by binaries (test_daily_summary), not the library
#[allow(unused_imports)]
pub use daily_summary_job::{
    DAILY_SUMMARY_MESSAGE_LIMIT, DAILY_SUMMARY_SYSTEM_PROMPT, build_daily_summary_prompt,
    generate_daily_summaries, strip_summary_boilerplate,
};
pub use handlers::{
    chat_history_handler, chat_rewind_handler, chat_stream_handler, chat_turn_handler,
    delete_insight_handler, export_training_data_handler, generate_agentic_insight_handler,
    generate_insight_handler, get_all_insights_handler, get_available_models_handler,
    get_insight_handler, get_openrouter_models_handler, rate_insight_handler,
};
pub use insight_generator::InsightGenerator;
#[allow(unused_imports)]
pub use llm_client::{
    ChatMessage, LlmClient, ModelCapabilities, Tool, ToolCall, ToolCallFunction, ToolFunction,
};
pub use ollama::{EMBEDDING_MODEL, OllamaClient};
pub use sms_client::{SmsApiClient, SmsMessage};

/// Display name used for the user in message transcripts and first-person
/// prompt text. Reads the `USER_NAME` env var; defaults to `"Me"`. Models
/// often confuse `"Me:"` in a transcript with their own role — setting
/// `USER_NAME=Cameron` (or similar) in the environment eliminates that
/// ambiguity across daily summaries, insight generation, and chat.
pub fn user_display_name() -> String {
    std::env::var("USER_NAME").unwrap_or_else(|_| "Me".to_string())
}
1347  src/ai/ollama.rs (new file, diff suppressed because it is too large)
 998  src/ai/openrouter.rs (new file)
@@ -0,0 +1,998 @@
// First consumer lands in a later PR (hybrid backend routing). Tests exercise
// the translation helpers directly.
#![allow(dead_code)]

use anyhow::{Context, Result, anyhow, bail};
use async_trait::async_trait;
use reqwest::Client;
use serde::Deserialize;
use serde_json::{Value, json};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use crate::ai::llm_client::{
    ChatMessage, LlmClient, LlmStreamEvent, ModelCapabilities, Tool, ToolCall, ToolCallFunction,
};
use futures::stream::{BoxStream, StreamExt};

const DEFAULT_BASE_URL: &str = "https://openrouter.ai/api/v1";
const DEFAULT_EMBEDDING_MODEL: &str = "openai/text-embedding-3-small";
const CACHE_DURATION_SECS: u64 = 15 * 60;

#[derive(Clone)]
struct CachedEntry<T> {
    data: T,
    cached_at: Instant,
}

impl<T> CachedEntry<T> {
    fn new(data: T) -> Self {
        Self {
            data,
            cached_at: Instant::now(),
        }
    }

    fn is_expired(&self) -> bool {
        self.cached_at.elapsed().as_secs() > CACHE_DURATION_SECS
    }
}

lazy_static::lazy_static! {
    static ref MODEL_CAPABILITIES_CACHE: Arc<Mutex<HashMap<String, CachedEntry<Vec<ModelCapabilities>>>>> =
        Arc::new(Mutex::new(HashMap::new()));
}

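// Hedged sketch, not part of the original change: a freshly created cache entry
// is not expired; expiry only kicks in after CACHE_DURATION_SECS has elapsed.
#[cfg(test)]
mod cache_entry_freshness {
    use super::*;

    #[test]
    fn new_entries_are_not_expired() {
        assert!(!CachedEntry::new(vec![1u8]).is_expired());
    }
}
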
/// OpenAI-compatible client for OpenRouter (https://openrouter.ai).
///
/// Translates canonical `ChatMessage` / `Tool` shapes to OpenAI wire format:
/// - Tool-call `arguments` serialized as JSON-encoded strings (vs Ollama's
///   native JSON).
/// - Image content rewritten into content-parts array with `image_url` entries.
/// - `role=tool` messages attach a `tool_call_id` inferred from the preceding
///   assistant turn's tool call.
#[derive(Clone)]
pub struct OpenRouterClient {
    client: Client,
    pub api_key: String,
    pub base_url: String,
    pub primary_model: String,
    pub embedding_model: String,
    num_ctx: Option<i32>,
    temperature: Option<f32>,
    top_p: Option<f32>,
    top_k: Option<i32>,
    min_p: Option<f32>,
    /// Optional `HTTP-Referer` header OpenRouter uses for attribution.
    pub referer: Option<String>,
    /// Optional `X-Title` header OpenRouter uses for attribution.
    pub app_title: Option<String>,
}

impl OpenRouterClient {
    pub fn new(api_key: String, base_url: Option<String>, primary_model: String) -> Self {
        Self {
            client: Client::builder()
                .connect_timeout(Duration::from_secs(10))
                .timeout(Duration::from_secs(180))
                .build()
                .unwrap_or_else(|_| Client::new()),
            api_key,
            base_url: base_url.unwrap_or_else(|| DEFAULT_BASE_URL.to_string()),
            primary_model,
            embedding_model: DEFAULT_EMBEDDING_MODEL.to_string(),
            num_ctx: None,
            temperature: None,
            top_p: None,
            top_k: None,
            min_p: None,
            referer: None,
            app_title: None,
        }
    }

    pub fn set_embedding_model(&mut self, model: String) {
        self.embedding_model = model;
    }

    #[allow(dead_code)]
    pub fn set_num_ctx(&mut self, num_ctx: Option<i32>) {
        self.num_ctx = num_ctx;
    }

    #[allow(dead_code)]
    pub fn set_sampling_params(
        &mut self,
        temperature: Option<f32>,
        top_p: Option<f32>,
        top_k: Option<i32>,
        min_p: Option<f32>,
    ) {
        self.temperature = temperature;
        self.top_p = top_p;
        self.top_k = top_k;
        self.min_p = min_p;
    }

    pub fn set_attribution(&mut self, referer: Option<String>, app_title: Option<String>) {
        self.referer = referer;
        self.app_title = app_title;
    }

    fn authed(&self, builder: reqwest::RequestBuilder) -> reqwest::RequestBuilder {
        let mut b = builder.bearer_auth(&self.api_key);
        if let Some(r) = &self.referer {
            b = b.header("HTTP-Referer", r);
        }
        if let Some(t) = &self.app_title {
            b = b.header("X-Title", t);
        }
        b
    }

    /// Translate canonical messages to the OpenAI-compatible wire shape.
    ///
    /// Walks in order so it can attach `tool_call_id` to `role=tool` messages
    /// based on the most recent assistant turn's tool call.
    fn messages_to_openai(messages: &[ChatMessage]) -> Vec<Value> {
        let mut out = Vec::with_capacity(messages.len());
        let mut last_tool_call_ids: Vec<String> = Vec::new();
        let mut next_tool_result_idx: usize = 0;

        for msg in messages {
            let mut obj = serde_json::Map::new();
            obj.insert("role".into(), Value::String(msg.role.clone()));

            // Content: string OR content-parts array (when images present).
            match &msg.images {
                Some(images) if !images.is_empty() => {
                    let mut parts: Vec<Value> = Vec::new();
                    if !msg.content.is_empty() {
                        parts.push(json!({"type": "text", "text": msg.content}));
                    }
                    for img in images {
                        let url = image_to_data_url(img);
                        parts.push(json!({
                            "type": "image_url",
                            "image_url": { "url": url }
                        }));
                    }
                    obj.insert("content".into(), Value::Array(parts));
                }
                _ => {
                    obj.insert("content".into(), Value::String(msg.content.clone()));
                }
            }

            // Assistant message with tool_calls: stringify arguments, remember
            // the ids so the subsequent tool messages can reference them.
            if let Some(tcs) = &msg.tool_calls
                && msg.role == "assistant"
            {
                let converted: Vec<Value> = tcs
                    .iter()
                    .enumerate()
                    .map(|(i, call)| {
                        let id = call.id.clone().unwrap_or_else(|| format!("call_{}", i));
                        let args_str = serde_json::to_string(&call.function.arguments)
                            .unwrap_or_else(|_| "{}".to_string());
                        json!({
                            "id": id,
                            "type": "function",
                            "function": {
                                "name": call.function.name,
                                "arguments": args_str,
                            }
                        })
                    })
                    .collect();
                last_tool_call_ids = converted
                    .iter()
                    .filter_map(|v| v.get("id").and_then(|x| x.as_str()).map(String::from))
                    .collect();
                next_tool_result_idx = 0;
                obj.insert("tool_calls".into(), Value::Array(converted));
            }

            // Tool result messages: attach tool_call_id from the last assistant turn.
            if msg.role == "tool" {
                let id = last_tool_call_ids
                    .get(next_tool_result_idx)
                    .cloned()
                    .unwrap_or_else(|| "call_0".to_string());
                obj.insert("tool_call_id".into(), Value::String(id));
                next_tool_result_idx += 1;
            }

            out.push(Value::Object(obj));
        }

        out
    }

    /// Parse an OpenAI-compatible assistant message back into canonical shape.
    fn openai_message_to_chat(msg: &Value) -> Result<ChatMessage> {
        let obj = msg
            .as_object()
            .ok_or_else(|| anyhow!("response message is not an object"))?;
        let role = obj
            .get("role")
            .and_then(|v| v.as_str())
            .unwrap_or("assistant")
            .to_string();
        let content = obj
            .get("content")
            .and_then(|v| v.as_str())
            .unwrap_or("")
            .to_string();

        let tool_calls = if let Some(tcs) = obj.get("tool_calls").and_then(|v| v.as_array()) {
            let mut parsed = Vec::with_capacity(tcs.len());
            for tc in tcs {
                let id = tc.get("id").and_then(|v| v.as_str()).map(String::from);
                let function = tc
                    .get("function")
                    .ok_or_else(|| anyhow!("tool_call missing function field"))?;
                let name = function
                    .get("name")
                    .and_then(|v| v.as_str())
                    .unwrap_or_default()
                    .to_string();
                let args_value = match function.get("arguments") {
                    // OpenAI-compat: stringified JSON.
                    Some(Value::String(s)) => {
                        serde_json::from_str::<Value>(s).unwrap_or_else(|_| json!({}))
                    }
                    // Some providers emit arguments as an object directly — accept both.
                    Some(v @ Value::Object(_)) => v.clone(),
                    _ => json!({}),
                };
                parsed.push(ToolCall {
                    id,
                    function: ToolCallFunction {
                        name,
                        arguments: args_value,
                    },
                });
            }
            Some(parsed)
        } else {
            None
        };

        Ok(ChatMessage {
            role,
            content,
            tool_calls,
            images: None,
        })
    }

    fn build_options(&self) -> Vec<(&'static str, Value)> {
        let mut v = Vec::new();
        if let Some(t) = self.temperature {
            v.push(("temperature", json!(t)));
        }
        if let Some(p) = self.top_p {
            v.push(("top_p", json!(p)));
        }
        if let Some(k) = self.top_k {
            v.push(("top_k", json!(k)));
        }
        if let Some(m) = self.min_p {
            v.push(("min_p", json!(m)));
        }
        if let Some(c) = self.num_ctx {
            // OpenAI uses max_tokens for generation bound; num_ctx isn't
            // directly transferable. Skip rather than silently mis-map.
            let _ = c;
        }
        v
    }
}

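// Hedged sketch, not covered by the original tests below: a role = "tool"
// message picks up the tool_call_id remembered from the preceding assistant
// turn when translated to the OpenAI wire shape.
#[cfg(test)]
mod tool_result_id_attachment {
    use super::*;

    #[test]
    fn tool_result_reuses_the_preceding_call_id() {
        let assistant = ChatMessage {
            role: "assistant".into(),
            content: String::new(),
            tool_calls: Some(vec![ToolCall {
                id: Some("call_123".into()),
                function: ToolCallFunction {
                    name: "search_sms".into(),
                    arguments: json!({"query": "hi"}),
                },
            }]),
            images: None,
        };
        let tool_result = ChatMessage::tool_result("3 messages found");

        let wire = OpenRouterClient::messages_to_openai(&[assistant, tool_result]);
        assert_eq!(wire[1]["role"], "tool");
        assert_eq!(wire[1]["tool_call_id"], "call_123");
    }
}
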
#[async_trait]
impl LlmClient for OpenRouterClient {
    async fn generate(
        &self,
        prompt: &str,
        system: Option<&str>,
        images: Option<Vec<String>>,
    ) -> Result<String> {
        let mut messages: Vec<ChatMessage> = Vec::new();
        if let Some(sys) = system {
            messages.push(ChatMessage::system(sys));
        }
        let mut user = ChatMessage::user(prompt);
        user.images = images;
        messages.push(user);

        let (reply, _, _) = self.chat_with_tools(messages, Vec::new()).await?;
        Ok(reply.content)
    }

    async fn chat_with_tools(
        &self,
        messages: Vec<ChatMessage>,
        tools: Vec<Tool>,
    ) -> Result<(ChatMessage, Option<i32>, Option<i32>)> {
        let url = format!("{}/chat/completions", self.base_url);
        let mut body = serde_json::Map::new();
        body.insert("model".into(), Value::String(self.primary_model.clone()));
        body.insert(
            "messages".into(),
            Value::Array(Self::messages_to_openai(&messages)),
        );
        body.insert("stream".into(), Value::Bool(false));
        if !tools.is_empty() {
            body.insert(
                "tools".into(),
                serde_json::to_value(&tools).context("serializing tools")?,
            );
        }
        for (k, v) in self.build_options() {
            body.insert(k.into(), v);
        }

        log::info!(
            "OpenRouter chat_with_tools: model={} messages={} tools={}",
            self.primary_model,
            messages.len(),
            tools.len()
        );

        let resp = self
            .authed(self.client.post(&url))
            .json(&Value::Object(body))
            .send()
            .await
            .with_context(|| format!("POST {} failed", url))?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenRouter chat request failed: {} — {}", status, body);
        }

        let parsed: Value = resp.json().await.context("parsing chat response")?;
        let choice = parsed
            .get("choices")
            .and_then(|v| v.as_array())
            .and_then(|a| a.first())
            .ok_or_else(|| {
                anyhow!(
                    "response missing choices[0]: {}",
                    extract_openrouter_error_detail(&parsed)
                )
            })?;
        let msg = choice.get("message").ok_or_else(|| {
            anyhow!(
                "choices[0] missing message: {}",
                extract_openrouter_error_detail(&parsed)
            )
        })?;
        let chat_msg = Self::openai_message_to_chat(msg)?;

        let usage = parsed.get("usage");
        let prompt_tokens = usage
            .and_then(|u| u.get("prompt_tokens"))
            .and_then(|v| v.as_i64())
            .map(|n| n as i32);
        let completion_tokens = usage
            .and_then(|u| u.get("completion_tokens"))
            .and_then(|v| v.as_i64())
            .map(|n| n as i32);

        Ok((chat_msg, prompt_tokens, completion_tokens))
    }

    async fn chat_with_tools_stream(
        &self,
        messages: Vec<ChatMessage>,
        tools: Vec<Tool>,
    ) -> Result<BoxStream<'static, Result<LlmStreamEvent>>> {
        let url = format!("{}/chat/completions", self.base_url);
        let mut body = serde_json::Map::new();
        body.insert("model".into(), Value::String(self.primary_model.clone()));
        body.insert(
            "messages".into(),
            Value::Array(Self::messages_to_openai(&messages)),
        );
        body.insert("stream".into(), Value::Bool(true));
        // Ask for usage data in the final chunk (OpenAI + OpenRouter
        // both honor this options bag).
        body.insert(
            "stream_options".into(),
            serde_json::json!({ "include_usage": true }),
        );
        if !tools.is_empty() {
            body.insert(
                "tools".into(),
                serde_json::to_value(&tools).context("serializing tools")?,
            );
        }
        for (k, v) in self.build_options() {
            body.insert(k.into(), v);
        }

        let resp = self
            .authed(self.client.post(&url))
            .json(&Value::Object(body))
            .send()
            .await
            .with_context(|| format!("POST {} failed", url))?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenRouter stream request failed: {} — {}", status, body);
        }

        // OpenAI-compat SSE stream. Each event is `data: <json>\n\n`, with
        // `data: [DONE]` signalling completion. Tool calls arrive as
        // `delta.tool_calls[i]` chunks that must be concatenated by index.
        let byte_stream = resp.bytes_stream();
        let stream = async_stream::stream! {
            let mut byte_stream = byte_stream;
            let mut buf: Vec<u8> = Vec::new();
            let mut accumulated_content = String::new();
            // tool call state: index -> (id, name, args_string)
            let mut tool_state: std::collections::BTreeMap<
                usize,
                (Option<String>, Option<String>, String),
            > = std::collections::BTreeMap::new();
            let mut role = "assistant".to_string();
            let mut prompt_tokens: Option<i32> = None;
            let mut completion_tokens: Option<i32> = None;
            let mut done_seen = false;

            while let Some(chunk) = byte_stream.next().await {
                let chunk = match chunk {
                    Ok(b) => b,
                    Err(e) => {
                        yield Err(anyhow!("stream read failed: {}", e));
                        return;
                    }
                };
                buf.extend_from_slice(&chunk);

                // SSE frames are delimited by a blank line. Walk the buffer
                // for "\n\n" markers; anything before them is a complete
                // frame (possibly multi-line).
                while let Some(sep) = find_double_newline(&buf) {
                    let frame = buf.drain(..sep + 2).collect::<Vec<_>>();
                    let frame_str = match std::str::from_utf8(&frame) {
                        Ok(s) => s,
                        Err(_) => continue,
                    };
                    // A frame is one or more lines; the payload is on data:
                    // lines. Ignore comments and other fields.
                    for line in frame_str.lines() {
                        let line = line.trim_end_matches('\r');
                        let payload = match line.strip_prefix("data: ") {
                            Some(p) => p,
                            None => continue,
                        };
                        if payload == "[DONE]" {
                            done_seen = true;
                            break;
                        }
                        let v: Value = match serde_json::from_str(payload) {
                            Ok(v) => v,
                            Err(e) => {
                                log::warn!(
                                    "malformed OpenRouter SSE frame: {} ({})",
                                    payload,
                                    e
                                );
                                continue;
                            }
                        };

                        // Usage can arrive in a dedicated final frame with
                        // empty choices.
                        if let Some(usage) = v.get("usage") {
                            prompt_tokens = usage
                                .get("prompt_tokens")
                                .and_then(|n| n.as_i64())
                                .map(|n| n as i32);
                            completion_tokens = usage
                                .get("completion_tokens")
                                .and_then(|n| n.as_i64())
                                .map(|n| n as i32);
                        }

                        let Some(choices) = v.get("choices").and_then(|c| c.as_array())
                        else {
                            continue;
                        };
                        let Some(choice) = choices.first() else { continue };
                        let delta = match choice.get("delta") {
                            Some(d) => d,
                            None => continue,
                        };
                        if let Some(r) = delta.get("role").and_then(|v| v.as_str()) {
                            role = r.to_string();
                        }
                        if let Some(content) =
                            delta.get("content").and_then(|v| v.as_str())
                            && !content.is_empty()
                        {
                            accumulated_content.push_str(content);
                            yield Ok(LlmStreamEvent::TextDelta(content.to_string()));
                        }
                        if let Some(tcs) = delta.get("tool_calls").and_then(|v| v.as_array()) {
                            for tc_delta in tcs {
                                let idx = tc_delta
                                    .get("index")
                                    .and_then(|n| n.as_u64())
                                    .unwrap_or(0) as usize;
                                let entry = tool_state
                                    .entry(idx)
                                    .or_insert((None, None, String::new()));
                                if let Some(id) =
                                    tc_delta.get("id").and_then(|v| v.as_str())
                                {
                                    entry.0 = Some(id.to_string());
                                }
                                if let Some(func) = tc_delta.get("function") {
                                    if let Some(name) =
                                        func.get("name").and_then(|v| v.as_str())
                                    {
                                        entry.1 = Some(name.to_string());
                                    }
                                    if let Some(args) =
                                        func.get("arguments").and_then(|v| v.as_str())
                                    {
                                        entry.2.push_str(args);
                                    }
                                }
                            }
                        }
                    }
                    if done_seen {
                        break;
                    }
                }
                if done_seen {
                    break;
                }
            }

            // Finalize tool calls: parse accumulated argument strings.
            let tool_calls: Option<Vec<ToolCall>> = if tool_state.is_empty() {
                None
            } else {
                let mut v = Vec::with_capacity(tool_state.len());
                for (_idx, (id, name, args)) in tool_state {
                    let arguments: Value = if args.trim().is_empty() {
                        Value::Object(Default::default())
                    } else {
                        serde_json::from_str(&args).unwrap_or_else(|_| {
                            Value::Object(Default::default())
                        })
                    };
                    v.push(ToolCall {
                        id,
                        function: ToolCallFunction {
                            name: name.unwrap_or_default(),
                            arguments,
                        },
                    });
                }
                Some(v)
            };

            let message = ChatMessage {
                role,
                content: accumulated_content,
                tool_calls,
                images: None,
            };
            yield Ok(LlmStreamEvent::Done {
                message,
                prompt_eval_count: prompt_tokens,
                eval_count: completion_tokens,
            });
        };

        Ok(Box::pin(stream))
    }

    async fn generate_embeddings(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>> {
        let url = format!("{}/embeddings", self.base_url);
        let body = json!({
            "model": self.embedding_model,
            "input": texts,
        });

        let resp = self
            .authed(self.client.post(&url))
            .json(&body)
            .send()
            .await
            .with_context(|| format!("POST {} failed", url))?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenRouter embedding request failed: {} — {}", status, body);
        }

        #[derive(Deserialize)]
        struct EmbedResponse {
            data: Vec<EmbedItem>,
        }
        #[derive(Deserialize)]
        struct EmbedItem {
            embedding: Vec<f32>,
        }

        let parsed: EmbedResponse = resp.json().await.context("parsing embed response")?;
        Ok(parsed.data.into_iter().map(|i| i.embedding).collect())
    }

    async fn describe_image(&self, image_base64: &str) -> Result<String> {
        let prompt = "Briefly describe what you see in this image in 1-2 sentences. \
            Focus on the people, location, and activity.";
        self.generate(
            prompt,
            Some("You are a scene description assistant. Be concise and factual."),
            Some(vec![image_base64.to_string()]),
        )
        .await
    }

    async fn list_models(&self) -> Result<Vec<ModelCapabilities>> {
        {
            let cache = MODEL_CAPABILITIES_CACHE.lock().unwrap();
            if let Some(entry) = cache.get(&self.base_url)
                && !entry.is_expired()
            {
                return Ok(entry.data.clone());
            }
        }

        let url = format!("{}/models", self.base_url);
        let resp = self
            .authed(self.client.get(&url))
            .send()
            .await
            .with_context(|| format!("GET {} failed", url))?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenRouter list_models failed: {} — {}", status, body);
        }

        let parsed: Value = resp.json().await.context("parsing models response")?;
        let data = parsed
            .get("data")
            .and_then(|v| v.as_array())
            .ok_or_else(|| anyhow!("models response missing data[]"))?;

        let caps: Vec<ModelCapabilities> = data.iter().map(parse_model_capabilities).collect();

        {
            let mut cache = MODEL_CAPABILITIES_CACHE.lock().unwrap();
            cache.insert(self.base_url.clone(), CachedEntry::new(caps.clone()));
        }

        Ok(caps)
    }

    async fn model_capabilities(&self, model: &str) -> Result<ModelCapabilities> {
        let all = self.list_models().await?;
        all.into_iter()
            .find(|m| m.name == model)
            .ok_or_else(|| anyhow!("model '{}' not found on OpenRouter", model))
    }

    fn primary_model(&self) -> &str {
        &self.primary_model
    }
}

/// Extract a diagnostic fragment from an OpenRouter response body that
/// doesn't match the expected `{choices: [...]}` shape. OpenRouter will
/// sometimes return 200 OK with `{"error": {"message": "...", "code": ...}}`
/// when the upstream provider (Anthropic/OpenAI/Google/etc) errored out
/// — rate limits, content moderation, model overload, provider timeout.
/// Surface the structured error if present; otherwise fall back to a
/// truncated raw-JSON view so the log line is actionable.
fn extract_openrouter_error_detail(parsed: &Value) -> String {
    if let Some(err) = parsed.get("error") {
        let message = err
            .get("message")
            .and_then(|v| v.as_str())
            .unwrap_or("(no message)");
        let code = err
            .get("code")
            .map(|v| match v {
                Value::String(s) => s.clone(),
                other => other.to_string(),
            })
            .unwrap_or_else(|| "?".to_string());
        let short_message: String = message.chars().take(240).collect();
        return format!("error code={} message=\"{}\"", code, short_message);
    }
    let raw = parsed.to_string();
    raw.chars().take(300).collect()
}

/// Find the byte offset of the first `\n\n` (end of an SSE frame) in `buf`.
/// Returns the index of the first `\n` of the pair, so the full separator is
/// `buf[idx..=idx+1]`. Also handles `\r\n\r\n` since some servers emit it.
fn find_double_newline(buf: &[u8]) -> Option<usize> {
    for i in 0..buf.len().saturating_sub(1) {
        if buf[i] == b'\n' && buf[i + 1] == b'\n' {
            return Some(i);
        }
        // \r\n\r\n: return i + 1 so the caller's drain(..sep + 2) consumes the
        // frame through the second \r; the trailing \n left in the buffer is
        // harmless and is skipped as an empty line with the next frame.
        if i + 3 < buf.len()
            && buf[i] == b'\r'
            && buf[i + 1] == b'\n'
            && buf[i + 2] == b'\r'
            && buf[i + 3] == b'\n'
        {
            return Some(i + 1);
        }
    }
    None
}

/// Build a `data:` URL if the provided string is raw base64, otherwise pass it through.
fn image_to_data_url(img: &str) -> String {
    if img.starts_with("data:") {
        img.to_string()
    } else {
        format!("data:image/jpeg;base64,{}", img)
    }
}

fn parse_model_capabilities(m: &Value) -> ModelCapabilities {
    let name = m
        .get("id")
        .and_then(|v| v.as_str())
        .unwrap_or_default()
        .to_string();
    let has_tool_calling = m
        .get("supported_parameters")
        .and_then(|v| v.as_array())
        .map(|arr| arr.iter().any(|x| x.as_str() == Some("tools")))
        .unwrap_or(false);
    let has_vision = m
        .get("architecture")
        .and_then(|v| v.get("input_modalities"))
        .and_then(|v| v.as_array())
        .map(|arr| arr.iter().any(|x| x.as_str() == Some("image")))
        .unwrap_or(false);
    ModelCapabilities {
        name,
        has_vision,
        has_tool_calling,
    }
}

#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn tool_call_arguments_stringified_on_send() {
|
||||||
|
let mut msg = ChatMessage {
|
||||||
|
role: "assistant".into(),
|
||||||
|
content: String::new(),
|
||||||
|
tool_calls: Some(vec![ToolCall {
|
||||||
|
id: Some("call_abc".into()),
|
||||||
|
function: ToolCallFunction {
|
||||||
|
name: "search_sms".into(),
|
||||||
|
arguments: json!({"query": "hello", "limit": 5}),
|
||||||
|
},
|
||||||
|
}]),
|
||||||
|
images: None,
|
||||||
|
};
|
||||||
|
msg.tool_calls.as_mut().unwrap()[0].function.arguments =
|
||||||
|
json!({"query": "hello", "limit": 5});
|
||||||
|
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[msg]);
|
||||||
|
let tcs = wire[0]
|
||||||
|
.get("tool_calls")
|
||||||
|
.and_then(|v| v.as_array())
|
||||||
|
.expect("tool_calls present");
|
||||||
|
let args = tcs[0]
|
||||||
|
.get("function")
|
||||||
|
.and_then(|f| f.get("arguments"))
|
||||||
|
.and_then(|a| a.as_str())
|
||||||
|
.expect("arguments stringified");
|
||||||
|
let parsed: Value = serde_json::from_str(args).unwrap();
|
||||||
|
assert_eq!(parsed["query"], "hello");
|
||||||
|
assert_eq!(parsed["limit"], 5);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn tool_call_arguments_parsed_on_receive() {
|
||||||
|
let response_msg = json!({
|
||||||
|
"role": "assistant",
|
||||||
|
"content": "",
|
||||||
|
"tool_calls": [{
|
||||||
|
"id": "call_xyz",
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "get_weather",
|
||||||
|
"arguments": "{\"city\":\"Boston\",\"units\":\"celsius\"}"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
|
||||||
|
let parsed = OpenRouterClient::openai_message_to_chat(&response_msg).unwrap();
|
||||||
|
let tcs = parsed.tool_calls.unwrap();
|
||||||
|
assert_eq!(tcs.len(), 1);
|
||||||
|
assert_eq!(tcs[0].function.name, "get_weather");
|
||||||
|
assert_eq!(tcs[0].function.arguments["city"], "Boston");
|
||||||
|
assert_eq!(tcs[0].function.arguments["units"], "celsius");
|
||||||
|
assert_eq!(tcs[0].id.as_deref(), Some("call_xyz"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn tool_call_arguments_accept_native_json_on_receive() {
|
||||||
|
// Some providers return arguments as an object directly; accept both.
|
||||||
|
let response_msg = json!({
|
||||||
|
"role": "assistant",
|
||||||
|
"content": "",
|
||||||
|
"tool_calls": [{
|
||||||
|
"id": "call_1",
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "foo",
|
||||||
|
"arguments": {"nested": {"k": 1}}
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
let parsed = OpenRouterClient::openai_message_to_chat(&response_msg).unwrap();
|
||||||
|
let tc = &parsed.tool_calls.unwrap()[0];
|
||||||
|
assert_eq!(tc.function.arguments["nested"]["k"], 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn images_become_content_parts() {
|
||||||
|
let mut msg = ChatMessage::user("What is in this photo?");
|
||||||
|
msg.images = Some(vec!["BASE64DATA".into()]);
|
||||||
|
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[msg]);
|
||||||
|
let content = wire[0].get("content").and_then(|v| v.as_array()).unwrap();
|
||||||
|
assert_eq!(content.len(), 2);
|
||||||
|
assert_eq!(content[0]["type"], "text");
|
||||||
|
assert_eq!(content[0]["text"], "What is in this photo?");
|
||||||
|
assert_eq!(content[1]["type"], "image_url");
|
||||||
|
assert_eq!(
|
||||||
|
content[1]["image_url"]["url"],
|
||||||
|
"data:image/jpeg;base64,BASE64DATA"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn data_url_images_pass_through_unchanged() {
|
||||||
|
let mut msg = ChatMessage::user("");
|
||||||
|
msg.images = Some(vec!["data:image/png;base64,ABCDEF".into()]);
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[msg]);
|
||||||
|
let content = wire[0].get("content").and_then(|v| v.as_array()).unwrap();
|
||||||
|
// No text part when content is empty.
|
||||||
|
assert_eq!(content.len(), 1);
|
||||||
|
assert_eq!(
|
||||||
|
content[0]["image_url"]["url"],
|
||||||
|
"data:image/png;base64,ABCDEF"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn text_only_message_stays_string() {
|
||||||
|
let msg = ChatMessage::user("hello");
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[msg]);
|
||||||
|
assert_eq!(wire[0]["content"], "hello");
|
||||||
|
assert!(wire[0]["content"].as_str().is_some());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn tool_result_inherits_tool_call_id_from_prior_assistant() {
|
||||||
|
let assistant = ChatMessage {
|
||||||
|
role: "assistant".into(),
|
||||||
|
content: String::new(),
|
||||||
|
tool_calls: Some(vec![ToolCall {
|
||||||
|
id: Some("call_42".into()),
|
||||||
|
function: ToolCallFunction {
|
||||||
|
name: "lookup".into(),
|
||||||
|
arguments: json!({}),
|
||||||
|
},
|
||||||
|
}]),
|
||||||
|
images: None,
|
||||||
|
};
|
||||||
|
let tool_result = ChatMessage::tool_result("found it");
|
||||||
|
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[assistant, tool_result]);
|
||||||
|
assert_eq!(wire[1]["role"], "tool");
|
||||||
|
assert_eq!(wire[1]["tool_call_id"], "call_42");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn multiple_tool_results_map_to_sequential_call_ids() {
|
||||||
|
let assistant = ChatMessage {
|
||||||
|
role: "assistant".into(),
|
||||||
|
content: String::new(),
|
||||||
|
tool_calls: Some(vec![
|
||||||
|
ToolCall {
|
||||||
|
id: Some("call_A".into()),
|
||||||
|
function: ToolCallFunction {
|
||||||
|
name: "a".into(),
|
||||||
|
arguments: json!({}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ToolCall {
|
||||||
|
id: Some("call_B".into()),
|
||||||
|
function: ToolCallFunction {
|
||||||
|
name: "b".into(),
|
||||||
|
arguments: json!({}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]),
|
||||||
|
images: None,
|
||||||
|
};
|
||||||
|
let r1 = ChatMessage::tool_result("a result");
|
||||||
|
let r2 = ChatMessage::tool_result("b result");
|
||||||
|
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[assistant, r1, r2]);
|
||||||
|
assert_eq!(wire[1]["tool_call_id"], "call_A");
|
||||||
|
assert_eq!(wire[2]["tool_call_id"], "call_B");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn missing_tool_call_id_gets_synthetic_fallback() {
|
||||||
|
let assistant = ChatMessage {
|
||||||
|
role: "assistant".into(),
|
||||||
|
content: String::new(),
|
||||||
|
tool_calls: Some(vec![ToolCall {
|
||||||
|
id: None,
|
||||||
|
function: ToolCallFunction {
|
||||||
|
name: "noid".into(),
|
||||||
|
arguments: json!({}),
|
||||||
|
},
|
||||||
|
}]),
|
||||||
|
images: None,
|
||||||
|
};
|
||||||
|
let wire = OpenRouterClient::messages_to_openai(&[assistant]);
|
||||||
|
let tcs = wire[0]
|
||||||
|
.get("tool_calls")
|
||||||
|
.and_then(|v| v.as_array())
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(tcs[0]["id"], "call_0");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_model_capabilities_extracts_tools_and_vision() {
|
||||||
|
let m = json!({
|
||||||
|
"id": "anthropic/claude-sonnet-4",
|
||||||
|
"supported_parameters": ["temperature", "top_p", "tools", "max_tokens"],
|
||||||
|
"architecture": {
|
||||||
|
"input_modalities": ["text", "image"]
|
||||||
|
}
|
||||||
|
});
|
||||||
|
let caps = parse_model_capabilities(&m);
|
||||||
|
assert_eq!(caps.name, "anthropic/claude-sonnet-4");
|
||||||
|
assert!(caps.has_tool_calling);
|
||||||
|
assert!(caps.has_vision);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_model_capabilities_handles_missing_fields() {
|
||||||
|
let m = json!({
|
||||||
|
"id": "some/text-only-model"
|
||||||
|
});
|
||||||
|
let caps = parse_model_capabilities(&m);
|
||||||
|
assert_eq!(caps.name, "some/text-only-model");
|
||||||
|
assert!(!caps.has_tool_calling);
|
||||||
|
assert!(!caps.has_vision);
|
||||||
|
}
|
||||||
|
}
|
||||||
381
src/ai/sms_client.rs
Normal file
@@ -0,0 +1,381 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
use super::ollama::OllamaClient;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct SmsApiClient {
|
||||||
|
client: Client,
|
||||||
|
base_url: String,
|
||||||
|
token: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SmsApiClient {
|
||||||
|
pub fn new(base_url: String, token: Option<String>) -> Self {
|
||||||
|
Self {
|
||||||
|
client: Client::new(),
|
||||||
|
base_url,
|
||||||
|
token,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch messages for a specific contact within ±4 days of the given timestamp
|
||||||
|
/// Falls back to all contacts if no messages found for the specific contact
|
||||||
|
/// Messages are sorted by proximity to the center timestamp
|
||||||
|
pub async fn fetch_messages_for_contact(
|
||||||
|
&self,
|
||||||
|
contact: Option<&str>,
|
||||||
|
center_timestamp: i64,
|
||||||
|
) -> Result<Vec<SmsMessage>> {
|
||||||
|
use chrono::Duration;
|
||||||
|
|
||||||
|
// Calculate ±4 days range around the center timestamp
|
||||||
|
let center_dt = chrono::DateTime::from_timestamp(center_timestamp, 0)
|
||||||
|
.ok_or_else(|| anyhow::anyhow!("Invalid timestamp"))?;
|
||||||
|
|
||||||
|
let start_dt = center_dt - Duration::days(4);
|
||||||
|
let end_dt = center_dt + Duration::days(4);
|
||||||
|
|
||||||
|
let start_ts = start_dt.timestamp();
|
||||||
|
let end_ts = end_dt.timestamp();
|
||||||
|
|
||||||
|
// If contact specified, try fetching for that contact first
|
||||||
|
if let Some(contact_name) = contact {
|
||||||
|
log::info!(
|
||||||
|
"Fetching SMS for contact: {} (±4 days from {})",
|
||||||
|
contact_name,
|
||||||
|
center_dt.format("%Y-%m-%d %H:%M:%S")
|
||||||
|
);
|
||||||
|
let messages = self
|
||||||
|
.fetch_messages(start_ts, end_ts, Some(contact_name), Some(center_timestamp))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if !messages.is_empty() {
|
||||||
|
log::info!(
|
||||||
|
"Found {} messages for contact {}",
|
||||||
|
messages.len(),
|
||||||
|
contact_name
|
||||||
|
);
|
||||||
|
return Ok(messages);
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"No messages found for contact {}, falling back to all contacts",
|
||||||
|
contact_name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to all contacts
|
||||||
|
log::info!(
|
||||||
|
"Fetching all SMS messages (±4 days from {})",
|
||||||
|
center_dt.format("%Y-%m-%d %H:%M:%S")
|
||||||
|
);
|
||||||
|
self.fetch_messages(start_ts, end_ts, None, Some(center_timestamp))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch all messages for a specific contact across all time
|
||||||
|
/// Used for embedding generation - retrieves complete message history
|
||||||
|
/// Handles pagination automatically if the API returns a limited number of results
|
||||||
|
pub async fn fetch_all_messages_for_contact(&self, contact: &str) -> Result<Vec<SmsMessage>> {
|
||||||
|
let start_ts = chrono::DateTime::parse_from_rfc3339("2000-01-01T00:00:00Z")
|
||||||
|
.unwrap()
|
||||||
|
.timestamp();
|
||||||
|
let end_ts = chrono::Utc::now().timestamp();
|
||||||
|
|
||||||
|
log::info!("Fetching all historical messages for contact: {}", contact);
|
||||||
|
|
||||||
|
let mut all_messages = Vec::new();
|
||||||
|
let mut offset = 0;
|
||||||
|
let limit = 1000; // Fetch in batches of 1000
|
||||||
|
|
||||||
|
loop {
|
||||||
|
log::debug!(
|
||||||
|
"Fetching batch at offset {} for contact {}",
|
||||||
|
offset,
|
||||||
|
contact
|
||||||
|
);
|
||||||
|
|
||||||
|
let batch = self
|
||||||
|
.fetch_messages_paginated(start_ts, end_ts, Some(contact), None, limit, offset)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let batch_size = batch.len();
|
||||||
|
all_messages.extend(batch);
|
||||||
|
|
||||||
|
log::debug!(
|
||||||
|
"Fetched {} messages (total so far: {})",
|
||||||
|
batch_size,
|
||||||
|
all_messages.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
// If we got fewer messages than the limit, we've reached the end
|
||||||
|
if batch_size < limit {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
offset += limit;
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Fetched {} total messages for contact {}",
|
||||||
|
all_messages.len(),
|
||||||
|
contact
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(all_messages)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal method to fetch messages with pagination support
|
||||||
|
async fn fetch_messages_paginated(
|
||||||
|
&self,
|
||||||
|
start_ts: i64,
|
||||||
|
end_ts: i64,
|
||||||
|
contact: Option<&str>,
|
||||||
|
center_timestamp: Option<i64>,
|
||||||
|
limit: usize,
|
||||||
|
offset: usize,
|
||||||
|
) -> Result<Vec<SmsMessage>> {
|
||||||
|
let mut url = format!(
|
||||||
|
"{}/api/messages/by-date-range/?start_date={}&end_date={}&limit={}&offset={}",
|
||||||
|
self.base_url, start_ts, end_ts, limit, offset
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some(contact_name) = contact {
|
||||||
|
url.push_str(&format!("&contact={}", urlencoding::encode(contact_name)));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(ts) = center_timestamp {
|
||||||
|
            url.push_str(&format!("&timestamp={}", ts));
|
||||||
|
}
|
||||||
|
|
||||||
|
log::debug!("Fetching SMS messages from: {}", url);
|
||||||
|
|
||||||
|
let mut request = self.client.get(&url);
|
||||||
|
|
||||||
|
if let Some(token) = &self.token {
|
||||||
|
request = request.header("Authorization", format!("Bearer {}", token));
|
||||||
|
}
|
||||||
|
|
||||||
|
let response = request.send().await?;
|
||||||
|
|
||||||
|
log::debug!("SMS API response status: {}", response.status());
|
||||||
|
|
||||||
|
if !response.status().is_success() {
|
||||||
|
let status = response.status();
|
||||||
|
let error_body = response.text().await.unwrap_or_default();
|
||||||
|
log::error!("SMS API request failed: {} - {}", status, error_body);
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"SMS API request failed: {} - {}",
|
||||||
|
status,
|
||||||
|
error_body
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let data: SmsApiResponse = response.json().await?;
|
||||||
|
|
||||||
|
Ok(data
|
||||||
|
.messages
|
||||||
|
.into_iter()
|
||||||
|
.map(|m| SmsMessage {
|
||||||
|
contact: m.contact_name,
|
||||||
|
body: m.body,
|
||||||
|
timestamp: m.date,
|
||||||
|
is_sent: m.type_ == 2,
|
||||||
|
})
|
||||||
|
.collect())
|
||||||
|
}
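    // Example of the URL this builds (hypothetical values, shown for clarity;
    // the query-string layout comes straight from the format! calls above):
    //
    //   {base_url}/api/messages/by-date-range/?start_date=946684800&end_date=1700000000
    //       &limit=1000&offset=2000&contact=Jane%20Doe&timestamp=1699990000
    //
    // In a real request the URL is a single line; it is wrapped here only for
    // readability.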
|
||||||
|
|
||||||
|
/// Internal method to fetch messages with optional contact filter and timestamp sorting
|
||||||
|
async fn fetch_messages(
|
||||||
|
&self,
|
||||||
|
start_ts: i64,
|
||||||
|
end_ts: i64,
|
||||||
|
contact: Option<&str>,
|
||||||
|
center_timestamp: Option<i64>,
|
||||||
|
) -> Result<Vec<SmsMessage>> {
|
||||||
|
// Call Django endpoint
|
||||||
|
let mut url = format!(
|
||||||
|
"{}/api/messages/by-date-range/?start_date={}&end_date={}",
|
||||||
|
self.base_url, start_ts, end_ts
|
||||||
|
);
|
||||||
|
|
||||||
|
// Add contact filter if provided
|
||||||
|
if let Some(contact_name) = contact {
|
||||||
|
url.push_str(&format!("&contact={}", urlencoding::encode(contact_name)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add timestamp for proximity sorting if provided
|
||||||
|
if let Some(ts) = center_timestamp {
|
||||||
|
            url.push_str(&format!("&timestamp={}", ts));
|
||||||
|
}
|
||||||
|
|
||||||
|
log::debug!("Fetching SMS messages from: {}", url);
|
||||||
|
|
||||||
|
let mut request = self.client.get(&url);
|
||||||
|
|
||||||
|
// Add authorization header if token exists
|
||||||
|
if let Some(token) = &self.token {
|
||||||
|
request = request.header("Authorization", format!("Bearer {}", token));
|
||||||
|
}
|
||||||
|
|
||||||
|
let response = request.send().await?;
|
||||||
|
|
||||||
|
log::debug!("SMS API response status: {}", response.status());
|
||||||
|
|
||||||
|
if !response.status().is_success() {
|
||||||
|
let status = response.status();
|
||||||
|
let error_body = response.text().await.unwrap_or_default();
|
||||||
|
log::error!("SMS API request failed: {} - {}", status, error_body);
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"SMS API request failed: {} - {}",
|
||||||
|
status,
|
||||||
|
error_body
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let data: SmsApiResponse = response.json().await?;
|
||||||
|
|
||||||
|
// Convert to internal format
|
||||||
|
Ok(data
|
||||||
|
.messages
|
||||||
|
.into_iter()
|
||||||
|
.map(|m| SmsMessage {
|
||||||
|
contact: m.contact_name,
|
||||||
|
body: m.body,
|
||||||
|
timestamp: m.date,
|
||||||
|
is_sent: m.type_ == 2, // type 2 = sent
|
||||||
|
})
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Search message bodies via the Django side's FTS5 / semantic / hybrid
|
||||||
|
/// endpoint. `mode` selects the ranking strategy:
|
||||||
|
/// - "fts5" keyword-only, supports phrase / prefix / boolean / NEAR
|
||||||
|
/// - "semantic" embedding similarity
|
||||||
|
/// - "hybrid" both merged via reciprocal rank fusion (recommended)
|
||||||
|
pub async fn search_messages(
|
||||||
|
&self,
|
||||||
|
query: &str,
|
||||||
|
mode: &str,
|
||||||
|
limit: usize,
|
||||||
|
) -> Result<Vec<SmsSearchHit>> {
|
||||||
|
let url = format!(
|
||||||
|
"{}/api/messages/search/?q={}&mode={}&limit={}",
|
||||||
|
self.base_url,
|
||||||
|
urlencoding::encode(query),
|
||||||
|
urlencoding::encode(mode),
|
||||||
|
limit
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut request = self.client.get(&url);
|
||||||
|
if let Some(token) = &self.token {
|
||||||
|
request = request.header("Authorization", format!("Bearer {}", token));
|
||||||
|
}
|
||||||
|
|
||||||
|
let response = request.send().await?;
|
||||||
|
if !response.status().is_success() {
|
||||||
|
let status = response.status();
|
||||||
|
let body = response.text().await.unwrap_or_default();
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"SMS search request failed: {} - {}",
|
||||||
|
status,
|
||||||
|
body
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let data: SmsSearchResponse = response.json().await?;
|
||||||
|
Ok(data.results)
|
||||||
|
}
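    // Hypothetical call site (not part of this change) showing the intended use
    // of the hybrid mode described above:
    //
    //   let hits = sms_client.search_messages("camping trip", "hybrid", 20).await?;
    //   for hit in &hits {
    //       println!("{} [{}]: {}", hit.contact_name, hit.date, hit.body);
    //   }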
|
||||||
|
|
||||||
|
pub async fn summarize_context(
|
||||||
|
&self,
|
||||||
|
messages: &[SmsMessage],
|
||||||
|
ollama: &OllamaClient,
|
||||||
|
) -> Result<String> {
|
||||||
|
if messages.is_empty() {
|
||||||
|
return Ok(String::from("No messages on this day"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create prompt for Ollama with sender/receiver distinction
|
||||||
|
let user_name = crate::ai::user_display_name();
|
||||||
|
let messages_text: String = messages
|
||||||
|
.iter()
|
||||||
|
.take(60) // Limit to avoid token overflow
|
||||||
|
.map(|m| {
|
||||||
|
if m.is_sent {
|
||||||
|
format!("{}: {}", user_name, m.body)
|
||||||
|
} else {
|
||||||
|
format!("{}: {}", m.contact, m.body)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n");
|
||||||
|
|
||||||
|
let prompt = format!(
|
||||||
|
r#"Summarize these messages in up to 4-5 sentences. Focus on key topics, places, people mentioned, and the overall context of the conversations.
|
||||||
|
|
||||||
|
Messages:
|
||||||
|
{}
|
||||||
|
|
||||||
|
Summary:"#,
|
||||||
|
messages_text
|
||||||
|
);
|
||||||
|
|
||||||
|
ollama
|
||||||
|
.generate(
|
||||||
|
&prompt,
|
||||||
|
// Some("You are a summarizer for the purposes of jogging my memory and highlighting events and situations."),
|
||||||
|
Some("You are the keeper of memories, ingest the context and give me a casual summary of the moment."),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct SmsMessage {
|
||||||
|
pub contact: String,
|
||||||
|
pub body: String,
|
||||||
|
pub timestamp: i64,
|
||||||
|
pub is_sent: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct SmsApiResponse {
|
||||||
|
messages: Vec<SmsApiMessage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct SmsApiMessage {
|
||||||
|
contact_name: String,
|
||||||
|
body: String,
|
||||||
|
date: i64,
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
type_: i32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
pub struct SmsSearchHit {
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub message_id: i64,
|
||||||
|
pub contact_name: String,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub contact_address: String,
|
||||||
|
pub body: String,
|
||||||
|
pub date: i64,
|
||||||
|
/// Message direction code: 1 = received, 2 = sent.
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
pub type_: i32,
|
||||||
|
/// Present for semantic / hybrid modes; absent for fts5.
|
||||||
|
#[serde(default)]
|
||||||
|
pub similarity_score: Option<f32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct SmsSearchResponse {
|
||||||
|
results: Vec<SmsSearchHit>,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
#[serde(default)]
|
||||||
|
search_method: String,
|
||||||
|
}
|
||||||
106
src/auth.rs
@@ -1,57 +1,91 @@
|
|||||||
use actix_web::web::{self, HttpResponse, Json};
|
use actix_web::Responder;
|
||||||
use actix_web::{post, Responder};
|
use actix_web::{
|
||||||
|
HttpResponse,
|
||||||
|
web::{self, Json},
|
||||||
|
};
|
||||||
use chrono::{Duration, Utc};
|
use chrono::{Duration, Utc};
|
||||||
use jsonwebtoken::{encode, EncodingKey, Header};
|
use jsonwebtoken::{EncodingKey, Header, encode};
|
||||||
use log::{debug, error};
|
use log::{error, info};
|
||||||
|
use std::sync::Mutex;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
data::{secret_key, Claims, CreateAccountRequest, LoginRequest, Token},
|
data::{Claims, CreateAccountRequest, LoginRequest, Token, secret_key},
|
||||||
database::UserDao,
|
database::UserDao,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[post("/register")]
|
/// Validate password meets security requirements
|
||||||
async fn register(
|
fn validate_password(password: &str) -> Result<(), String> {
|
||||||
|
if password.len() < 12 {
|
||||||
|
return Err("Password must be at least 12 characters".into());
|
||||||
|
}
|
||||||
|
if !password.chars().any(|c| c.is_uppercase()) {
|
||||||
|
return Err("Password must contain at least one uppercase letter".into());
|
||||||
|
}
|
||||||
|
if !password.chars().any(|c| c.is_lowercase()) {
|
||||||
|
return Err("Password must contain at least one lowercase letter".into());
|
||||||
|
}
|
||||||
|
if !password.chars().any(|c| c.is_numeric()) {
|
||||||
|
return Err("Password must contain at least one number".into());
|
||||||
|
}
|
||||||
|
if !password.chars().any(|c| !c.is_alphanumeric()) {
|
||||||
|
return Err("Password must contain at least one special character".into());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
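// Illustrative check of the policy above (hypothetical, not part of the diff):
// a 12+ character password with upper, lower, digit and symbol passes, while a
// short one is rejected with the first failing rule's message.
//
//   assert!(validate_password("Str0ng-enough-pw!").is_ok());
//   assert_eq!(
//       validate_password("short").unwrap_err(),
//       "Password must be at least 12 characters"
//   );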
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
async fn register<D: UserDao>(
|
||||||
user: Json<CreateAccountRequest>,
|
user: Json<CreateAccountRequest>,
|
||||||
user_dao: web::Data<Box<dyn UserDao>>,
|
user_dao: web::Data<Mutex<D>>,
|
||||||
) -> impl Responder {
|
) -> impl Responder {
|
||||||
if !user.username.is_empty() && user.password.len() > 5 && user.password == user.confirmation {
|
// Validate password strength
|
||||||
if user_dao.user_exists(&user.username) {
|
if let Err(msg) = validate_password(&user.password) {
|
||||||
HttpResponse::BadRequest()
|
return HttpResponse::BadRequest().body(msg);
|
||||||
} else if let Some(_user) = user_dao.create_user(&user.username, &user.password) {
|
}
|
||||||
HttpResponse::Ok()
|
|
||||||
|
if !user.username.is_empty() && user.password == user.confirmation {
|
||||||
|
let mut dao = user_dao.lock().expect("Unable to get UserDao");
|
||||||
|
if dao.user_exists(&user.username) {
|
||||||
|
HttpResponse::BadRequest().finish()
|
||||||
|
} else if let Some(_user) = dao.create_user(&user.username, &user.password) {
|
||||||
|
HttpResponse::Ok().finish()
|
||||||
} else {
|
} else {
|
||||||
HttpResponse::InternalServerError()
|
HttpResponse::InternalServerError().finish()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
HttpResponse::BadRequest()
|
HttpResponse::BadRequest().finish()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn login(
|
pub async fn login<D: UserDao>(
|
||||||
creds: Json<LoginRequest>,
|
creds: Json<LoginRequest>,
|
||||||
user_dao: web::Data<Box<dyn UserDao>>,
|
user_dao: web::Data<Mutex<D>>,
|
||||||
) -> HttpResponse {
|
) -> HttpResponse {
|
||||||
debug!("Logging in: {}", creds.username);
|
info!("Logging in: {}", creds.username);
|
||||||
|
|
||||||
|
let mut user_dao = user_dao.lock().expect("Unable to get UserDao");
|
||||||
|
|
||||||
if let Some(user) = user_dao.get_user(&creds.username, &creds.password) {
|
if let Some(user) = user_dao.get_user(&creds.username, &creds.password) {
|
||||||
let claims = Claims {
|
let claims = Claims {
|
||||||
sub: user.id.to_string(),
|
sub: user.id.to_string(),
|
||||||
exp: (Utc::now() + Duration::days(5)).timestamp(),
|
exp: (Utc::now() + Duration::days(5)).timestamp(),
|
||||||
};
|
};
|
||||||
let token = encode(
|
let token = match encode(
|
||||||
&Header::default(),
|
&Header::default(),
|
||||||
&claims,
|
&claims,
|
||||||
&EncodingKey::from_secret(secret_key().as_bytes()),
|
&EncodingKey::from_secret(secret_key().as_bytes()),
|
||||||
)
|
) {
|
||||||
.unwrap();
|
Ok(t) => t,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to encode JWT: {}", e);
|
||||||
|
return HttpResponse::InternalServerError().finish();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
HttpResponse::Ok().json(Token { token: &token })
|
HttpResponse::Ok().json(Token { token: &token })
|
||||||
} else {
|
} else {
|
||||||
error!(
|
error!("Failed login attempt for user: '{}'", creds.username);
|
||||||
"User not found during login or incorrect password: '{}'",
|
HttpResponse::Unauthorized().finish()
|
||||||
creds.username
|
|
||||||
);
|
|
||||||
HttpResponse::NotFound().finish()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,7 +96,7 @@ mod tests {
|
|||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn test_login_reports_200_when_user_exists() {
|
async fn test_login_reports_200_when_user_exists() {
|
||||||
let dao = TestUserDao::new();
|
let mut dao = TestUserDao::new();
|
||||||
dao.create_user("user", "pass");
|
dao.create_user("user", "pass");
|
||||||
|
|
||||||
let j = Json(LoginRequest {
|
let j = Json(LoginRequest {
|
||||||
@@ -70,14 +104,14 @@ mod tests {
|
|||||||
password: "pass".to_string(),
|
password: "pass".to_string(),
|
||||||
});
|
});
|
||||||
|
|
||||||
let response = login(j, web::Data::new(Box::new(dao))).await;
|
let response = login::<TestUserDao>(j, web::Data::new(Mutex::new(dao))).await;
|
||||||
|
|
||||||
assert_eq!(response.status(), 200);
|
assert_eq!(response.status(), 200);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn test_login_returns_token_on_success() {
|
async fn test_login_returns_token_on_success() {
|
||||||
let dao = TestUserDao::new();
|
let mut dao = TestUserDao::new();
|
||||||
dao.create_user("user", "password");
|
dao.create_user("user", "password");
|
||||||
|
|
||||||
let j = Json(LoginRequest {
|
let j = Json(LoginRequest {
|
||||||
@@ -85,15 +119,17 @@ mod tests {
|
|||||||
password: "password".to_string(),
|
password: "password".to_string(),
|
||||||
});
|
});
|
||||||
|
|
||||||
let response = login(j, web::Data::new(Box::new(dao))).await;
|
let response = login::<TestUserDao>(j, web::Data::new(Mutex::new(dao))).await;
|
||||||
|
|
||||||
assert_eq!(response.status(), 200);
|
assert_eq!(response.status(), 200);
|
||||||
assert!(response.body().read_to_str().contains("\"token\""));
|
let response_text: String = response.read_to_str();
|
||||||
|
|
||||||
|
assert!(response_text.contains("\"token\""));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[actix_rt::test]
|
#[actix_rt::test]
|
||||||
async fn test_login_reports_404_when_user_does_not_exist() {
|
async fn test_login_reports_401_when_user_does_not_exist() {
|
||||||
let dao = TestUserDao::new();
|
let mut dao = TestUserDao::new();
|
||||||
dao.create_user("user", "password");
|
dao.create_user("user", "password");
|
||||||
|
|
||||||
let j = Json(LoginRequest {
|
let j = Json(LoginRequest {
|
||||||
@@ -101,8 +137,8 @@ mod tests {
|
|||||||
password: "password".to_string(),
|
password: "password".to_string(),
|
||||||
});
|
});
|
||||||
|
|
||||||
let response = login(j, web::Data::new(Box::new(dao))).await;
|
let response = login::<TestUserDao>(j, web::Data::new(Mutex::new(dao))).await;
|
||||||
|
|
||||||
assert_eq!(response.status(), 404);
|
assert_eq!(response.status(), 401);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
186
src/bin/backfill_hashes.rs
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
//! Backfill `image_exif.content_hash` + `size_bytes` for rows that were
|
||||||
|
//! ingested before hash computation was wired into the watcher.
|
||||||
|
//!
|
||||||
|
//! The watcher computes hashes for new files as they're ingested, so this
|
||||||
|
//! binary is a one-shot tool for the historical backlog. Safe to re-run;
|
||||||
|
//! only rows with NULL content_hash are processed.
|
||||||
|
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use log::{error, warn};
|
||||||
|
use rayon::prelude::*;
|
||||||
|
|
||||||
|
use image_api::bin_progress;
|
||||||
|
use image_api::content_hash;
|
||||||
|
use image_api::database::{ExifDao, SqliteExifDao, connect};
|
||||||
|
use image_api::libraries::{self, Library};
|
||||||
|
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[command(name = "backfill_hashes")]
|
||||||
|
#[command(about = "Compute content_hash for image_exif rows missing one")]
|
||||||
|
struct Args {
|
||||||
|
/// Max rows to hash per batch. The process loops until no rows remain.
|
||||||
|
#[arg(long, default_value_t = 500)]
|
||||||
|
batch_size: i64,
|
||||||
|
|
||||||
|
/// Rayon parallelism override. 0 uses the default thread pool size.
|
||||||
|
#[arg(long, default_value_t = 0)]
|
||||||
|
parallelism: usize,
|
||||||
|
|
||||||
|
/// Dry-run: log what would be hashed without writing to the DB.
|
||||||
|
#[arg(long)]
|
||||||
|
dry_run: bool,
|
||||||
|
}
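// Example invocations (flag names follow the clap derive above; values are
// placeholders, not recommendations):
//
//   cargo run --release --bin backfill_hashes -- --dry-run
//   cargo run --release --bin backfill_hashes -- --batch-size 1000 --parallelism 4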
|
||||||
|
|
||||||
|
fn main() -> anyhow::Result<()> {
|
||||||
|
env_logger::init();
|
||||||
|
dotenv::dotenv().ok();
|
||||||
|
|
||||||
|
let args = Args::parse();
|
||||||
|
if args.parallelism > 0 {
|
||||||
|
rayon::ThreadPoolBuilder::new()
|
||||||
|
.num_threads(args.parallelism)
|
||||||
|
.build_global()
|
||||||
|
.expect("Unable to configure rayon thread pool");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve libraries (patch placeholder if still unset) so we can map
|
||||||
|
// library_id back to a root_path on disk.
|
||||||
|
let base_path = dotenv::var("BASE_PATH").ok();
|
||||||
|
let mut seed_conn = connect();
|
||||||
|
if let Some(base) = base_path.as_deref() {
|
||||||
|
libraries::seed_or_patch_from_env(&mut seed_conn, base);
|
||||||
|
}
|
||||||
|
let libs = libraries::load_all(&mut seed_conn);
|
||||||
|
drop(seed_conn);
|
||||||
|
if libs.is_empty() {
|
||||||
|
anyhow::bail!("No libraries configured; cannot backfill hashes");
|
||||||
|
}
|
||||||
|
let libs_by_id: std::collections::HashMap<i32, Library> =
|
||||||
|
libs.into_iter().map(|lib| (lib.id, lib)).collect();
|
||||||
|
println!(
|
||||||
|
"Configured libraries: {}",
|
||||||
|
libs_by_id
|
||||||
|
.values()
|
||||||
|
.map(|l| format!("{} -> {}", l.name, l.root_path))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join(", ")
|
||||||
|
);
|
||||||
|
|
||||||
|
let dao: Arc<Mutex<Box<dyn ExifDao>>> = Arc::new(Mutex::new(Box::new(SqliteExifDao::new())));
|
||||||
|
let ctx = opentelemetry::Context::new();
|
||||||
|
|
||||||
|
let mut total_hashed = 0u64;
|
||||||
|
let mut total_missing = 0u64;
|
||||||
|
let mut total_errors = 0u64;
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
let pb = bin_progress::spinner("hashing");
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let rows = {
|
||||||
|
let mut guard = dao.lock().expect("Unable to lock ExifDao");
|
||||||
|
guard
|
||||||
|
.get_rows_missing_hash(&ctx, args.batch_size)
|
||||||
|
.map_err(|e| anyhow::anyhow!("DB error: {:?}", e))?
|
||||||
|
};
|
||||||
|
if rows.is_empty() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let batch_size = rows.len();
|
||||||
|
pb.set_message(format!(
|
||||||
|
"batch of {} (hashed={} missing={} errors={})",
|
||||||
|
batch_size, total_hashed, total_missing, total_errors
|
||||||
|
));
|
||||||
|
|
||||||
|
// Compute hashes in parallel (I/O-bound; rayon helps on local disks,
|
||||||
|
// throttled by network on SMB mounts — use --parallelism to tune).
|
||||||
|
let results: Vec<(i32, String, Option<content_hash::FileIdentity>)> = rows
|
||||||
|
.into_par_iter()
|
||||||
|
.map(|(library_id, rel_path)| {
|
||||||
|
let abs = libs_by_id
|
||||||
|
.get(&library_id)
|
||||||
|
.map(|lib| Path::new(&lib.root_path).join(&rel_path));
|
||||||
|
match abs {
|
||||||
|
Some(abs_path) if abs_path.exists() => match content_hash::compute(&abs_path) {
|
||||||
|
Ok(id) => (library_id, rel_path, Some(id)),
|
||||||
|
Err(e) => {
|
||||||
|
error!("hash error for {}: {:?}", abs_path.display(), e);
|
||||||
|
(library_id, rel_path, None)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Some(_) => (library_id, rel_path, None), // file missing on disk
|
||||||
|
None => {
|
||||||
|
warn!("Row refers to unknown library_id {}", library_id);
|
||||||
|
(library_id, rel_path, None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Persist sequentially — SQLite writes serialize anyway.
|
||||||
|
if !args.dry_run {
|
||||||
|
let mut guard = dao.lock().expect("Unable to lock ExifDao");
|
||||||
|
for (library_id, rel_path, ident) in &results {
|
||||||
|
match ident {
|
||||||
|
Some(id) => {
|
||||||
|
match guard.backfill_content_hash(
|
||||||
|
&ctx,
|
||||||
|
*library_id,
|
||||||
|
rel_path,
|
||||||
|
&id.content_hash,
|
||||||
|
id.size_bytes,
|
||||||
|
) {
|
||||||
|
Ok(_) => {
|
||||||
|
total_hashed += 1;
|
||||||
|
pb.inc(1);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
pb.println(format!("persist error for {}: {:?}", rel_path, e));
|
||||||
|
total_errors += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
total_missing += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for (_, rel_path, ident) in &results {
|
||||||
|
match ident {
|
||||||
|
Some(id) => {
|
||||||
|
pb.println(format!(
|
||||||
|
"[dry-run] {} -> {} ({} bytes)",
|
||||||
|
rel_path, id.content_hash, id.size_bytes
|
||||||
|
));
|
||||||
|
total_hashed += 1;
|
||||||
|
pb.inc(1);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
total_missing += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pb.println(format!(
|
||||||
|
"[dry-run] processed one batch of {}. Stopping — a real run would continue \
|
||||||
|
until no NULL content_hash rows remain.",
|
||||||
|
results.len()
|
||||||
|
));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pb.finish_and_clear();
|
||||||
|
println!(
|
||||||
|
"Done. hashed={}, skipped (missing on disk)={}, errors={}, elapsed={:.1}s",
|
||||||
|
total_hashed,
|
||||||
|
total_missing,
|
||||||
|
total_errors,
|
||||||
|
start.elapsed().as_secs_f64()
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
143
src/bin/cleanup_files.rs
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
|
||||||
|
use image_api::cleanup::{
|
||||||
|
CleanupConfig, DatabaseUpdater, resolve_missing_files, validate_file_types,
|
||||||
|
};
|
||||||
|
use image_api::database::{SqliteExifDao, SqliteFavoriteDao};
|
||||||
|
use image_api::tags::SqliteTagDao;
|
||||||
|
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[command(name = "cleanup_files")]
|
||||||
|
#[command(about = "File cleanup and fix utility for ImageApi", long_about = None)]
|
||||||
|
struct Args {
|
||||||
|
#[arg(long, help = "Preview changes without making them")]
|
||||||
|
dry_run: bool,
|
||||||
|
|
||||||
|
#[arg(long, help = "Auto-fix all issues without prompting")]
|
||||||
|
auto_fix: bool,
|
||||||
|
|
||||||
|
#[arg(long, help = "Skip phase 1 (missing file resolution)")]
|
||||||
|
skip_phase1: bool,
|
||||||
|
|
||||||
|
#[arg(long, help = "Skip phase 2 (file type validation)")]
|
||||||
|
skip_phase2: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() -> anyhow::Result<()> {
|
||||||
|
// Initialize logging
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
|
// Load environment variables
|
||||||
|
dotenv::dotenv()?;
|
||||||
|
|
||||||
|
// Parse CLI arguments
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
// Get base path from environment
|
||||||
|
let base_path = dotenv::var("BASE_PATH")?;
|
||||||
|
let base = PathBuf::from(&base_path);
|
||||||
|
|
||||||
|
println!("File Cleanup and Fix Utility");
|
||||||
|
println!("============================");
|
||||||
|
println!("Base path: {}", base.display());
|
||||||
|
println!("Dry run: {}", args.dry_run);
|
||||||
|
println!("Auto fix: {}", args.auto_fix);
|
||||||
|
println!();
|
||||||
|
|
||||||
|
// Pre-flight checks
|
||||||
|
if !base.exists() {
|
||||||
|
eprintln!("Error: Base path does not exist: {}", base.display());
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !base.is_dir() {
|
||||||
|
eprintln!("Error: Base path is not a directory: {}", base.display());
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create configuration
|
||||||
|
let config = CleanupConfig {
|
||||||
|
base_path: base,
|
||||||
|
dry_run: args.dry_run,
|
||||||
|
auto_fix: args.auto_fix,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create DAOs
|
||||||
|
println!("Connecting to database...");
|
||||||
|
let tag_dao: Arc<Mutex<dyn image_api::tags::TagDao>> =
|
||||||
|
Arc::new(Mutex::new(SqliteTagDao::default()));
|
||||||
|
let exif_dao: Arc<Mutex<dyn image_api::database::ExifDao>> =
|
||||||
|
Arc::new(Mutex::new(SqliteExifDao::new()));
|
||||||
|
let favorites_dao: Arc<Mutex<dyn image_api::database::FavoriteDao>> =
|
||||||
|
Arc::new(Mutex::new(SqliteFavoriteDao::new()));
|
||||||
|
|
||||||
|
// Create database updater
|
||||||
|
let mut db_updater = DatabaseUpdater::new(tag_dao, exif_dao, favorites_dao);
|
||||||
|
|
||||||
|
println!("✓ Database connected\n");
|
||||||
|
|
||||||
|
// Track overall statistics
|
||||||
|
let mut total_issues_found = 0;
|
||||||
|
let mut total_issues_fixed = 0;
|
||||||
|
let mut total_errors = Vec::new();
|
||||||
|
|
||||||
|
// Phase 1: Missing file resolution
|
||||||
|
if !args.skip_phase1 {
|
||||||
|
match resolve_missing_files(&config, &mut db_updater) {
|
||||||
|
Ok(stats) => {
|
||||||
|
total_issues_found += stats.issues_found;
|
||||||
|
total_issues_fixed += stats.issues_fixed;
|
||||||
|
total_errors.extend(stats.errors);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Phase 1 failed: {:?}", e);
|
||||||
|
total_errors.push(format!("Phase 1 error: {}", e));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
println!("Phase 1: Skipped (--skip-phase1)");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2: File type validation
|
||||||
|
if !args.skip_phase2 {
|
||||||
|
match validate_file_types(&config, &mut db_updater) {
|
||||||
|
Ok(stats) => {
|
||||||
|
total_issues_found += stats.issues_found;
|
||||||
|
total_issues_fixed += stats.issues_fixed;
|
||||||
|
total_errors.extend(stats.errors);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Phase 2 failed: {:?}", e);
|
||||||
|
total_errors.push(format!("Phase 2 error: {}", e));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
println!("\nPhase 2: Skipped (--skip-phase2)");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final summary
|
||||||
|
println!("\n============================");
|
||||||
|
println!("Cleanup Complete!");
|
||||||
|
println!("============================");
|
||||||
|
println!("Total issues found: {}", total_issues_found);
|
||||||
|
if config.dry_run {
|
||||||
|
println!("Total issues that would be fixed: {}", total_issues_found);
|
||||||
|
} else {
|
||||||
|
println!("Total issues fixed: {}", total_issues_fixed);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !total_errors.is_empty() {
|
||||||
|
println!("\nErrors encountered:");
|
||||||
|
for (i, error) in total_errors.iter().enumerate() {
|
||||||
|
println!(" {}. {}", i + 1, error);
|
||||||
|
}
|
||||||
|
println!("\nSome operations failed. Review errors above.");
|
||||||
|
} else {
|
||||||
|
println!("\n✓ No errors encountered");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
307
src/bin/diagnose_embeddings.rs
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::Parser;
|
||||||
|
use diesel::prelude::*;
|
||||||
|
use diesel::sql_query;
|
||||||
|
use diesel::sqlite::SqliteConnection;
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[command(author, version, about = "Diagnose embedding distribution and identify problematic summaries", long_about = None)]
|
||||||
|
struct Args {
|
||||||
|
/// Show detailed per-summary statistics
|
||||||
|
#[arg(short, long, default_value_t = false)]
|
||||||
|
verbose: bool,
|
||||||
|
|
||||||
|
/// Number of top "central" summaries to show (ones that match everything)
|
||||||
|
#[arg(short, long, default_value_t = 10)]
|
||||||
|
top: usize,
|
||||||
|
|
||||||
|
/// Test a specific query to see what matches
|
||||||
|
#[arg(short, long)]
|
||||||
|
query: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(QueryableByName, Debug)]
|
||||||
|
struct EmbeddingRow {
|
||||||
|
#[diesel(sql_type = diesel::sql_types::Integer)]
|
||||||
|
id: i32,
|
||||||
|
#[diesel(sql_type = diesel::sql_types::Text)]
|
||||||
|
date: String,
|
||||||
|
#[diesel(sql_type = diesel::sql_types::Text)]
|
||||||
|
contact: String,
|
||||||
|
#[diesel(sql_type = diesel::sql_types::Text)]
|
||||||
|
summary: String,
|
||||||
|
#[diesel(sql_type = diesel::sql_types::Binary)]
|
||||||
|
embedding: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deserialize_embedding(bytes: &[u8]) -> Result<Vec<f32>> {
|
||||||
|
if !bytes.len().is_multiple_of(4) {
|
||||||
|
return Err(anyhow::anyhow!("Invalid embedding byte length"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let count = bytes.len() / 4;
|
||||||
|
let mut vec = Vec::with_capacity(count);
|
||||||
|
|
||||||
|
for chunk in bytes.chunks_exact(4) {
|
||||||
|
let float = f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
|
||||||
|
vec.push(float);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(vec)
|
||||||
|
}
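// Worked example (illustrative): eight little-endian bytes decode to two f32s.
//
//   let bytes = [0x00, 0x00, 0x80, 0x3F, 0x00, 0x00, 0x00, 0x40]; // 1.0f32, 2.0f32
//   assert_eq!(deserialize_embedding(&bytes).unwrap(), vec![1.0, 2.0]);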
|
||||||
|
|
||||||
|
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
|
||||||
|
if a.len() != b.len() {
|
||||||
|
return 0.0;
|
||||||
|
}
|
||||||
|
|
||||||
|
let dot_product: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
|
||||||
|
let magnitude_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
|
||||||
|
let magnitude_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
|
||||||
|
|
||||||
|
if magnitude_a == 0.0 || magnitude_b == 0.0 {
|
||||||
|
return 0.0;
|
||||||
|
}
|
||||||
|
|
||||||
|
dot_product / (magnitude_a * magnitude_b)
|
||||||
|
}
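// Quick sanity check of the formula (illustrative): vectors pointing the same
// way score 1.0, orthogonal vectors score 0.0.
//
//   assert!((cosine_similarity(&[1.0, 2.0], &[2.0, 4.0]) - 1.0).abs() < 1e-6);
//   assert_eq!(cosine_similarity(&[1.0, 0.0], &[0.0, 1.0]), 0.0);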
|
||||||
|
|
||||||
|
fn main() -> Result<()> {
|
||||||
|
dotenv::dotenv().ok();
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
let database_url = env::var("DATABASE_URL").unwrap_or_else(|_| "auth.db".to_string());
|
||||||
|
println!("Connecting to database: {}", database_url);
|
||||||
|
|
||||||
|
let mut conn = SqliteConnection::establish(&database_url)?;
|
||||||
|
|
||||||
|
// Load all embeddings
|
||||||
|
println!("\nLoading embeddings from daily_conversation_summaries...");
|
||||||
|
let rows: Vec<EmbeddingRow> = sql_query(
|
||||||
|
"SELECT id, date, contact, summary, embedding FROM daily_conversation_summaries ORDER BY date"
|
||||||
|
)
|
||||||
|
.load(&mut conn)?;
|
||||||
|
|
||||||
|
println!("Found {} summaries with embeddings\n", rows.len());
|
||||||
|
|
||||||
|
if rows.is_empty() {
|
||||||
|
println!("No summaries found!");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse all embeddings
|
||||||
|
let mut embeddings: Vec<(i32, String, String, String, Vec<f32>)> = Vec::new();
|
||||||
|
for row in &rows {
|
||||||
|
match deserialize_embedding(&row.embedding) {
|
||||||
|
Ok(emb) => {
|
||||||
|
embeddings.push((
|
||||||
|
row.id,
|
||||||
|
row.date.clone(),
|
||||||
|
row.contact.clone(),
|
||||||
|
row.summary.clone(),
|
||||||
|
emb,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!(
|
||||||
|
"Warning: Failed to parse embedding for id {}: {}",
|
||||||
|
row.id, e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("Successfully parsed {} embeddings\n", embeddings.len());
|
||||||
|
|
||||||
|
// Compute embedding statistics
|
||||||
|
println!("========================================");
|
||||||
|
println!("EMBEDDING STATISTICS");
|
||||||
|
println!("========================================\n");
|
||||||
|
|
||||||
|
// Check embedding variance (are values clustered or spread out?)
|
||||||
|
let first_emb = &embeddings[0].4;
|
||||||
|
let dim = first_emb.len();
|
||||||
|
println!("Embedding dimensions: {}", dim);
|
||||||
|
|
||||||
|
// Calculate mean and std dev per dimension
|
||||||
|
let mut dim_means: Vec<f32> = vec![0.0; dim];
|
||||||
|
let mut dim_vars: Vec<f32> = vec![0.0; dim];
|
||||||
|
|
||||||
|
for (_, _, _, _, emb) in &embeddings {
|
||||||
|
for (i, &val) in emb.iter().enumerate() {
|
||||||
|
dim_means[i] += val;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for m in &mut dim_means {
|
||||||
|
*m /= embeddings.len() as f32;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (_, _, _, _, emb) in &embeddings {
|
||||||
|
for (i, &val) in emb.iter().enumerate() {
|
||||||
|
let diff = val - dim_means[i];
|
||||||
|
dim_vars[i] += diff * diff;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for v in &mut dim_vars {
|
||||||
|
*v = (*v / embeddings.len() as f32).sqrt();
|
||||||
|
}
|
||||||
|
|
||||||
|
let avg_std_dev: f32 = dim_vars.iter().sum::<f32>() / dim as f32;
|
||||||
|
let min_std_dev: f32 = dim_vars.iter().cloned().fold(f32::INFINITY, f32::min);
|
||||||
|
let max_std_dev: f32 = dim_vars.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
|
||||||
|
|
||||||
|
println!("Per-dimension standard deviation:");
|
||||||
|
println!(" Average: {:.6}", avg_std_dev);
|
||||||
|
println!(" Min: {:.6}", min_std_dev);
|
||||||
|
println!(" Max: {:.6}", max_std_dev);
|
||||||
|
println!();
|
||||||
|
|
||||||
|
// Compute pairwise similarities
|
||||||
|
println!("Computing pairwise similarities (this may take a moment)...\n");
|
||||||
|
|
||||||
|
let mut all_similarities: Vec<f32> = Vec::new();
|
||||||
|
let mut per_embedding_avg: Vec<(usize, f32)> = Vec::new();
|
||||||
|
|
||||||
|
for i in 0..embeddings.len() {
|
||||||
|
let mut sum = 0.0;
|
||||||
|
let mut count = 0;
|
||||||
|
for j in 0..embeddings.len() {
|
||||||
|
if i != j {
|
||||||
|
let sim = cosine_similarity(&embeddings[i].4, &embeddings[j].4);
|
||||||
|
all_similarities.push(sim);
|
||||||
|
sum += sim;
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
per_embedding_avg.push((i, sum / count as f32));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort similarities for percentile analysis
|
||||||
|
all_similarities.sort_by(|a, b| a.partial_cmp(b).unwrap());
|
||||||
|
|
||||||
|
let min_sim = all_similarities.first().copied().unwrap_or(0.0);
|
||||||
|
let max_sim = all_similarities.last().copied().unwrap_or(0.0);
|
||||||
|
let median_sim = all_similarities[all_similarities.len() / 2];
|
||||||
|
let p25 = all_similarities[all_similarities.len() / 4];
|
||||||
|
let p75 = all_similarities[3 * all_similarities.len() / 4];
|
||||||
|
let mean_sim: f32 = all_similarities.iter().sum::<f32>() / all_similarities.len() as f32;
|
||||||
|
|
||||||
|
println!("========================================");
|
||||||
|
println!("PAIRWISE SIMILARITY DISTRIBUTION");
|
||||||
|
println!("========================================\n");
|
||||||
|
println!("Total pairs analyzed: {}", all_similarities.len());
|
||||||
|
println!();
|
||||||
|
println!("Min similarity: {:.4}", min_sim);
|
||||||
|
println!("25th percentile: {:.4}", p25);
|
||||||
|
println!("Median similarity: {:.4}", median_sim);
|
||||||
|
println!("Mean similarity: {:.4}", mean_sim);
|
||||||
|
println!("75th percentile: {:.4}", p75);
|
||||||
|
println!("Max similarity: {:.4}", max_sim);
|
||||||
|
println!();
|
||||||
|
|
||||||
|
// Analyze distribution
|
||||||
|
let count_above_08 = all_similarities.iter().filter(|&&s| s > 0.8).count();
|
||||||
|
let count_above_07 = all_similarities.iter().filter(|&&s| s > 0.7).count();
|
||||||
|
let count_above_06 = all_similarities.iter().filter(|&&s| s > 0.6).count();
|
||||||
|
let count_above_05 = all_similarities.iter().filter(|&&s| s > 0.5).count();
|
||||||
|
let count_below_03 = all_similarities.iter().filter(|&&s| s < 0.3).count();
|
||||||
|
|
||||||
|
println!("Similarity distribution:");
|
||||||
|
println!(
|
||||||
|
" > 0.8: {} ({:.1}%)",
|
||||||
|
count_above_08,
|
||||||
|
100.0 * count_above_08 as f32 / all_similarities.len() as f32
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" > 0.7: {} ({:.1}%)",
|
||||||
|
count_above_07,
|
||||||
|
100.0 * count_above_07 as f32 / all_similarities.len() as f32
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" > 0.6: {} ({:.1}%)",
|
||||||
|
count_above_06,
|
||||||
|
100.0 * count_above_06 as f32 / all_similarities.len() as f32
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" > 0.5: {} ({:.1}%)",
|
||||||
|
count_above_05,
|
||||||
|
100.0 * count_above_05 as f32 / all_similarities.len() as f32
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" < 0.3: {} ({:.1}%)",
|
||||||
|
count_below_03,
|
||||||
|
100.0 * count_below_03 as f32 / all_similarities.len() as f32
|
||||||
|
);
|
||||||
|
println!();
|
||||||
|
|
||||||
|
// Identify "central" embeddings (high average similarity to all others)
|
||||||
|
per_embedding_avg.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
|
||||||
|
|
||||||
|
println!("========================================");
|
||||||
|
println!("TOP {} MOST 'CENTRAL' SUMMARIES", args.top);
|
||||||
|
println!("(These match everything with high similarity)");
|
||||||
|
println!("========================================\n");
|
||||||
|
|
||||||
|
for (rank, (idx, avg_sim)) in per_embedding_avg.iter().take(args.top).enumerate() {
|
||||||
|
let (id, date, contact, summary, _) = &embeddings[*idx];
|
||||||
|
let preview: String = summary.chars().take(80).collect();
|
||||||
|
println!("{}. [id={}, avg_sim={:.4}]", rank + 1, id, avg_sim);
|
||||||
|
println!(" Date: {}, Contact: {}", date, contact);
|
||||||
|
println!(" Preview: {}...", preview.replace('\n', " "));
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also show the least central (most unique)
|
||||||
|
println!("========================================");
|
||||||
|
println!("TOP {} MOST UNIQUE SUMMARIES", args.top);
|
||||||
|
println!("(These are most different from others)");
|
||||||
|
println!("========================================\n");
|
||||||
|
|
||||||
|
for (rank, (idx, avg_sim)) in per_embedding_avg.iter().rev().take(args.top).enumerate() {
|
||||||
|
let (id, date, contact, summary, _) = &embeddings[*idx];
|
||||||
|
let preview: String = summary.chars().take(80).collect();
|
||||||
|
println!("{}. [id={}, avg_sim={:.4}]", rank + 1, id, avg_sim);
|
||||||
|
println!(" Date: {}, Contact: {}", date, contact);
|
||||||
|
println!(" Preview: {}...", preview.replace('\n', " "));
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Diagnosis
|
||||||
|
println!("========================================");
|
||||||
|
println!("DIAGNOSIS");
|
||||||
|
println!("========================================\n");
|
||||||
|
|
||||||
|
if mean_sim > 0.7 {
|
||||||
|
println!("⚠️ HIGH AVERAGE SIMILARITY ({:.4})", mean_sim);
|
||||||
|
println!(" All embeddings are very similar to each other.");
|
||||||
|
println!(" This explains why the same summaries always match.");
|
||||||
|
println!();
|
||||||
|
println!(" Possible causes:");
|
||||||
|
println!(
|
||||||
|
" 1. Summaries have similar structure/phrasing (e.g., all start with 'Summary:')"
|
||||||
|
);
|
||||||
|
println!(" 2. Embedding model isn't capturing semantic differences well");
|
||||||
|
println!(" 3. Daily conversations have similar topics (e.g., 'good morning', plans)");
|
||||||
|
println!();
|
||||||
|
println!(" Recommendations:");
|
||||||
|
println!(" 1. Try a different embedding model (mxbai-embed-large, bge-large)");
|
||||||
|
println!(" 2. Improve summary diversity by varying the prompt");
|
||||||
|
println!(" 3. Extract and embed only keywords/entities, not full summaries");
|
||||||
|
} else if mean_sim > 0.5 {
|
||||||
|
println!("⚡ MODERATE AVERAGE SIMILARITY ({:.4})", mean_sim);
|
||||||
|
println!(" Some clustering in embeddings, but some differentiation exists.");
|
||||||
|
println!();
|
||||||
|
println!(" The 'central' summaries above are likely dominating search results.");
|
||||||
|
println!(" Consider:");
|
||||||
|
println!(" 1. Filtering out summaries with very high centrality");
|
||||||
|
println!(" 2. Adding time-based weighting to prefer recent/relevant dates");
|
||||||
|
println!(" 3. Increasing the similarity threshold from 0.3 to 0.5");
|
||||||
|
} else {
|
||||||
|
println!("✅ GOOD EMBEDDING DIVERSITY ({:.4})", mean_sim);
|
||||||
|
println!(" Embeddings are well-differentiated.");
|
||||||
|
println!(" If same results keep appearing, the issue may be elsewhere.");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
171
src/bin/import_calendar.rs
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
use anyhow::{Context, Result};
use chrono::Utc;
use clap::Parser;
use image_api::ai::ollama::OllamaClient;
use image_api::bin_progress;
use image_api::database::calendar_dao::{InsertCalendarEvent, SqliteCalendarEventDao};
use image_api::parsers::ical_parser::parse_ics_file;
use log::{error, info};

// Import the trait to use its methods
use image_api::database::CalendarEventDao;

#[derive(Parser, Debug)]
#[command(author, version, about = "Import Google Takeout Calendar data", long_about = None)]
struct Args {
    /// Path to the .ics calendar file
    #[arg(short, long)]
    path: String,

    /// Generate embeddings for calendar events (slower but enables semantic search)
    #[arg(long, default_value = "false")]
    generate_embeddings: bool,

    /// Skip events that already exist in the database
    #[arg(long, default_value = "true")]
    skip_existing: bool,

    /// Batch size for embedding generation
    #[arg(long, default_value = "128")]
    batch_size: usize,
}

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    env_logger::init();

    let args = Args::parse();

    info!("Parsing calendar file: {}", args.path);
    let events = parse_ics_file(&args.path).context("Failed to parse .ics file")?;

    info!("Found {} calendar events", events.len());

    let context = opentelemetry::Context::current();

    let ollama = if args.generate_embeddings {
        let primary_url = dotenv::var("OLLAMA_PRIMARY_URL")
            .or_else(|_| dotenv::var("OLLAMA_URL"))
            .unwrap_or_else(|_| "http://localhost:11434".to_string());
        let fallback_url = dotenv::var("OLLAMA_FALLBACK_URL").ok();
        let primary_model = dotenv::var("OLLAMA_PRIMARY_MODEL")
            .or_else(|_| dotenv::var("OLLAMA_MODEL"))
            .unwrap_or_else(|_| "nomic-embed-text:v1.5".to_string());
        let fallback_model = dotenv::var("OLLAMA_FALLBACK_MODEL").ok();

        Some(OllamaClient::new(
            primary_url,
            fallback_url,
            primary_model,
            fallback_model,
        ))
    } else {
        None
    };

    let mut inserted_count = 0usize;
    let mut skipped_count = 0usize;
    let mut error_count = 0usize;

    let pb = bin_progress::determinate(events.len() as u64, "importing");

    // Process events in batches
    // Can't use rayon with async, so process sequentially
    for event in &events {
        let mut dao_instance = SqliteCalendarEventDao::new();

        // Check if event exists
        if args.skip_existing
            && let Ok(exists) = dao_instance.event_exists(
                &context,
                event.event_uid.as_deref().unwrap_or(""),
                event.start_time,
            )
            && exists
        {
            skipped_count += 1;
            pb.inc(1);
            continue;
        }

        // Generate embedding if requested (blocking call)
        let embedding = if let Some(ref ollama_client) = ollama {
            let text = format!(
                "{} {} {}",
                event.summary,
                event.description.as_deref().unwrap_or(""),
                event.location.as_deref().unwrap_or("")
            );

            match tokio::task::block_in_place(|| {
                tokio::runtime::Handle::current()
                    .block_on(async { ollama_client.generate_embedding(&text).await })
            }) {
                Ok(emb) => Some(emb),
                Err(e) => {
                    pb.println(format!("embedding failed for '{}': {}", event.summary, e));
                    None
                }
            }
        } else {
            None
        };

        // Insert into database
        let insert_event = InsertCalendarEvent {
            event_uid: event.event_uid.clone(),
            summary: event.summary.clone(),
            description: event.description.clone(),
            location: event.location.clone(),
            start_time: event.start_time,
            end_time: event.end_time,
            all_day: event.all_day,
            organizer: event.organizer.clone(),
            attendees: if event.attendees.is_empty() {
                None
            } else {
                Some(serde_json::to_string(&event.attendees).unwrap_or_default())
            },
            embedding,
            created_at: Utc::now().timestamp(),
            source_file: Some(args.path.clone()),
        };

        match dao_instance.store_event(&context, insert_event) {
            Ok(_) => inserted_count += 1,
            Err(e) => {
                pb.println(format!("store failed for '{}': {:?}", event.summary, e));
                error_count += 1;
            }
        }
        pb.set_message(format!(
            "inserted={} skipped={} errors={}",
            inserted_count, skipped_count, error_count
        ));
        pb.inc(1);
    }

    pb.finish_and_clear();

    info!("=== Import Summary ===");
    info!("Total events found: {}", events.len());
    info!("Successfully inserted: {}", inserted_count);
    info!("Skipped (already exist): {}", skipped_count);
    info!("Errors: {}", error_count);

    if args.generate_embeddings {
        info!("Embeddings were generated for semantic search");
    } else {
        info!("No embeddings generated (use --generate-embeddings to enable semantic search)");
    }

    if error_count > 0 {
        error!(
            "Completed with {} errors — review log output above",
            error_count
        );
    }

    Ok(())
}
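The embedding step above hops from the sequential import loop back into async Ollama calls via tokio::task::block_in_place plus Handle::block_on. Below is a stripped-down sketch of that bridge, not the project's code; fake_embedding is a hypothetical stand-in for OllamaClient::generate_embedding, and block_in_place requires the multi-thread runtime flavour.

// Minimal sketch of the sync-to-async bridge used in the embedding branch.
// `fake_embedding` is a hypothetical stand-in for the real client call.
async fn fake_embedding(text: &str) -> Vec<f32> {
    // Stand-in: a tiny deterministic vector derived from the text length.
    vec![text.len() as f32; 4]
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let text = "Dentist appointment at 10am".to_string();
    // block_in_place panics on the current-thread runtime, hence multi_thread.
    let embedding: Vec<f32> = tokio::task::block_in_place(|| {
        tokio::runtime::Handle::current().block_on(async { fake_embedding(&text).await })
    });
    println!("{} dims", embedding.len());
}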
122  src/bin/import_location_history.rs  Normal file
@@ -0,0 +1,122 @@
use anyhow::{Context, Result};
use chrono::Utc;
use clap::Parser;
use image_api::bin_progress;
use image_api::database::location_dao::{InsertLocationRecord, SqliteLocationHistoryDao};
use image_api::parsers::location_json_parser::parse_location_json;
use log::{error, info};
// Import the trait to use its methods
use image_api::database::LocationHistoryDao;

#[derive(Parser, Debug)]
#[command(author, version, about = "Import Google Takeout Location History data", long_about = None)]
struct Args {
    /// Path to the Location History JSON file
    #[arg(short, long)]
    path: String,

    /// Skip locations that already exist in the database
    #[arg(long, default_value = "true")]
    skip_existing: bool,

    /// Batch size for database inserts
    #[arg(long, default_value = "1000")]
    batch_size: usize,
}

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    env_logger::init();

    let args = Args::parse();

    info!("Parsing location history file: {}", args.path);
    let locations =
        parse_location_json(&args.path).context("Failed to parse location history JSON")?;

    info!("Found {} location records", locations.len());

    let context = opentelemetry::Context::current();

    let mut inserted_count = 0usize;
    let mut skipped_count = 0usize;
    let mut error_count = 0usize;

    let mut dao_instance = SqliteLocationHistoryDao::new();
    let created_at = Utc::now().timestamp();

    let pb = bin_progress::determinate(locations.len() as u64, "importing");

    // Process in batches using batch insert for massive speedup
    for chunk in locations.chunks(args.batch_size) {
        // Convert to InsertLocationRecord
        let mut batch_inserts = Vec::with_capacity(chunk.len());
        let mut chunk_skipped = 0usize;

        for location in chunk {
            // Skip existing check if requested (makes import much slower)
            if args.skip_existing
                && let Ok(exists) = dao_instance.location_exists(
                    &context,
                    location.timestamp,
                    location.latitude,
                    location.longitude,
                )
                && exists
            {
                skipped_count += 1;
                chunk_skipped += 1;
                continue;
            }

            batch_inserts.push(InsertLocationRecord {
                timestamp: location.timestamp,
                latitude: location.latitude,
                longitude: location.longitude,
                accuracy: location.accuracy,
                activity: location.activity.clone(),
                activity_confidence: location.activity_confidence,
                place_name: None,
                place_category: None,
                embedding: None,
                created_at,
                source_file: Some(args.path.clone()),
            });
        }

        // Batch insert entire chunk in single transaction
        if !batch_inserts.is_empty() {
            match dao_instance.store_locations_batch(&context, batch_inserts) {
                Ok(count) => inserted_count += count,
                Err(e) => {
                    pb.println(format!("batch insert failed: {:?}", e));
                    error_count += chunk.len() - chunk_skipped;
                }
            }
        }

        pb.set_message(format!(
            "inserted={} skipped={} errors={}",
            inserted_count, skipped_count, error_count
        ));
        pb.inc(chunk.len() as u64);
    }

    pb.finish_and_clear();

    info!("=== Import Summary ===");
    info!("Total locations found: {}", locations.len());
    info!("Successfully inserted: {}", inserted_count);
    info!("Skipped (already exist): {}", skipped_count);
    info!("Errors: {}", error_count);

    if error_count > 0 {
        error!(
            "Completed with {} errors — review log output above",
            error_count
        );
    }

    Ok(())
}
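The chunked insert above keeps a per-chunk skip counter so a failed batch only charges the rows it actually attempted (chunk.len() - chunk_skipped). Here is a self-contained sketch of that bookkeeping; exists() and store_batch() are hypothetical stand-ins for the DAO calls, and the data is a toy range.

// Self-contained sketch of the per-chunk accounting; exists() and
// store_batch() are hypothetical stand-ins for the DAO calls above.
fn exists(r: u32) -> bool {
    r % 3 == 0
}

fn store_batch(batch: &[u32]) -> Result<usize, ()> {
    Ok(batch.len())
}

fn main() {
    let records: Vec<u32> = (0..10).collect();
    let batch_size = 4usize;
    let (mut inserted, mut skipped, mut errors) = (0usize, 0usize, 0usize);

    for chunk in records.chunks(batch_size) {
        let mut chunk_skipped = 0usize;
        let mut batch = Vec::with_capacity(chunk.len());
        for &r in chunk {
            if exists(r) {
                skipped += 1;
                chunk_skipped += 1;
                continue;
            }
            batch.push(r);
        }
        match store_batch(&batch) {
            Ok(n) => inserted += n,
            // Only the rows actually attempted in this chunk count as errors.
            Err(_) => errors += chunk.len() - chunk_skipped,
        }
    }
    println!("inserted={inserted} skipped={skipped} errors={errors}");
}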
159  src/bin/import_search_history.rs  Normal file
@@ -0,0 +1,159 @@
use anyhow::{Context, Result};
use chrono::Utc;
use clap::Parser;
use image_api::ai::ollama::OllamaClient;
use image_api::bin_progress;
use image_api::database::search_dao::{InsertSearchRecord, SqliteSearchHistoryDao};
use image_api::parsers::search_html_parser::parse_search_html;
use log::{error, info};

// Import the trait to use its methods
use image_api::database::SearchHistoryDao;

#[derive(Parser, Debug)]
#[command(author, version, about = "Import Google Takeout Search History data", long_about = None)]
struct Args {
    /// Path to the search history HTML file
    #[arg(short, long)]
    path: String,

    /// Skip searches that already exist in the database
    #[arg(long, default_value = "true")]
    skip_existing: bool,

    /// Batch size for embedding generation (max 128 recommended)
    #[arg(long, default_value = "64")]
    batch_size: usize,
}

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    env_logger::init();

    let args = Args::parse();

    info!("Parsing search history file: {}", args.path);
    let searches = parse_search_html(&args.path).context("Failed to parse search history HTML")?;

    info!("Found {} search records", searches.len());

    let primary_url = dotenv::var("OLLAMA_PRIMARY_URL")
        .or_else(|_| dotenv::var("OLLAMA_URL"))
        .unwrap_or_else(|_| "http://localhost:11434".to_string());
    let fallback_url = dotenv::var("OLLAMA_FALLBACK_URL").ok();
    let primary_model = dotenv::var("OLLAMA_PRIMARY_MODEL")
        .or_else(|_| dotenv::var("OLLAMA_MODEL"))
        .unwrap_or_else(|_| "nomic-embed-text:v1.5".to_string());
    let fallback_model = dotenv::var("OLLAMA_FALLBACK_MODEL").ok();

    let ollama = OllamaClient::new(primary_url, fallback_url, primary_model, fallback_model);
    let context = opentelemetry::Context::current();

    let mut inserted_count = 0usize;
    let mut skipped_count = 0usize;
    let mut error_count = 0usize;

    let mut dao_instance = SqliteSearchHistoryDao::new();
    let created_at = Utc::now().timestamp();

    let pb = bin_progress::determinate(searches.len() as u64, "importing");
    let total_batches = searches.len().div_ceil(args.batch_size);

    // Process searches in batches (embeddings are REQUIRED for searches)
    for (batch_idx, chunk) in searches.chunks(args.batch_size).enumerate() {
        // Generate embeddings for this batch
        let queries: Vec<String> = chunk.iter().map(|s| s.query.clone()).collect();

        let pb_for_warn = pb.clone();
        let embeddings_result = tokio::task::spawn({
            let ollama_client = ollama.clone();
            async move {
                // Generate embeddings one query at a time within the spawned task
                let mut embeddings = Vec::new();
                for query in &queries {
                    match ollama_client.generate_embedding(query).await {
                        Ok(emb) => embeddings.push(Some(emb)),
                        Err(e) => {
                            pb_for_warn.println(format!("embedding failed for '{}': {}", query, e));
                            embeddings.push(None);
                        }
                    }
                }
                embeddings
            }
        })
        .await
        .context("Failed to generate embeddings for batch")?;

        // Build batch of searches with embeddings
        let mut batch_inserts = Vec::new();

        for (search, embedding_opt) in chunk.iter().zip(embeddings_result.iter()) {
            // Check if search exists (optional for speed)
            if args.skip_existing
                && let Ok(exists) =
                    dao_instance.search_exists(&context, search.timestamp, &search.query)
                && exists
            {
                skipped_count += 1;
                continue;
            }

            // Only insert if we have an embedding
            if let Some(embedding) = embedding_opt {
                batch_inserts.push(InsertSearchRecord {
                    timestamp: search.timestamp,
                    query: search.query.clone(),
                    search_engine: search.search_engine.clone(),
                    embedding: embedding.clone(),
                    created_at,
                    source_file: Some(args.path.clone()),
                });
            } else {
                pb.println(format!("skipping '{}' — missing embedding", search.query));
                error_count += 1;
            }
        }

        // Batch insert entire chunk in single transaction
        if !batch_inserts.is_empty() {
            match dao_instance.store_searches_batch(&context, batch_inserts) {
                Ok(count) => inserted_count += count,
                Err(e) => {
                    pb.println(format!("batch insert failed: {:?}", e));
                    error_count += chunk.len();
                }
            }
        }

        pb.set_message(format!(
            "inserted={} skipped={} errors={}",
            inserted_count, skipped_count, error_count
        ));
        pb.inc(chunk.len() as u64);

        // Rate limiting between batches
        if batch_idx + 1 < total_batches {
            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
        }
    }

    pb.finish_and_clear();

    info!("=== Import Summary ===");
    info!("Total searches found: {}", searches.len());
    info!("Successfully inserted: {}", inserted_count);
    info!("Skipped (already exist): {}", skipped_count);
    info!("Errors: {}", error_count);
    info!("All imported searches have embeddings for semantic search");

    if error_count > 0 {
        error!(
            "Completed with {} errors — review log output above",
            error_count
        );
    }

    Ok(())
}
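Two small details in the loop above are easy to miss: div_ceil rounds the batch count up so a partial final chunk is still counted, and the 500 ms sleep runs only between batches, never after the last one. A minimal sketch of just that arithmetic and pacing, with toy data and tokio assumed:

// Toy sketch of the batching arithmetic and inter-batch pause (tokio assumed).
#[tokio::main]
async fn main() {
    let items: Vec<u32> = (0..10).collect();
    let batch_size = 4usize;
    let total_batches = items.len().div_ceil(batch_size); // 3 batches for 10 items

    for (batch_idx, chunk) in items.chunks(batch_size).enumerate() {
        println!("batch {}/{}: {:?}", batch_idx + 1, total_batches, chunk);
        // Pause between batches only; the last batch exits immediately.
        if batch_idx + 1 < total_batches {
            tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
        }
    }
}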
356  src/bin/populate_knowledge.rs  Normal file
@@ -0,0 +1,356 @@
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use clap::Parser;
use log::warn;
use walkdir::WalkDir;

use image_api::ai::apollo_client::ApolloClient;
use image_api::ai::{InsightGenerator, OllamaClient, SmsApiClient};
use image_api::bin_progress;
use image_api::database::{
    CalendarEventDao, DailySummaryDao, ExifDao, InsightDao, KnowledgeDao, LocationHistoryDao,
    SearchHistoryDao, SqliteCalendarEventDao, SqliteDailySummaryDao, SqliteExifDao,
    SqliteInsightDao, SqliteKnowledgeDao, SqliteLocationHistoryDao, SqliteSearchHistoryDao,
    connect,
};
use image_api::file_types::{IMAGE_EXTENSIONS, VIDEO_EXTENSIONS};
use image_api::libraries::{self, Library};
use image_api::tags::{SqliteTagDao, TagDao};

#[derive(Parser, Debug)]
#[command(name = "populate_knowledge")]
#[command(
    about = "Batch populate the knowledge base by running the agentic insight loop over a folder"
)]
struct Args {
    /// Restrict to a single library by numeric id or name. Defaults to all
    /// configured libraries.
    #[arg(long)]
    library: Option<String>,

    /// Optional subdirectory to scan instead of full library roots. Must be
    /// an absolute path under one of the selected libraries.
    #[arg(long)]
    path: Option<String>,

    /// Ollama model override. Defaults to OLLAMA_PRIMARY_MODEL from .env
    #[arg(long)]
    model: Option<String>,

    /// Maximum agentic loop iterations per file
    #[arg(long, default_value_t = 12)]
    max_iterations: usize,

    /// HTTP request timeout in seconds. Increase for large/slow models
    #[arg(long, default_value_t = 120)]
    timeout_secs: u64,

    /// Context window size (num_ctx) passed to the model
    #[arg(long)]
    num_ctx: Option<i32>,

    /// Sampling temperature (e.g. 0.8). Omit for model default
    #[arg(long)]
    temperature: Option<f32>,

    /// Top-p (nucleus) sampling (e.g. 0.9). Omit for model default
    #[arg(long)]
    top_p: Option<f32>,

    /// Top-k sampling (e.g. 40). Omit for model default
    #[arg(long)]
    top_k: Option<i32>,

    /// Min-p sampling (e.g. 0.05). Omit for model default
    #[arg(long)]
    min_p: Option<f32>,

    /// Re-process files that already have an insight stored
    #[arg(long, default_value_t = false)]
    reprocess: bool,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    dotenv::dotenv().ok();

    let args = Args::parse();

    // Load libraries from the DB. Patch the placeholder row from BASE_PATH
    // first when present so a fresh install still gets a valid root.
    let env_base_path = dotenv::var("BASE_PATH").ok();
    let mut seed_conn = connect();
    if let Some(base) = env_base_path.as_deref() {
        libraries::seed_or_patch_from_env(&mut seed_conn, base);
    }
    let all_libs = libraries::load_all(&mut seed_conn);
    drop(seed_conn);
    if all_libs.is_empty() {
        anyhow::bail!("No libraries configured");
    }

    // Resolve --library to a concrete subset.
    let selected_libs: Vec<Library> = match args.library.as_deref() {
        None => all_libs.clone(),
        Some(raw) => {
            let raw = raw.trim();
            let matched = if let Ok(id) = raw.parse::<i32>() {
                all_libs.iter().find(|l| l.id == id).cloned()
            } else {
                all_libs.iter().find(|l| l.name == raw).cloned()
            };
            match matched {
                Some(lib) => vec![lib],
                None => anyhow::bail!("Unknown library: {}", raw),
            }
        }
    };

    // Resolve --path to (target_library, walk_root). When provided, the path
    // must live under exactly one of the selected libraries.
    let scan_targets: Vec<(Library, PathBuf)> = match args.path.as_deref() {
        None => selected_libs
            .iter()
            .map(|lib| (lib.clone(), PathBuf::from(&lib.root_path)))
            .collect(),
        Some(raw) => {
            let abs = PathBuf::from(raw);
            let matched = selected_libs
                .iter()
                .find(|lib| abs.starts_with(&lib.root_path))
                .cloned();
            match matched {
                Some(lib) => vec![(lib, abs)],
                None => anyhow::bail!("--path {} is not under any selected library root", raw),
            }
        }
    };

    // Ollama config from env with CLI overrides.
    let primary_url = std::env::var("OLLAMA_PRIMARY_URL")
        .or_else(|_| std::env::var("OLLAMA_URL"))
        .unwrap_or_else(|_| "http://localhost:11434".to_string());
    let fallback_url = std::env::var("OLLAMA_FALLBACK_URL").ok();
    let primary_model = args
        .model
        .clone()
        .or_else(|| std::env::var("OLLAMA_PRIMARY_MODEL").ok())
        .or_else(|| std::env::var("OLLAMA_MODEL").ok())
        .unwrap_or_else(|| "nemotron-3-nano:30b".to_string());
    let fallback_model = std::env::var("OLLAMA_FALLBACK_MODEL").ok();

    let mut ollama = OllamaClient::new(
        primary_url.clone(),
        fallback_url,
        primary_model.clone(),
        fallback_model,
    )
    .with_request_timeout(args.timeout_secs);

    if let Some(ctx) = args.num_ctx {
        ollama.set_num_ctx(Some(ctx));
    }
    if args.temperature.is_some()
        || args.top_p.is_some()
        || args.top_k.is_some()
        || args.min_p.is_some()
    {
        ollama.set_sampling_params(args.temperature, args.top_p, args.top_k, args.min_p);
    }

    let sms_api_url =
        std::env::var("SMS_API_URL").unwrap_or_else(|_| "http://localhost:8000".to_string());
    let sms_api_token = std::env::var("SMS_API_TOKEN").ok();
    let sms_client = SmsApiClient::new(sms_api_url, sms_api_token);
    let apollo_client = ApolloClient::new(std::env::var("APOLLO_API_BASE_URL").ok());

    let insight_dao: Arc<Mutex<Box<dyn InsightDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteInsightDao::new())));
    let exif_dao: Arc<Mutex<Box<dyn ExifDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteExifDao::new())));
    let daily_summary_dao: Arc<Mutex<Box<dyn DailySummaryDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteDailySummaryDao::new())));
    let calendar_dao: Arc<Mutex<Box<dyn CalendarEventDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteCalendarEventDao::new())));
    let location_dao: Arc<Mutex<Box<dyn LocationHistoryDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteLocationHistoryDao::new())));
    let search_dao: Arc<Mutex<Box<dyn SearchHistoryDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteSearchHistoryDao::new())));
    let tag_dao: Arc<Mutex<Box<dyn TagDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteTagDao::default())));
    let knowledge_dao: Arc<Mutex<Box<dyn KnowledgeDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteKnowledgeDao::new())));

    // Pass the full library set so `resolve_full_path` probes every root,
    // even when --library restricts the walk. A rel_path shared across
    // libraries will resolve against the first existing match.
    let generator = InsightGenerator::new(
        ollama,
        None,
        sms_client,
        apollo_client,
        insight_dao.clone(),
        exif_dao,
        daily_summary_dao,
        calendar_dao,
        location_dao,
        search_dao,
        tag_dao,
        knowledge_dao,
        all_libs.clone(),
    );

    println!("Knowledge Base Population");
    println!("=========================");
    for (lib, root) in &scan_targets {
        println!("Library: {} (id={})", lib.name, lib.id);
        println!("Scan root: {}", root.display());
    }
    println!("Model: {}", primary_model);
    println!("Max iterations: {}", args.max_iterations);
    println!("Timeout: {}s", args.timeout_secs);
    if let Some(ctx) = args.num_ctx {
        println!("Num ctx: {}", ctx);
    }
    if let Some(t) = args.temperature {
        println!("Temperature: {}", t);
    }
    if let Some(p) = args.top_p {
        println!("Top P: {}", p);
    }
    if let Some(k) = args.top_k {
        println!("Top K: {}", k);
    }
    if let Some(m) = args.min_p {
        println!("Min P: {}", m);
    }
    println!(
        "Mode: {}",
        if args.reprocess {
            "reprocess all"
        } else {
            "skip existing"
        }
    );
    println!();

    let all_extensions: Vec<&str> = IMAGE_EXTENSIONS
        .iter()
        .chain(VIDEO_EXTENSIONS.iter())
        .copied()
        .collect();

    // Collect (library, abs_path, rel_path) for every media file across all
    // scan targets so the progress counter spans the full job.
    let mut files: Vec<(Library, PathBuf, String)> = Vec::new();
    for (lib, walk_root) in &scan_targets {
        let lib_root = Path::new(&lib.root_path);
        let scan_pb = bin_progress::spinner(format!("scanning {}", walk_root.display()));
        let count_before = files.len();
        for entry in WalkDir::new(walk_root).into_iter().filter_map(|e| e.ok()) {
            if !entry.file_type().is_file() {
                continue;
            }
            let abs_path = entry.path().to_path_buf();
            let ext_ok = abs_path
                .extension()
                .and_then(|ext| ext.to_str())
                .map(|ext| all_extensions.contains(&ext.to_lowercase().as_str()))
                .unwrap_or(false);
            if !ext_ok {
                continue;
            }
            let rel = match abs_path.strip_prefix(lib_root) {
                Ok(p) => p.to_string_lossy().replace('\\', "/"),
                Err(_) => {
                    warn!(
                        "{} is not under library root {}; skipping",
                        abs_path.display(),
                        lib_root.display()
                    );
                    continue;
                }
            };
            files.push((lib.clone(), abs_path, rel));
            scan_pb.inc(1);
        }
        let added = files.len() - count_before;
        scan_pb.finish_with_message(format!(
            "scanned {} ({} media files)",
            walk_root.display(),
            added
        ));
    }

    let total = files.len();
    println!("\nTotal files to consider: {}\n", total);

    if total == 0 {
        println!("Nothing to process.");
        return Ok(());
    }

    let cx = opentelemetry::Context::new();
    let mut processed = 0usize;
    let mut skipped = 0usize;
    let mut errors = 0usize;

    let pb = bin_progress::determinate(total as u64, "");

    for (lib, _abs_path, relative) in files.iter() {
        pb.set_message(format!("{}: {}", lib.name, relative));

        if !args.reprocess {
            let has_insight = insight_dao
                .lock()
                .unwrap()
                .get_insight(&cx, relative)
                .unwrap_or(None)
                .is_some();

            if has_insight {
                skipped += 1;
                pb.inc(1);
                continue;
            }
        }

        match generator
            .generate_agentic_insight_for_photo(
                relative,
                args.model.clone(),
                None,
                args.num_ctx,
                args.temperature,
                args.top_p,
                args.top_k,
                args.min_p,
                args.max_iterations,
                None,
                Vec::new(),
                Vec::new(),
            )
            .await
        {
            Ok(_) => processed += 1,
            Err(e) => {
                pb.println(format!("error {}: {} — {:?}", lib.name, relative, e));
                errors += 1;
            }
        }
        pb.inc(1);
    }

    pb.finish_and_clear();

    println!();
    println!("=========================");
    println!("Complete");
    println!(" Processed: {}", processed);
    println!(" Skipped: {}", skipped);
    println!(" Errors: {}", errors);

    Ok(())
}
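During scanning, each absolute path is reduced to a library-relative path with strip_prefix and normalised to forward slashes before being handed to the insight generator. A std-only sketch of that derivation; the library root and file path below are purely illustrative.

// Std-only sketch: derive the library-relative path the scanner stores.
use std::path::Path;

fn main() {
    let lib_root = Path::new("/media/photos");
    let abs_path = Path::new("/media/photos/2024/05/img_0001.jpg");

    match abs_path.strip_prefix(lib_root) {
        Ok(p) => {
            // Normalise to forward slashes so rel paths match across platforms.
            let rel = p.to_string_lossy().replace('\\', "/");
            println!("rel_path = {}", rel); // 2024/05/img_0001.jpg
        }
        Err(_) => eprintln!(
            "{} is not under library root {}",
            abs_path.display(),
            lib_root.display()
        ),
    }
}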
278  src/bin/test_daily_summary.rs  Normal file
@@ -0,0 +1,278 @@
use anyhow::Result;
use chrono::NaiveDate;
use clap::Parser;
use image_api::ai::{
    EMBEDDING_MODEL, OllamaClient, SmsApiClient, build_daily_summary_prompt,
    strip_summary_boilerplate, user_display_name,
};
use image_api::database::{DailySummaryDao, InsertDailySummary, SqliteDailySummaryDao};
use std::env;
use std::sync::{Arc, Mutex};

#[derive(Parser, Debug)]
#[command(author, version, about = "Test daily summary generation with different models and prompts", long_about = None)]
struct Args {
    /// Contact name to generate summaries for
    #[arg(short, long)]
    contact: String,

    /// Start date (YYYY-MM-DD)
    #[arg(short, long)]
    start: String,

    /// End date (YYYY-MM-DD)
    #[arg(short, long)]
    end: String,

    /// Optional: Override the model to use (e.g., "qwen2.5:32b", "llama3.1:30b")
    #[arg(short, long)]
    model: Option<String>,

    /// Context window size passed as Ollama `num_ctx`. Omit for server default.
    #[arg(long)]
    num_ctx: Option<i32>,

    /// Sampling temperature. Omit for server default.
    #[arg(long)]
    temperature: Option<f32>,

    /// Top-p (nucleus) sampling. Omit for server default.
    #[arg(long)]
    top_p: Option<f32>,

    /// Top-k sampling. Omit for server default.
    #[arg(long)]
    top_k: Option<i32>,

    /// Min-p sampling. Omit for server default.
    #[arg(long)]
    min_p: Option<f32>,

    /// Test mode: Generate but don't save to database (shows output only)
    #[arg(short = 't', long, default_value_t = false)]
    test_mode: bool,

    /// Show message count and preview
    #[arg(short, long, default_value_t = false)]
    verbose: bool,
}

#[tokio::main]
async fn main() -> Result<()> {
    // Load .env file
    dotenv::dotenv().ok();

    // Initialize logging
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    let args = Args::parse();

    // Parse dates
    let start_date = NaiveDate::parse_from_str(&args.start, "%Y-%m-%d")
        .expect("Invalid start date format. Use YYYY-MM-DD");
    let end_date = NaiveDate::parse_from_str(&args.end, "%Y-%m-%d")
        .expect("Invalid end date format. Use YYYY-MM-DD");

    println!("========================================");
    println!("Daily Summary Generation Test Tool");
    println!("========================================");
    println!("Contact: {}", args.contact);
    println!("Date range: {} to {}", start_date, end_date);
    println!("Days: {}", (end_date - start_date).num_days() + 1);
    if let Some(ref model) = args.model {
        println!("Model: {}", model);
    } else {
        println!(
            "Model: {} (from env)",
            env::var("OLLAMA_PRIMARY_MODEL")
                .or_else(|_| env::var("OLLAMA_MODEL"))
                .unwrap_or_else(|_| "nemotron-3-nano:30b".to_string())
        );
    }
    if args.test_mode {
        println!("⚠ TEST MODE: Results will NOT be saved to database");
    }
    println!("========================================");
    println!();

    // Initialize AI clients
    let ollama_primary_url = env::var("OLLAMA_PRIMARY_URL")
        .or_else(|_| env::var("OLLAMA_URL"))
        .unwrap_or_else(|_| "http://localhost:11434".to_string());

    let ollama_fallback_url = env::var("OLLAMA_FALLBACK_URL").ok();

    // Use provided model or fallback to env
    let model_to_use = args.model.clone().unwrap_or_else(|| {
        env::var("OLLAMA_PRIMARY_MODEL")
            .or_else(|_| env::var("OLLAMA_MODEL"))
            .unwrap_or_else(|_| "nemotron-3-nano:30b".to_string())
    });

    let mut ollama = OllamaClient::new(
        ollama_primary_url,
        ollama_fallback_url.clone(),
        model_to_use.clone(),
        Some(model_to_use), // Use same model for fallback
    );
    if let Some(ctx) = args.num_ctx {
        ollama.set_num_ctx(Some(ctx));
    }
    if args.temperature.is_some()
        || args.top_p.is_some()
        || args.top_k.is_some()
        || args.min_p.is_some()
    {
        ollama.set_sampling_params(args.temperature, args.top_p, args.top_k, args.min_p);
    }

    // Surface what's actually configured so comparison runs are auditable.
    println!(
        "num_ctx={:?} temperature={:?} top_p={:?} top_k={:?} min_p={:?}",
        args.num_ctx, args.temperature, args.top_p, args.top_k, args.min_p
    );

    let sms_api_url =
        env::var("SMS_API_URL").unwrap_or_else(|_| "http://localhost:8000".to_string());
    let sms_api_token = env::var("SMS_API_TOKEN").ok();
    let sms_client = SmsApiClient::new(sms_api_url, sms_api_token);

    // Initialize DAO
    let summary_dao: Arc<Mutex<Box<dyn DailySummaryDao>>> =
        Arc::new(Mutex::new(Box::new(SqliteDailySummaryDao::new())));

    // Fetch messages for contact
    println!("Fetching messages for {}...", args.contact);
    let all_messages = sms_client
        .fetch_all_messages_for_contact(&args.contact)
        .await?;

    println!(
        "Found {} total messages for {}",
        all_messages.len(),
        args.contact
    );
    println!();

    // Filter to date range and group by date
    let mut messages_by_date = std::collections::HashMap::new();

    for msg in all_messages {
        if let Some(dt) = chrono::DateTime::from_timestamp(msg.timestamp, 0) {
            let date = dt.date_naive();
            if date >= start_date && date <= end_date {
                messages_by_date
                    .entry(date)
                    .or_insert_with(Vec::new)
                    .push(msg);
            }
        }
    }

    if messages_by_date.is_empty() {
        println!("⚠ No messages found in date range");
        return Ok(());
    }

    println!("Found {} days with messages", messages_by_date.len());
    println!();

    // Sort dates
    let mut dates: Vec<NaiveDate> = messages_by_date.keys().cloned().collect();
    dates.sort();

    // Process each day
    for (idx, date) in dates.iter().enumerate() {
        let messages = messages_by_date.get(date).unwrap();
        let date_str = date.format("%Y-%m-%d").to_string();
        let weekday = date.format("%A");

        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
        println!(
            "Day {}/{}: {} ({}) - {} messages",
            idx + 1,
            dates.len(),
            date_str,
            weekday,
            messages.len()
        );
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");

        if args.verbose {
            let user_name = user_display_name();
            println!("\nMessage preview:");
            for (i, msg) in messages.iter().take(3).enumerate() {
                let sender: &str = if msg.is_sent {
                    &user_name
                } else {
                    &msg.contact
                };
                let preview = msg.body.chars().take(60).collect::<String>();
                println!(" {}. {}: {}...", i + 1, sender, preview);
            }
            if messages.len() > 3 {
                println!(" ... and {} more", messages.len() - 3);
            }
            println!();
        }

        let (prompt, system_prompt) = build_daily_summary_prompt(&args.contact, date, messages);

        println!("Generating summary...");

        let summary = ollama.generate(&prompt, Some(system_prompt)).await?;

        println!("\n📝 GENERATED SUMMARY:");
        println!("─────────────────────────────────────────");
        println!("{}", summary.trim());
        println!("─────────────────────────────────────────");

        if !args.test_mode {
            println!("\nStripping boilerplate for embedding...");
            let stripped = strip_summary_boilerplate(&summary);
            println!(
                "Stripped: {}...",
                stripped.chars().take(80).collect::<String>()
            );

            println!("\nGenerating embedding...");
            let embedding = ollama.generate_embedding(&stripped).await?;
            println!("✓ Embedding generated ({} dimensions)", embedding.len());

            println!("Saving to database...");
            let insert = InsertDailySummary {
                date: date_str.clone(),
                contact: args.contact.clone(),
                summary: summary.trim().to_string(),
                message_count: messages.len() as i32,
                embedding,
                created_at: chrono::Utc::now().timestamp(),
                model_version: EMBEDDING_MODEL.to_string(),
            };

            let mut dao = summary_dao.lock().expect("Unable to lock DailySummaryDao");
            let context = opentelemetry::Context::new();

            match dao.store_summary(&context, insert) {
                Ok(_) => println!("✓ Saved to database"),
                Err(e) => println!("✗ Database error: {:?}", e),
            }
        } else {
            println!("\n⚠ TEST MODE: Not saved to database");
        }

        println!();

        // Rate limiting between days
        if idx < dates.len() - 1 {
            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
        }
    }

    println!("========================================");
    println!("✓ Complete!");
    println!("Processed {} days", dates.len());
    println!("========================================");

    Ok(())
}
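The grouping step above buckets messages by calendar day and drops anything outside the requested range. A compact sketch of the same pattern follows; chrono is assumed, and the timestamps and message bodies are made up for illustration.

// Compact sketch of the per-day grouping used above (toy data).
use chrono::NaiveDate;
use std::collections::HashMap;

fn main() {
    let start = NaiveDate::from_ymd_opt(2024, 5, 1).unwrap();
    let end = NaiveDate::from_ymd_opt(2024, 5, 31).unwrap();
    // (unix timestamp, body) pairs standing in for SMS records.
    let messages = vec![
        (1_714_608_000i64, "good morning"),
        (1_717_027_200i64, "see you soon"),
    ];

    let mut by_date: HashMap<NaiveDate, Vec<&str>> = HashMap::new();
    for (ts, body) in messages {
        if let Some(dt) = chrono::DateTime::from_timestamp(ts, 0) {
            let date = dt.date_naive();
            if date >= start && date <= end {
                by_date.entry(date).or_insert_with(Vec::new).push(body);
            }
        }
    }
    println!("{} day(s) with messages", by_date.len());
}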
34  src/bin_progress.rs  Normal file
@@ -0,0 +1,34 @@
//! Shared progress-bar styling for the utility binaries. Centralised so every
//! `cargo run --bin ...` tool gets the same look and feel.

use indicatif::{ProgressBar, ProgressStyle};

const DETERMINATE_TEMPLATE: &str = "{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] \
    {human_pos}/{human_len} ({percent}%) {per_sec} eta {eta} {msg}";

const SPINNER_TEMPLATE: &str = "{spinner:.green} [{elapsed_precise}] {human_pos} {per_sec} {msg}";

/// Determinate progress bar used when the total work is known up front.
pub fn determinate(total: u64, message: impl Into<String>) -> ProgressBar {
    let pb = ProgressBar::new(total);
    pb.set_style(
        ProgressStyle::with_template(DETERMINATE_TEMPLATE)
            .expect("hard-coded template parses")
            .progress_chars("=> "),
    );
    pb.set_message(message.into());
    pb
}

/// Spinner used for open-ended work (e.g. paginated DB scans that loop until
/// empty). Throughput is shown via `{per_sec}`; tick at a steady cadence so
/// it animates even when work is bursty.
pub fn spinner(message: impl Into<String>) -> ProgressBar {
    let pb = ProgressBar::new_spinner();
    pb.set_style(
        ProgressStyle::with_template(SPINNER_TEMPLATE).expect("hard-coded template parses"),
    );
    pb.set_message(message.into());
    pb.enable_steady_tick(std::time::Duration::from_millis(120));
    pb
}
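For reference, a hypothetical caller showing how a utility binary would drive these helpers, assuming the crate is named image_api as in the imports used by the binaries above; the loop bodies are placeholders for real work.

// Hypothetical caller of the shared progress helpers (crate name assumed).
use image_api::bin_progress;

fn main() {
    // Determinate bar: total work is known up front.
    let pb = bin_progress::determinate(250, "importing");
    for _ in 0..250u64 {
        std::thread::sleep(std::time::Duration::from_millis(2));
        pb.inc(1);
    }
    pb.finish_and_clear();

    // Spinner: open-ended work, position shown via {human_pos}.
    let spin = bin_progress::spinner("scanning");
    for _ in 0..50u64 {
        spin.inc(1);
    }
    spin.finish_with_message("scanned");
}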
Some files were not shown because too many files have changed in this diff.