Updates, big updates
incident_correlator.py — full rewrite: always runs on every call; fetches all active incidents cross-type; fast path collects all talkgroup matches and disambiguates by unit/vehicle overlap, then location proximity, then embedding; new location-proximity path; slow path requires location corroboration; "Auto:" stripped from titles; "auto-generated" tag added; units/vehicles now accumulated on update.
intelligence.py — "resolved" field added to the GPT schema and returned as the 5th value.
upload.py — both pipelines unpack the 5-tuple, always call correlate, and auto-resolve when resolved=True.
summarizer.py — stale sweep runs each tick, resolving incidents idle for 90+ minutes.
config.py — correlation_window_hours=2, embedding_similarity_threshold=0.93, location_proximity_km=0.5, incident_auto_resolve_minutes=90.
This commit is contained in:
@@ -1,15 +1,14 @@
|
||||
"""
|
||||
Background incident summary loop.
|
||||
|
||||
Runs every SUMMARY_INTERVAL_MINUTES. Finds all active incidents with
|
||||
summary_stale=True, fetches all their call transcripts, and calls Gemini
|
||||
once per incident to produce a concise factual summary.
|
||||
|
||||
By batching this way: Gemini is never called per-call — only periodically
|
||||
and only for incidents that have actually changed since the last run.
|
||||
Runs every SUMMARY_INTERVAL_MINUTES. Two passes per tick:
|
||||
1. Summary pass — find stale incidents (summary_stale=True) and regenerate summaries.
|
||||
2. Stale sweep — auto-resolve incidents with no new calls for incident_auto_resolve_minutes.
|
||||
This is effectively "time since last call" because updated_at is stamped on every
|
||||
new linked call.
|
||||
"""
|
||||
import asyncio
|
||||
from datetime import datetime, timezone
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Optional
|
||||
from app.internal.logger import logger
|
||||
from app.internal import firestore as fstore
|
||||
@@ -23,6 +22,7 @@ async def summarizer_loop() -> None:
|
||||
await asyncio.sleep(interval)
|
||||
try:
|
||||
await _run_summary_pass()
|
||||
await _resolve_stale_incidents()
|
||||
except Exception as e:
|
||||
logger.error(f"Summarizer pass failed: {e}")
|
||||
|
||||
@@ -74,6 +74,41 @@ async def _summarize_incident(inc: dict) -> None:
|
||||
await fstore.doc_set("incidents", incident_id, updates)
|
||||
|
||||
|
||||
async def _resolve_stale_incidents() -> None:
    """Auto-resolve active incidents with no new calls for incident_auto_resolve_minutes.

    "No new calls" is measured via each incident's ``updated_at``, which (per the
    module docstring) is stamped on every newly linked call, so idle time here is
    effectively time since the last call.

    Side effects: sets ``status="resolved"`` on each stale incident document.
    Best-effort — a malformed document is logged and skipped, never aborts the pass.
    """
    all_active = await fstore.collection_list("incidents", status="active")
    if not all_active:
        return

    now = datetime.now(timezone.utc)
    # Single source of truth for the idle threshold: compare timedeltas directly
    # instead of re-deriving minutes from settings for the comparison.
    cutoff = timedelta(minutes=settings.incident_auto_resolve_minutes)
    count = 0

    for inc in all_active:
        incident_id = inc.get("incident_id")
        if not incident_id:
            continue

        raw_updated = str(inc.get("updated_at", ""))
        if not raw_updated:
            # No timestamp at all — we can't judge staleness. Skip with a clear
            # message rather than letting fromisoformat("") raise into the
            # generic error path below.
            logger.warning(f"Stale sweep: incident {incident_id} has no updated_at")
            continue

        try:
            # Firestore timestamps may arrive as "...Z"; fromisoformat (pre-3.11)
            # only accepts an explicit offset.
            updated_dt = datetime.fromisoformat(raw_updated.replace("Z", "+00:00"))
            if updated_dt.tzinfo is None:
                # Defensive: treat naive timestamps as UTC so subtraction against
                # the aware `now` doesn't raise.
                updated_dt = updated_dt.replace(tzinfo=timezone.utc)

            idle = now - updated_dt
            if idle > cutoff:
                await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
                logger.info(
                    f"Auto-resolved stale incident {incident_id} "
                    f"(idle {idle.total_seconds() / 60:.0f}m)"
                )
                count += 1
        except Exception as e:
            # Best-effort sweep: one bad document must not abort the whole pass.
            logger.warning(f"Stale sweep error for {incident_id}: {e}")

    if count:
        logger.info(f"Stale sweep: resolved {count} incident(s)")
|
||||
|
||||
|
||||
def _sync_summarize(inc: dict, transcripts: list[str]) -> Optional[str]:
|
||||
from app.config import settings
|
||||
from openai import OpenAI
|
||||
|
||||
Reference in New Issue
Block a user