Incident correlation overhaul: cross-type correlation, LLM-driven auto-resolve, and stale-incident sweep

incident_correlator.py — full rewrite: always runs on every call, fetches all active incidents cross-type, fast path collects all talkgroup matches and disambiguates by unit/vehicle overlap → location proximity → embedding, new location proximity path, slow path requires location corroboration, "Auto:" stripped from titles, "auto-generated" tag added, units/vehicles now accumulated on update
intelligence.py — resolved field in GPT schema, returned as 5th value
upload.py — both pipelines unpack 5-tuple, always call correlate, auto-resolve on resolved=True
summarizer.py — stale sweep runs each tick, resolves incidents idle for 90+ minutes
config.py — correlation_window_hours=2, embedding_similarity_threshold=0.93, location_proximity_km=0.5, incident_auto_resolve_minutes=90
This commit is contained in:
Logan
2026-04-19 22:53:53 -04:00
parent f9d4fcbc39
commit ba43796c51
9 changed files with 539 additions and 293 deletions
+34 -27
View File
@@ -95,24 +95,27 @@ async def _run_extraction_pipeline(
"""Run steps 2-4 of the intelligence pipeline using an existing transcript."""
from app.internal import intelligence, incident_correlator, alerter
tags, incident_type, location, location_coords = await intelligence.extract_tags(
tags, incident_type, location, location_coords, resolved = await intelligence.extract_tags(
call_id, transcript, talkgroup_name,
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
node_id=node_id,
)
if incident_type:
await incident_correlator.correlate_call(
call_id=call_id,
node_id=node_id,
system_id=system_id,
talkgroup_id=talkgroup_id,
talkgroup_name=talkgroup_name,
tags=tags,
incident_type=incident_type,
location=location,
location_coords=location_coords,
)
incident_id = await incident_correlator.correlate_call(
call_id=call_id,
node_id=node_id,
system_id=system_id,
talkgroup_id=talkgroup_id,
talkgroup_name=talkgroup_name,
tags=tags,
incident_type=incident_type,
location=location,
location_coords=location_coords,
)
if resolved and incident_id:
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
await alerter.check_and_dispatch(
call_id=call_id,
@@ -153,26 +156,30 @@ async def _run_intelligence_pipeline(
incident_type: Optional[str] = None
location: Optional[str] = None
location_coords: Optional[dict] = None
resolved: bool = False
if transcript:
tags, incident_type, location, location_coords = await intelligence.extract_tags(
tags, incident_type, location, location_coords, resolved = await intelligence.extract_tags(
call_id, transcript, talkgroup_name,
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
node_id=node_id,
)
# Step 3: Incident correlation
if incident_type:
await incident_correlator.correlate_call(
call_id=call_id,
node_id=node_id,
system_id=system_id,
talkgroup_id=talkgroup_id,
talkgroup_name=talkgroup_name,
tags=tags,
incident_type=incident_type,
location=location,
location_coords=location_coords,
)
# Step 3: Incident correlation (always runs — unclassified calls can still link via talkgroup)
incident_id = await incident_correlator.correlate_call(
call_id=call_id,
node_id=node_id,
system_id=system_id,
talkgroup_id=talkgroup_id,
talkgroup_name=talkgroup_name,
tags=tags,
incident_type=incident_type,
location=location,
location_coords=location_coords,
)
if resolved and incident_id:
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
# Step 4: Alert dispatch (always runs — talkgroup ID rules don't need a transcript)
await alerter.check_and_dispatch(