Compare commits
20 Commits
6612e4b683
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 4006232c85 | |||
| 4c3b1fcc84 | |||
| 8b660d8e10 | |||
| 7e1b01a275 | |||
| 97f4286810 | |||
| e704df1a62 | |||
| f6897566f8 | |||
| 531ce64eeb | |||
| f8a9cda27e | |||
| 640667c9f9 | |||
| 5f83194420 | |||
| c959437059 | |||
| 92c8351864 | |||
| 64232279ca | |||
| 317f9d2a9d | |||
| bcd3406ae8 | |||
| e70e7c0be9 | |||
| 88103c8011 | |||
| 65839a3191 | |||
| 338b946ba3 |
@@ -3,7 +3,7 @@ FROM python:3.14-slim
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
COPY requirements.txt .
|
COPY requirements.txt .
|
||||||
RUN pip install --no-cache-dir -r requirements.txt
|
RUN pip install uv && uv pip install --system --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
COPY app/ ./app/
|
COPY app/ ./app/
|
||||||
COPY tests/ ./tests/
|
COPY tests/ ./tests/
|
||||||
|
|||||||
@@ -24,9 +24,17 @@ class Settings(BaseSettings):
|
|||||||
gemini_api_key: Optional[str] = None
|
gemini_api_key: Optional[str] = None
|
||||||
summary_interval_minutes: int = 2 # how often the summary loop runs
|
summary_interval_minutes: int = 2 # how often the summary loop runs
|
||||||
correlation_window_hours: int = 2 # slow/location path: max hours since last call
|
correlation_window_hours: int = 2 # slow/location path: max hours since last call
|
||||||
embedding_similarity_threshold: float = 0.93 # slow-path cosine threshold (tiebreaker only)
|
embedding_similarity_threshold: float = 0.93 # slow-path: requires location corroboration
|
||||||
|
embedding_no_location_threshold: float = 0.97 # slow-path: match without location (very high bar)
|
||||||
|
embedding_cross_tg_threshold: float = 0.85 # cross-TG path: same dept + 2+ shared units
|
||||||
location_proximity_km: float = 0.5 # radius for location-proximity matching
|
location_proximity_km: float = 0.5 # radius for location-proximity matching
|
||||||
incident_auto_resolve_minutes: int = 90 # auto-resolve after N minutes with no new calls
|
incident_auto_resolve_minutes: int = 90 # auto-resolve after N minutes with no new calls
|
||||||
|
recorrelation_scan_minutes: int = 60 # re-examine orphaned calls ended within this window
|
||||||
|
tg_fast_path_idle_minutes: int = 90 # fast path: max minutes since incident last updated
|
||||||
|
|
||||||
|
# Vocabulary learning
|
||||||
|
vocabulary_induction_interval_hours: int = 24 # how often the induction loop runs
|
||||||
|
vocabulary_induction_sample_tokens: int = 4000 # ~tokens of transcript text sampled per system
|
||||||
|
|
||||||
# Internal service key — allows server-side services (discord bot) to call C2 without Firebase
|
# Internal service key — allows server-side services (discord bot) to call C2 without Firebase
|
||||||
service_key: Optional[str] = None
|
service_key: Optional[str] = None
|
||||||
|
|||||||
@@ -0,0 +1,62 @@
|
|||||||
|
"""
|
||||||
|
Global AI feature flags stored in Firestore at config/ai_features.
|
||||||
|
|
||||||
|
Defaults to all-on when the document does not exist yet. Uses a short
|
||||||
|
in-memory TTL cache so flag reads don't add a Firestore round-trip to every
|
||||||
|
call upload.
|
||||||
|
"""
|
||||||
|
import time
|
||||||
|
from typing import Any
|
||||||
|
from app.internal.logger import logger
|
||||||
|
from app.internal import firestore as fstore
|
||||||
|
|
||||||
|
_COLLECTION = "config"
|
||||||
|
_DOC_ID = "ai_features"
|
||||||
|
_TTL = 30.0 # seconds before re-reading from Firestore
|
||||||
|
|
||||||
|
_DEFAULTS: dict[str, bool] = {
|
||||||
|
"stt_enabled": True,
|
||||||
|
"correlation_enabled": True,
|
||||||
|
"summaries_enabled": True,
|
||||||
|
"vocabulary_learning_enabled": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
_cache: dict[str, Any] = {}
|
||||||
|
_cache_ts: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
async def get_flags() -> dict[str, bool]:
|
||||||
|
"""Return the current feature flags, using the TTL cache when fresh."""
|
||||||
|
global _cache, _cache_ts
|
||||||
|
|
||||||
|
now = time.monotonic()
|
||||||
|
if _cache and (now - _cache_ts) < _TTL:
|
||||||
|
return dict(_cache)
|
||||||
|
|
||||||
|
try:
|
||||||
|
doc = await fstore.doc_get(_COLLECTION, _DOC_ID)
|
||||||
|
if doc:
|
||||||
|
merged = {**_DEFAULTS, **{k: bool(v) for k, v in doc.items() if k in _DEFAULTS}}
|
||||||
|
else:
|
||||||
|
merged = dict(_DEFAULTS)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Feature flags: could not read from Firestore ({e}), using defaults")
|
||||||
|
merged = dict(_DEFAULTS)
|
||||||
|
|
||||||
|
_cache = merged
|
||||||
|
_cache_ts = now
|
||||||
|
return dict(_cache)
|
||||||
|
|
||||||
|
|
||||||
|
async def set_flags(updates: dict[str, bool]) -> dict[str, bool]:
|
||||||
|
"""Write flag updates to Firestore and invalidate the cache."""
|
||||||
|
global _cache, _cache_ts
|
||||||
|
|
||||||
|
clean = {k: bool(v) for k, v in updates.items() if k in _DEFAULTS}
|
||||||
|
if not clean:
|
||||||
|
raise ValueError(f"No recognised flag keys in update: {list(updates)}")
|
||||||
|
|
||||||
|
await fstore.doc_set(_COLLECTION, _DOC_ID, clean)
|
||||||
|
_cache_ts = 0.0 # force re-read on next get_flags()
|
||||||
|
logger.info(f"Feature flags updated: {clean}")
|
||||||
|
return await get_flags()
|
||||||
@@ -1,10 +1,18 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
|
import time as _time
|
||||||
from typing import Optional, Any
|
from typing import Optional, Any
|
||||||
import firebase_admin
|
import firebase_admin
|
||||||
from firebase_admin import credentials, firestore as fs
|
from firebase_admin import credentials, firestore as fs
|
||||||
|
from google.cloud.firestore_v1.base_query import FieldFilter
|
||||||
from app.config import settings
|
from app.config import settings
|
||||||
from app.internal.logger import logger
|
from app.internal.logger import logger
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# In-memory TTL cache for rarely-changing documents (systems, nodes config)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Key: "collection/doc_id" → (expires_at_monotonic, data_or_None)
|
||||||
|
_doc_cache: dict[str, tuple[float, Optional[dict]]] = {}
|
||||||
|
|
||||||
|
|
||||||
def _init_firebase():
|
def _init_firebase():
|
||||||
if firebase_admin._apps:
|
if firebase_admin._apps:
|
||||||
@@ -51,7 +59,25 @@ async def collection_list(collection: str, **filters) -> list[dict]:
|
|||||||
def _query():
|
def _query():
|
||||||
ref = db.collection(collection)
|
ref = db.collection(collection)
|
||||||
for field, value in filters.items():
|
for field, value in filters.items():
|
||||||
ref = ref.where(field, "==", value)
|
ref = ref.where(filter=FieldFilter(field, "==", value))
|
||||||
|
return [doc.to_dict() for doc in ref.stream()]
|
||||||
|
|
||||||
|
return await asyncio.to_thread(_query)
|
||||||
|
|
||||||
|
|
||||||
|
async def collection_where(
|
||||||
|
collection: str,
|
||||||
|
conditions: list[tuple[str, str, Any]],
|
||||||
|
) -> list[dict]:
|
||||||
|
"""
|
||||||
|
Query a collection with arbitrary where-clauses.
|
||||||
|
conditions: list of (field, op, value) — e.g. [("ended_at", ">=", cutoff_dt)]
|
||||||
|
Supports any Firestore operator: "==", "!=", "<", "<=", ">", ">=".
|
||||||
|
"""
|
||||||
|
def _query():
|
||||||
|
ref = db.collection(collection)
|
||||||
|
for field, op, value in conditions:
|
||||||
|
ref = ref.where(filter=FieldFilter(field, op, value))
|
||||||
return [doc.to_dict() for doc in ref.stream()]
|
return [doc.to_dict() for doc in ref.stream()]
|
||||||
|
|
||||||
return await asyncio.to_thread(_query)
|
return await asyncio.to_thread(_query)
|
||||||
@@ -60,3 +86,19 @@ async def collection_list(collection: str, **filters) -> list[dict]:
|
|||||||
async def doc_delete(collection: str, doc_id: str) -> None:
|
async def doc_delete(collection: str, doc_id: str) -> None:
|
||||||
ref = db.collection(collection).document(doc_id)
|
ref = db.collection(collection).document(doc_id)
|
||||||
await asyncio.to_thread(ref.delete)
|
await asyncio.to_thread(ref.delete)
|
||||||
|
|
||||||
|
|
||||||
|
async def doc_get_cached(collection: str, doc_id: str, ttl: float = 300.0) -> Optional[dict]:
|
||||||
|
"""
|
||||||
|
Like doc_get but backed by a short-lived in-memory TTL cache.
|
||||||
|
Use for documents that change rarely (systems config, node assignments).
|
||||||
|
Default TTL is 5 minutes — a write will be visible within that window.
|
||||||
|
"""
|
||||||
|
key = f"{collection}/{doc_id}"
|
||||||
|
now = _time.monotonic()
|
||||||
|
entry = _doc_cache.get(key)
|
||||||
|
if entry and now < entry[0]:
|
||||||
|
return entry[1]
|
||||||
|
data = await doc_get(collection, doc_id)
|
||||||
|
_doc_cache[key] = (now + ttl, data)
|
||||||
|
return data
|
||||||
|
|||||||
@@ -3,27 +3,42 @@ Hybrid incident correlation engine.
|
|||||||
|
|
||||||
Matching priority (in order):
|
Matching priority (in order):
|
||||||
|
|
||||||
1. Fast path — talkgroup + system match (any incident type, no time limit)
|
1. Fast path — talkgroup + system match with recency gate
|
||||||
Active-status gate is sufficient. If multiple active incidents share the same
|
Only considers incidents updated within `tg_fast_path_idle_minutes` (default 30 min).
|
||||||
talkgroup (e.g. busy shared channel), disambiguate by:
|
Rationale: a shared dispatch channel (e.g. "Yorktown PD – Dispatch") handles ALL
|
||||||
a) Unit overlap — strongest signal, officer assigned to incident
|
incidents for a department. Without the recency gate, every call on that channel
|
||||||
|
would pile into whatever incident was created first, for hours.
|
||||||
|
|
||||||
|
If multiple recent incidents share the same talkgroup, disambiguate by:
|
||||||
|
a) Unit overlap — strongest signal
|
||||||
b) Vehicle overlap — vehicle description shared across calls
|
b) Vehicle overlap — vehicle description shared across calls
|
||||||
c) Location proximity — geocoded coords closer to which incident
|
c) Location proximity — geocoded coords closer to which incident
|
||||||
d) Embedding similarity against each candidate's centroid (tiebreaker)
|
d) Embedding similarity against each candidate's centroid (tiebreaker)
|
||||||
Falls back to most-recently-updated on tie.
|
Falls back to most-recently-updated on tie.
|
||||||
|
|
||||||
|
Dispatch-channel strictness: when the talkgroup name contains "dispatch", the
|
||||||
|
fallback inside `_call_fits_incident` requires positive evidence (unit overlap
|
||||||
|
or location proximity) to link. Without this, every ungeocoded call on a
|
||||||
|
dispatch backbone would link to the one active incident on that channel.
|
||||||
|
|
||||||
|
Thin calls (no units/vehicles/coords) skip scene verification and link to the
|
||||||
|
most recently updated incident on this TGID — but only if that incident is
|
||||||
|
within the recency window.
|
||||||
|
|
||||||
2. Location path — geocoded coords within `location_proximity_km` (time-limited)
|
2. Location path — geocoded coords within `location_proximity_km` (time-limited)
|
||||||
Primary mutual-aid signal: EMS + police at the same scene.
|
Primary mutual-aid signal: EMS + police at the same scene.
|
||||||
|
|
||||||
3. Slow path — embedding cosine similarity (time-limited, same type only)
|
3. Slow path — embedding cosine similarity (time-limited, same type only)
|
||||||
Requires similarity >= threshold AND location within 4× proximity radius.
|
Requires similarity >= threshold AND location within 4× proximity radius.
|
||||||
Never fires alone — location corroboration is mandatory.
|
High-confidence tier (>= embedding_no_location_threshold) can match without
|
||||||
|
location when geocoding failed on either side.
|
||||||
|
|
||||||
Calls with no incident_type skip new-incident creation but still run paths 1–3,
|
Calls with no incident_type skip new-incident creation but still run paths 1–3,
|
||||||
so unclassified calls (short transport end, "en route", etc.) can link to an
|
so unclassified calls (short transport end, "en route", etc.) can link to an
|
||||||
existing incident via talkgroup match.
|
existing incident via talkgroup match.
|
||||||
"""
|
"""
|
||||||
import math
|
import math
|
||||||
|
import re
|
||||||
import uuid
|
import uuid
|
||||||
from datetime import datetime, timezone, timedelta
|
from datetime import datetime, timezone, timedelta
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
@@ -31,6 +46,113 @@ from app.internal.logger import logger
|
|||||||
from app.internal import firestore as fstore
|
from app.internal import firestore as fstore
|
||||||
from app.config import settings
|
from app.config import settings
|
||||||
|
|
||||||
|
_DISPATCH_TG_RE = re.compile(r"\bdispatch\b|\bdisp\b", re.IGNORECASE)
|
||||||
|
|
||||||
|
# Matches route/road identifiers in location strings for cross-system parent detection.
|
||||||
|
# Groups: numbered routes (Route 202, NY-9, US-6, I-87, CR-35) and named parkways/highways.
|
||||||
|
_ROAD_RE = re.compile(
|
||||||
|
r"\b(?:route|rt\.?|rte\.?|us[-\s]?|state\s*route\s*|ny[-\s]?|i[-\s]?|cr[-\s]?|county\s*road\s*)\s*\d+\b"
|
||||||
|
r"|\b(?:tsp|taconic|thruway|parkway|turnpike|interstate)\b"
|
||||||
|
r"|\b\w+(?:\s+\w+)?\s+(?:street|avenue|road|drive|boulevard|lane|court|place|highway|pkwy|blvd|ave|rd|st|dr)\b",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_road_ids(text: str) -> set[str]:
|
||||||
|
"""
|
||||||
|
Extract normalised road/route identifiers from a location string.
|
||||||
|
e.g. "suspect east on Route 202" → {"route 202"}
|
||||||
|
"at Main Street and Oak Ave" → {"main street", "oak ave"}
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
re.sub(r"[\s.\-]+", " ", m.group().lower()).strip()
|
||||||
|
for m in _ROAD_RE.finditer(text)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _location_mentions_road_overlap(new_location: str, inc_mentions: list[str]) -> bool:
|
||||||
|
"""True if the new call's location shares any road identifier with the incident's history."""
|
||||||
|
if not new_location or not inc_mentions:
|
||||||
|
return False
|
||||||
|
new_roads = _extract_road_ids(new_location)
|
||||||
|
if not new_roads:
|
||||||
|
return False
|
||||||
|
inc_roads: set[str] = set()
|
||||||
|
for mention in inc_mentions:
|
||||||
|
inc_roads |= _extract_road_ids(mention)
|
||||||
|
return bool(new_roads & inc_roads)
|
||||||
|
|
||||||
|
|
||||||
|
def _operational_types_compatible(type_a: Optional[str], type_b: Optional[str]) -> bool:
|
||||||
|
"""Police+police, ems+ems, fire+fire all match. Police+ems for co-response. Fire+anything for mutual aid."""
|
||||||
|
if not type_a or not type_b:
|
||||||
|
return False
|
||||||
|
if type_a == type_b:
|
||||||
|
return True
|
||||||
|
compatible_pairs = {frozenset({"police", "ems"}), frozenset({"fire", "ems"}), frozenset({"fire", "police"})}
|
||||||
|
return frozenset({type_a, type_b}) in compatible_pairs
|
||||||
|
|
||||||
|
# Tags that unambiguously imply a specific incident type, used as a fallback
|
||||||
|
# when GPT returns incident_type=None (typically due to missing talkgroup context).
|
||||||
|
# Only high-confidence, type-specific tags are listed — generic tags like
|
||||||
|
# "welfare-check" or "suspicious-activity" are omitted to avoid false typing.
|
||||||
|
_TAG_TYPE_HINTS: dict[str, str] = {
|
||||||
|
"active-fire": "fire",
|
||||||
|
"working-fire": "fire",
|
||||||
|
"structure-fire": "fire",
|
||||||
|
"brush-fire": "fire",
|
||||||
|
"smoke-investigation": "fire",
|
||||||
|
"fire-alarm": "fire",
|
||||||
|
"cardiac-arrest": "ems",
|
||||||
|
"unresponsive": "ems",
|
||||||
|
"medical-assistance": "ems",
|
||||||
|
"transport": "ems",
|
||||||
|
"courtesy-transport": "ems",
|
||||||
|
"mvc": "accident",
|
||||||
|
"mva": "accident",
|
||||||
|
"two-car-mva": "accident",
|
||||||
|
"traffic-stop": "police",
|
||||||
|
"shots-fired": "police",
|
||||||
|
"vehicle-pursuit": "police",
|
||||||
|
"pursuit": "police",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _infer_type_from_tags(tags: list[str]) -> Optional[str]:
|
||||||
|
"""Return an incident type inferred from tags, or None if ambiguous."""
|
||||||
|
for tag in tags:
|
||||||
|
t = _TAG_TYPE_HINTS.get(tag.lower())
|
||||||
|
if t:
|
||||||
|
return t
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _tag_to_title(tag: str) -> str:
|
||||||
|
"""
|
||||||
|
Convert a hyphenated tag to title case without the str.title() apostrophe bug.
|
||||||
|
e.g. "lower-macy's" → "Lower Macy's" (not "Lower Macy'S")
|
||||||
|
"""
|
||||||
|
return " ".join(w.capitalize() for w in tag.replace("-", " ").split())
|
||||||
|
|
||||||
|
|
||||||
|
def _is_dispatch_channel(talkgroup_name: Optional[str]) -> bool:
|
||||||
|
"""True when the talkgroup is a shared dispatch backbone (not a tactical/working channel)."""
|
||||||
|
if not talkgroup_name:
|
||||||
|
return False
|
||||||
|
return bool(_DISPATCH_TG_RE.search(talkgroup_name))
|
||||||
|
|
||||||
|
|
||||||
|
def _incident_idle_minutes(inc: dict, now: datetime) -> float:
|
||||||
|
"""Minutes since the incident was last updated (or started). Returns 9999 on parse error."""
|
||||||
|
try:
|
||||||
|
raw = inc.get("updated_at") or inc.get("started_at") or ""
|
||||||
|
dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00"))
|
||||||
|
if dt.tzinfo is None:
|
||||||
|
dt = dt.replace(tzinfo=timezone.utc)
|
||||||
|
return (now - dt).total_seconds() / 60
|
||||||
|
except Exception:
|
||||||
|
return 9999.0
|
||||||
|
|
||||||
|
|
||||||
# ─────────────────────────────────────────────────────────────────────────────
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
# Public entry point
|
# Public entry point
|
||||||
@@ -46,50 +168,178 @@ async def correlate_call(
|
|||||||
incident_type: Optional[str],
|
incident_type: Optional[str],
|
||||||
location: Optional[str] = None,
|
location: Optional[str] = None,
|
||||||
location_coords: Optional[dict] = None,
|
location_coords: Optional[dict] = None,
|
||||||
|
reference_time: Optional[datetime] = None,
|
||||||
|
create_if_new: bool = True,
|
||||||
|
units: Optional[list[str]] = None,
|
||||||
|
vehicles: Optional[list[str]] = None,
|
||||||
|
cleared_units: Optional[list[str]] = None,
|
||||||
) -> Optional[str]:
|
) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Link call_id to an existing incident or create a new one.
|
Link call_id to an existing incident or create a new one.
|
||||||
Returns the incident_id, or None if skipped (no type and no talkgroup match).
|
|
||||||
|
reference_time — time anchor for the time-limited paths (location + slow).
|
||||||
|
Defaults to now. Pass call.started_at when re-correlating
|
||||||
|
orphaned calls so the window is anchored to when the call
|
||||||
|
actually happened, not when the sweep runs.
|
||||||
|
create_if_new — when False, skip new-incident creation (re-correlation only
|
||||||
|
links to existing incidents; it never creates new ones).
|
||||||
|
|
||||||
|
Returns the incident_id, or None if skipped.
|
||||||
"""
|
"""
|
||||||
now = datetime.now(timezone.utc)
|
now = reference_time or datetime.now(timezone.utc)
|
||||||
window = timedelta(hours=settings.correlation_window_hours)
|
window = timedelta(hours=settings.correlation_window_hours)
|
||||||
|
|
||||||
# Fetch all active incidents cross-type (mutual aid needs this)
|
# Fetch all active incidents cross-type (mutual aid needs this)
|
||||||
all_active = await fstore.collection_list("incidents", status="active")
|
all_active = await fstore.collection_list("incidents", status="active")
|
||||||
recent = [inc for inc in all_active if _within_window(inc, now, window)]
|
recent = [inc for inc in all_active if _within_window_of(inc, now, window)]
|
||||||
|
|
||||||
# Fetch call doc once — reused for disambiguation, embedding merge, unit accumulation
|
# Fetch call doc once — reused for disambiguation, embedding merge, unit accumulation
|
||||||
call_doc = await fstore.doc_get("calls", call_id) or {}
|
call_doc = await fstore.doc_get("calls", call_id) or {}
|
||||||
call_embedding: Optional[list] = call_doc.get("embedding")
|
call_embedding: Optional[list] = call_doc.get("embedding")
|
||||||
call_units: list[str] = call_doc.get("units") or []
|
# Prefer explicitly passed units/vehicles (per-scene, from intelligence extraction)
|
||||||
call_vehicles: list[str] = call_doc.get("vehicles") or []
|
# over the call doc, which merges units from ALL scenes in a multi-scene recording.
|
||||||
|
# Falling back to the call doc is correct for recorrelation sweeps where we have no
|
||||||
|
# scene-level breakdown.
|
||||||
|
call_units: list[str] = units if units is not None else (call_doc.get("units") or [])
|
||||||
|
call_vehicles: list[str] = vehicles if vehicles is not None else (call_doc.get("vehicles") or [])
|
||||||
|
call_cleared: list[str] = cleared_units if cleared_units is not None else (call_doc.get("cleared_units") or [])
|
||||||
call_severity: str = call_doc.get("severity") or "unknown"
|
call_severity: str = call_doc.get("severity") or "unknown"
|
||||||
# Use passed coords first (freshly geocoded), fall back to what's on the call doc
|
# Use passed coords first (freshly geocoded), fall back to what's on the call doc
|
||||||
coords: Optional[dict] = location_coords or call_doc.get("location_coords")
|
coords: Optional[dict] = location_coords or call_doc.get("location_coords")
|
||||||
|
|
||||||
matched_incident: Optional[dict] = None
|
matched_incident: Optional[dict] = None
|
||||||
|
corr_debug: dict = {}
|
||||||
|
|
||||||
# ── 1. Fast path: talkgroup match (any type, no time limit) ──────────────
|
# A "thin" call carries no scene-identifying information — it is a pure
|
||||||
|
# status transmission (10-4, en route, acknowledgement). Detected by the
|
||||||
|
# absence of extracted units, vehicles, AND geocoded coordinates. Thin
|
||||||
|
# calls should link to wherever the last active conversation on this TGID
|
||||||
|
# was happening rather than running the full scene-verification logic.
|
||||||
|
is_thin_call = not call_units and not call_vehicles and not coords
|
||||||
|
|
||||||
|
# ── 1. Fast path: talkgroup match with recency gate ──────────────────────
|
||||||
|
#
|
||||||
|
# Only considers incidents updated within tg_fast_path_idle_minutes (default 30 min).
|
||||||
|
# A shared dispatch channel handles every incident for a department — without the
|
||||||
|
# recency gate, a 5-hour-old incident would absorb every new call on that channel.
|
||||||
|
#
|
||||||
|
# Two behaviours depending on call substance:
|
||||||
|
# • Thin call → link to the most-recently-updated recent TGID incident.
|
||||||
|
# • Substantive → verify via _call_fits_incident before linking.
|
||||||
if talkgroup_id is not None and system_id:
|
if talkgroup_id is not None and system_id:
|
||||||
tg_str = str(talkgroup_id)
|
tg_str = str(talkgroup_id)
|
||||||
|
is_dispatch = _is_dispatch_channel(talkgroup_name)
|
||||||
|
|
||||||
tg_matches = [
|
tg_matches = [
|
||||||
inc for inc in all_active
|
inc for inc in all_active
|
||||||
if system_id in (inc.get("system_ids") or [])
|
if system_id in (inc.get("system_ids") or [])
|
||||||
and tg_str in (inc.get("talkgroup_ids") or [])
|
and tg_str in (inc.get("talkgroup_ids") or [])
|
||||||
]
|
]
|
||||||
if len(tg_matches) == 1:
|
# Apply recency gate — only incidents active within the rolling window.
|
||||||
matched_incident = tg_matches[0]
|
tg_recent = [
|
||||||
|
inc for inc in tg_matches
|
||||||
|
if _incident_idle_minutes(inc, now) <= settings.tg_fast_path_idle_minutes
|
||||||
|
]
|
||||||
|
|
||||||
|
if tg_matches and not tg_recent:
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Correlator fast-path: call {call_id} → {tg_matches[0]['incident_id']}"
|
f"Correlator fast-path skipped: all {len(tg_matches)} TGID incident(s) idle "
|
||||||
)
|
f"> {settings.tg_fast_path_idle_minutes}min; falling through to location/slow path"
|
||||||
elif len(tg_matches) > 1:
|
|
||||||
matched_incident = _disambiguate(
|
|
||||||
tg_matches, call_units, call_vehicles, coords, call_embedding
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if tg_recent and is_thin_call:
|
||||||
|
# Status/ack call — no scene data to reason about.
|
||||||
|
# Attach to whichever recent incident was most recently active on this TGID.
|
||||||
|
matched_incident = max(tg_recent, key=lambda inc: inc.get("updated_at", ""))
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "fast/thin",
|
||||||
|
"corr_incident_idle_min": round(_incident_idle_minutes(matched_incident, now), 1),
|
||||||
|
}
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Correlator fast-path (disambig {len(tg_matches)} candidates): "
|
f"Correlator fast-path (thin→last TGID incident): "
|
||||||
f"call {call_id} → {matched_incident['incident_id']}"
|
f"call {call_id} → {matched_incident['incident_id']}"
|
||||||
)
|
)
|
||||||
|
elif len(tg_recent) == 1:
|
||||||
|
candidate = tg_recent[0]
|
||||||
|
if _call_fits_incident(
|
||||||
|
candidate, call_units, call_vehicles, coords,
|
||||||
|
settings.location_proximity_km, is_dispatch=is_dispatch,
|
||||||
|
):
|
||||||
|
matched_incident = candidate
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "fast/single",
|
||||||
|
"corr_incident_idle_min": round(_incident_idle_minutes(candidate, now), 1),
|
||||||
|
}
|
||||||
|
logger.info(
|
||||||
|
f"Correlator fast-path: call {call_id} → {candidate['incident_id']}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
f"Correlator fast-path skipped: call {call_id} — different scene "
|
||||||
|
f"from {candidate['incident_id']}; will attempt new incident"
|
||||||
|
)
|
||||||
|
elif len(tg_recent) > 1:
|
||||||
|
candidate = _disambiguate(
|
||||||
|
tg_recent, call_units, call_vehicles, coords, call_embedding
|
||||||
|
)
|
||||||
|
# Disambiguate picks the best candidate, but still verify the call
|
||||||
|
# actually fits before committing — a new unrelated call on a busy
|
||||||
|
# dispatch channel should create its own incident, not be force-merged.
|
||||||
|
if _call_fits_incident(
|
||||||
|
candidate, call_units, call_vehicles, coords,
|
||||||
|
settings.location_proximity_km, is_dispatch=is_dispatch,
|
||||||
|
):
|
||||||
|
matched_incident = candidate
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "fast/disambig",
|
||||||
|
"corr_incident_idle_min": round(_incident_idle_minutes(candidate, now), 1),
|
||||||
|
"corr_candidates": len(tg_recent),
|
||||||
|
}
|
||||||
|
logger.info(
|
||||||
|
f"Correlator fast-path (disambig {len(tg_recent)} candidates): "
|
||||||
|
f"call {call_id} → {candidate['incident_id']}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
f"Correlator fast-path disambig: no candidate fits call {call_id} "
|
||||||
|
f"across {len(tg_recent)} incidents — will attempt new incident"
|
||||||
|
)
|
||||||
|
|
||||||
|
# ── 1.5. Unit-continuity path: same officer, not reassigned ─────────────────
|
||||||
|
#
|
||||||
|
# Handles long calls (bookings, transports, late scene clearance) where the
|
||||||
|
# 90-min idle gate has fired but the officer is still on the same call.
|
||||||
|
# Searches ALL active incidents — no idle gate, no time limit.
|
||||||
|
#
|
||||||
|
# Reassignment guard: if the same unit appears in a MORE recently updated
|
||||||
|
# incident, the officer has moved on and we don't link back to the old one.
|
||||||
|
# This correctly handles officers dispatched to a second call mid-shift.
|
||||||
|
if not matched_incident and call_units and system_id:
|
||||||
|
call_unit_set = set(call_units)
|
||||||
|
unit_candidates = [
|
||||||
|
inc for inc in all_active
|
||||||
|
if system_id in (inc.get("system_ids") or [])
|
||||||
|
and call_unit_set & set(inc.get("units") or [])
|
||||||
|
]
|
||||||
|
if unit_candidates:
|
||||||
|
best_unit_inc = max(unit_candidates, key=lambda i: i.get("updated_at", ""))
|
||||||
|
reassigned_away = any(
|
||||||
|
inc["incident_id"] != best_unit_inc["incident_id"]
|
||||||
|
and call_unit_set & set(inc.get("units") or [])
|
||||||
|
and inc.get("updated_at", "") > best_unit_inc.get("updated_at", "")
|
||||||
|
for inc in all_active
|
||||||
|
)
|
||||||
|
if not reassigned_away:
|
||||||
|
matched_incident = best_unit_inc
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "unit-continuity",
|
||||||
|
"corr_incident_idle_min": round(_incident_idle_minutes(best_unit_inc, now), 1),
|
||||||
|
}
|
||||||
|
logger.info(
|
||||||
|
f"Correlator unit-continuity: call {call_id} → "
|
||||||
|
f"{best_unit_inc['incident_id']} "
|
||||||
|
f"(idle {_incident_idle_minutes(best_unit_inc, now):.0f}min)"
|
||||||
|
)
|
||||||
|
|
||||||
# ── 2. Location path: proximity match (time-limited, cross-type) ─────────
|
# ── 2. Location path: proximity match (time-limited, cross-type) ─────────
|
||||||
if not matched_incident and coords:
|
if not matched_incident and coords:
|
||||||
@@ -103,13 +353,61 @@ async def correlate_call(
|
|||||||
)
|
)
|
||||||
if dist_km <= settings.location_proximity_km:
|
if dist_km <= settings.location_proximity_km:
|
||||||
matched_incident = inc
|
matched_incident = inc
|
||||||
|
corr_debug = {"corr_path": "location", "corr_distance_km": round(dist_km, 3)}
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Correlator location-path: call {call_id} → {inc['incident_id']} "
|
f"Correlator location-path: call {call_id} → {inc['incident_id']} "
|
||||||
f"(dist={dist_km:.2f}km)"
|
f"(dist={dist_km:.2f}km)"
|
||||||
)
|
)
|
||||||
break
|
break
|
||||||
|
|
||||||
# ── 3. Slow path: embedding + location corroboration (time-limited, same type) ──
|
# ── 2.5. Cross-TG path: same department, overlapping units, moderate similarity ──
|
||||||
|
#
|
||||||
|
# Catches pursuits / searches that span multiple talkgroup IDs within the same
|
||||||
|
# department (e.g. dispatch → tactical → geographic channel). The fast path
|
||||||
|
# is TGID-scoped so it never links these. Two conditions together provide
|
||||||
|
# strong evidence of the same scene without needing location:
|
||||||
|
# • 2+ shared unit IDs (same officers working the same call)
|
||||||
|
# • embedding similarity >= cross-TG threshold (same subject matter)
|
||||||
|
# Requiring 2+ shared units prevents single-officer false positives.
|
||||||
|
if not matched_incident and call_embedding and incident_type and call_units and system_id:
|
||||||
|
call_unit_set = set(call_units)
|
||||||
|
best_cross_score = 0.0
|
||||||
|
best_cross_inc: Optional[dict] = None
|
||||||
|
for inc in recent:
|
||||||
|
if inc.get("type") != incident_type:
|
||||||
|
continue
|
||||||
|
if system_id not in (inc.get("system_ids") or []):
|
||||||
|
continue
|
||||||
|
inc_units_set = set(inc.get("units") or [])
|
||||||
|
if len(call_unit_set & inc_units_set) < 2:
|
||||||
|
continue
|
||||||
|
inc_embedding = inc.get("embedding")
|
||||||
|
if not inc_embedding:
|
||||||
|
continue
|
||||||
|
sim = _cosine_similarity(call_embedding, inc_embedding)
|
||||||
|
if sim > best_cross_score:
|
||||||
|
best_cross_score = sim
|
||||||
|
best_cross_inc = inc
|
||||||
|
if best_cross_inc and best_cross_score >= settings.embedding_cross_tg_threshold:
|
||||||
|
matched_incident = best_cross_inc
|
||||||
|
shared = len(call_unit_set & set(best_cross_inc.get("units") or []))
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "cross-tg",
|
||||||
|
"corr_score": round(best_cross_score, 4),
|
||||||
|
"corr_shared_units": shared,
|
||||||
|
}
|
||||||
|
logger.info(
|
||||||
|
f"Correlator cross-TG path: call {call_id} → {best_cross_inc['incident_id']} "
|
||||||
|
f"(sim={best_cross_score:.3f}, shared_units={shared})"
|
||||||
|
)
|
||||||
|
|
||||||
|
# ── 3. Slow path: embedding similarity (time-limited, same type) ──────────
|
||||||
|
#
|
||||||
|
# Two tiers:
|
||||||
|
# ① embedding_similarity_threshold + location corroboration (standard)
|
||||||
|
# ② embedding_no_location_threshold alone — when geocoding failed on
|
||||||
|
# either side but the transcript content is semantically very close.
|
||||||
|
# A strong embedding match beats a missing geocode.
|
||||||
if not matched_incident and call_embedding and incident_type:
|
if not matched_incident and call_embedding and incident_type:
|
||||||
best_score = 0.0
|
best_score = 0.0
|
||||||
best_inc: Optional[dict] = None
|
best_inc: Optional[dict] = None
|
||||||
@@ -133,11 +431,27 @@ async def correlate_call(
|
|||||||
)
|
)
|
||||||
if dist_km <= settings.location_proximity_km * 4:
|
if dist_km <= settings.location_proximity_km * 4:
|
||||||
matched_incident = best_inc
|
matched_incident = best_inc
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "slow",
|
||||||
|
"corr_score": round(best_score, 4),
|
||||||
|
"corr_distance_km": round(dist_km, 3),
|
||||||
|
}
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Correlator slow-path: call {call_id} → {best_inc['incident_id']} "
|
f"Correlator slow-path: call {call_id} → {best_inc['incident_id']} "
|
||||||
f"(sim={best_score:.3f}, dist={dist_km:.2f}km)"
|
f"(sim={best_score:.3f}, dist={dist_km:.2f}km)"
|
||||||
)
|
)
|
||||||
# No coords available → slow path alone is not enough; skip
|
elif best_score >= settings.embedding_no_location_threshold:
|
||||||
|
# High-confidence semantic match; geocode unavailable on one or
|
||||||
|
# both sides — content similarity alone is sufficient evidence.
|
||||||
|
matched_incident = best_inc
|
||||||
|
corr_debug = {
|
||||||
|
"corr_path": "slow/no-location",
|
||||||
|
"corr_score": round(best_score, 4),
|
||||||
|
}
|
||||||
|
logger.info(
|
||||||
|
f"Correlator slow-path (high-confidence, no location): "
|
||||||
|
f"call {call_id} → {best_inc['incident_id']} (sim={best_score:.3f})"
|
||||||
|
)
|
||||||
|
|
||||||
# ── Update existing or create new ────────────────────────────────────────
|
# ── Update existing or create new ────────────────────────────────────────
|
||||||
if matched_incident:
|
if matched_incident:
|
||||||
@@ -146,18 +460,97 @@ async def correlate_call(
|
|||||||
matched_incident, call_id, talkgroup_id, system_id, tags,
|
matched_incident, call_id, talkgroup_id, system_id, tags,
|
||||||
location, location_coords, call_units, call_vehicles, call_embedding, now,
|
location, location_coords, call_units, call_vehicles, call_embedding, now,
|
||||||
talkgroup_name=talkgroup_name, incident_type=incident_type,
|
talkgroup_name=talkgroup_name, incident_type=incident_type,
|
||||||
|
cleared_units=call_cleared,
|
||||||
)
|
)
|
||||||
elif incident_type:
|
elif create_if_new:
|
||||||
incident_id = await _create_incident(
|
# If GPT returned no type (missing talkgroup context is common), attempt
|
||||||
call_id, incident_type, talkgroup_id, talkgroup_name, system_id,
|
# to recover a type from the extracted tags before giving up on creation.
|
||||||
tags, location, location_coords,
|
if not incident_type and tags:
|
||||||
call_units, call_vehicles, call_embedding, call_severity, now,
|
incident_type = _infer_type_from_tags(tags)
|
||||||
)
|
if incident_type:
|
||||||
|
logger.info(
|
||||||
|
f"Correlator: inferred incident_type={incident_type!r} from tags "
|
||||||
|
f"{tags} for call {call_id} (no GPT type)"
|
||||||
|
)
|
||||||
|
if not incident_type:
|
||||||
|
# No type and none inferred — nothing to create
|
||||||
|
return None
|
||||||
|
|
||||||
|
# ── Cross-system parent detection ─────────────────────────────────────
|
||||||
|
# Before creating a standalone incident, check whether this call belongs
|
||||||
|
# to an incident already opened by a different agency (multi-agency chase,
|
||||||
|
# mutual aid, etc.). If a parent candidate is found:
|
||||||
|
# • The existing candidate is demoted to a child (incident_type → "child")
|
||||||
|
# • A new master shell is created linking both children
|
||||||
|
# • The new call's incident is created as a second child of the master
|
||||||
|
cross_parent: Optional[dict] = None
|
||||||
|
if system_id:
|
||||||
|
cross_parent = await _find_cross_system_parent(
|
||||||
|
system_id=system_id,
|
||||||
|
incident_type=incident_type,
|
||||||
|
location=location,
|
||||||
|
location_coords=coords,
|
||||||
|
call_embedding=call_embedding,
|
||||||
|
recent=recent,
|
||||||
|
)
|
||||||
|
|
||||||
|
if cross_parent:
|
||||||
|
existing_child_id = cross_parent["incident_id"]
|
||||||
|
existing_master_id = cross_parent.get("parent_incident_id")
|
||||||
|
|
||||||
|
# Create the new agency's child incident first
|
||||||
|
incident_id = await _create_incident(
|
||||||
|
call_id, incident_type, talkgroup_id, talkgroup_name, system_id,
|
||||||
|
tags, location, location_coords,
|
||||||
|
call_units, call_vehicles, call_embedding, call_severity, now,
|
||||||
|
)
|
||||||
|
|
||||||
|
if existing_master_id:
|
||||||
|
# Candidate is already a child — link new child to the existing master
|
||||||
|
await _demote_to_child(incident_id, existing_master_id)
|
||||||
|
await _add_child_to_master(existing_master_id, incident_id, now)
|
||||||
|
corr_debug["corr_path"] = "new/cross-system-child"
|
||||||
|
logger.info(
|
||||||
|
f"Correlator cross-system: call {call_id} → new child {incident_id} "
|
||||||
|
f"under existing master {existing_master_id}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Candidate is a standalone master — create master shell, demote both
|
||||||
|
master_id = await _create_master_incident(
|
||||||
|
first_child_id=existing_child_id,
|
||||||
|
second_child_id=incident_id,
|
||||||
|
operational_type=incident_type,
|
||||||
|
location=cross_parent.get("location") or location,
|
||||||
|
location_coords=cross_parent.get("location_coords") or coords,
|
||||||
|
now=now,
|
||||||
|
)
|
||||||
|
await _demote_to_child(existing_child_id, master_id)
|
||||||
|
await _demote_to_child(incident_id, master_id)
|
||||||
|
corr_debug["corr_path"] = "new/cross-system-master"
|
||||||
|
logger.info(
|
||||||
|
f"Correlator cross-system: created master {master_id}, "
|
||||||
|
f"demoted {existing_child_id} + new {incident_id} as children"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Normal single-agency incident creation
|
||||||
|
incident_id = await _create_incident(
|
||||||
|
call_id, incident_type, talkgroup_id, talkgroup_name, system_id,
|
||||||
|
tags, location, location_coords,
|
||||||
|
call_units, call_vehicles, call_embedding, call_severity, now,
|
||||||
|
)
|
||||||
|
corr_debug["corr_path"] = "new"
|
||||||
else:
|
else:
|
||||||
# Unclassified call, no talkgroup match found — nothing to do
|
# Creation suppressed (re-correlation sweep) — nothing to do
|
||||||
return None
|
return None
|
||||||
|
|
||||||
await fstore.doc_set("calls", call_id, {"incident_id": incident_id})
|
# Persist the correlation decision to the call document so it can be
|
||||||
|
# inspected in Firestore or the admin UI without log-scraping.
|
||||||
|
if corr_debug:
|
||||||
|
try:
|
||||||
|
await fstore.doc_set("calls", call_id, corr_debug)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Could not write corr_debug for call {call_id}: {e}")
|
||||||
|
|
||||||
return incident_id
|
return incident_id
|
||||||
|
|
||||||
|
|
||||||
@@ -165,14 +558,19 @@ async def correlate_call(
|
|||||||
# Internal helpers
|
# Internal helpers
|
||||||
# ─────────────────────────────────────────────────────────────────────────────
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
def _within_window(inc: dict, now: datetime, window: timedelta) -> bool:
|
def _within_window_of(inc: dict, anchor: datetime, window: timedelta) -> bool:
|
||||||
|
"""
|
||||||
|
True if the incident's started_at is within `window` of `anchor` in either
|
||||||
|
direction. Using started_at (not updated_at) means re-correlation anchored
|
||||||
|
to a call's started_at correctly matches incidents created shortly *after*
|
||||||
|
that call (e.g. a welfare check at T+0 vs. an incident created at T+15m).
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
dt = datetime.fromisoformat(
|
raw = inc.get("started_at") or inc.get("updated_at") or ""
|
||||||
str(inc.get("updated_at", "")).replace("Z", "+00:00")
|
dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00"))
|
||||||
)
|
|
||||||
if dt.tzinfo is None:
|
if dt.tzinfo is None:
|
||||||
dt = dt.replace(tzinfo=timezone.utc)
|
dt = dt.replace(tzinfo=timezone.utc)
|
||||||
return (now - dt) <= window
|
return abs((anchor - dt).total_seconds()) <= window.total_seconds()
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@@ -239,6 +637,63 @@ def _disambiguate(
|
|||||||
return best
|
return best
|
||||||
|
|
||||||
|
|
||||||
|
def _call_fits_incident(
|
||||||
|
inc: dict,
|
||||||
|
call_units: list[str],
|
||||||
|
call_vehicles: list[str],
|
||||||
|
call_coords: Optional[dict],
|
||||||
|
proximity_km: float,
|
||||||
|
is_dispatch: bool = False,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Return True if this call plausibly belongs to the given incident.
|
||||||
|
|
||||||
|
Positive signals (unit/vehicle overlap, location proximity) are always
|
||||||
|
respected. The fallback — when there is no evidence either way — depends
|
||||||
|
on channel type:
|
||||||
|
|
||||||
|
• Tactical / working channel (is_dispatch=False): default True (link).
|
||||||
|
A working channel is dedicated to one scene; no evidence of separation
|
||||||
|
means they're probably the same call.
|
||||||
|
|
||||||
|
• Dispatch channel (is_dispatch=True): default False (create new).
|
||||||
|
A dispatch channel carries every incident for a department. Linking
|
||||||
|
without positive evidence would merge unrelated incidents whenever
|
||||||
|
geocoding fails (which is common for partial street addresses).
|
||||||
|
|
||||||
|
Thin calls (no units/vehicles/coords) never reach this function —
|
||||||
|
they're intercepted by the is_thin_call branch above.
|
||||||
|
"""
|
||||||
|
# Unit overlap is the strongest positive signal: same officers = same call.
|
||||||
|
inc_units = set(inc.get("units") or [])
|
||||||
|
if inc_units and call_units and any(u in inc_units for u in call_units):
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Vehicle overlap: same vehicle description across calls → same scene.
|
||||||
|
inc_vehicles = set(inc.get("vehicles") or [])
|
||||||
|
if inc_vehicles and call_vehicles and any(v in inc_vehicles for v in call_vehicles):
|
||||||
|
return True
|
||||||
|
|
||||||
|
# When both sides have geocoded coordinates, distance is the tiebreaker.
|
||||||
|
inc_coords = inc.get("location_coords")
|
||||||
|
if call_coords and inc_coords:
|
||||||
|
dist_km = _haversine_km(
|
||||||
|
call_coords["lat"], call_coords["lng"],
|
||||||
|
inc_coords["lat"], inc_coords["lng"],
|
||||||
|
)
|
||||||
|
if dist_km <= proximity_km:
|
||||||
|
return True
|
||||||
|
# Different location AND no unit/vehicle overlap → different incident.
|
||||||
|
return False
|
||||||
|
|
||||||
|
# No geocoded location on one or both sides.
|
||||||
|
# On a tactical/working channel, default to linking (conservative — channel
|
||||||
|
# is dedicated to one scene so no evidence of separation ≈ same scene).
|
||||||
|
# On a dispatch channel, require positive evidence — without it we risk
|
||||||
|
# pulling every ungeocoded call in a shift into the same incident.
|
||||||
|
return not is_dispatch
|
||||||
|
|
||||||
|
|
||||||
async def _update_incident(
|
async def _update_incident(
|
||||||
inc: dict,
|
inc: dict,
|
||||||
call_id: str,
|
call_id: str,
|
||||||
@@ -253,6 +708,7 @@ async def _update_incident(
|
|||||||
now: datetime,
|
now: datetime,
|
||||||
talkgroup_name: Optional[str] = None,
|
talkgroup_name: Optional[str] = None,
|
||||||
incident_type: Optional[str] = None,
|
incident_type: Optional[str] = None,
|
||||||
|
cleared_units: Optional[list[str]] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
incident_id = inc["incident_id"]
|
incident_id = inc["incident_id"]
|
||||||
|
|
||||||
@@ -272,6 +728,19 @@ async def _update_incident(
|
|||||||
merged_units = list(dict.fromkeys((inc.get("units") or []) + call_units))
|
merged_units = list(dict.fromkeys((inc.get("units") or []) + call_units))
|
||||||
merged_vehicles = list(dict.fromkeys((inc.get("vehicles") or []) + call_vehicles))
|
merged_vehicles = list(dict.fromkeys((inc.get("vehicles") or []) + call_vehicles))
|
||||||
|
|
||||||
|
# Unit activity tracking: units_active / units_cleared
|
||||||
|
# units_active = units currently on scene; units_cleared = units back in service
|
||||||
|
units_active = list(inc.get("units_active") or [])
|
||||||
|
units_cleared = list(inc.get("units_cleared") or [])
|
||||||
|
for u in call_units:
|
||||||
|
if u not in units_cleared and u not in units_active:
|
||||||
|
units_active.append(u)
|
||||||
|
for u in (cleared_units or []):
|
||||||
|
if u in units_active:
|
||||||
|
units_active.remove(u)
|
||||||
|
if u not in units_cleared:
|
||||||
|
units_cleared.append(u)
|
||||||
|
|
||||||
location_mentions = list(inc.get("location_mentions") or [])
|
location_mentions = list(inc.get("location_mentions") or [])
|
||||||
if location and location not in location_mentions:
|
if location and location not in location_mentions:
|
||||||
location_mentions.append(location)
|
location_mentions.append(location)
|
||||||
@@ -288,6 +757,8 @@ async def _update_incident(
|
|||||||
"tags": merged_tags,
|
"tags": merged_tags,
|
||||||
"units": merged_units,
|
"units": merged_units,
|
||||||
"vehicles": merged_vehicles,
|
"vehicles": merged_vehicles,
|
||||||
|
"units_active": units_active,
|
||||||
|
"units_cleared": units_cleared,
|
||||||
"location_mentions": location_mentions,
|
"location_mentions": location_mentions,
|
||||||
"updated_at": now.isoformat(),
|
"updated_at": now.isoformat(),
|
||||||
"summary_stale": True,
|
"summary_stale": True,
|
||||||
@@ -298,22 +769,40 @@ async def _update_incident(
|
|||||||
if best_coords:
|
if best_coords:
|
||||||
updates["location_coords"] = best_coords
|
updates["location_coords"] = best_coords
|
||||||
|
|
||||||
|
# Update incident type when a re-classified call provides a concrete type.
|
||||||
|
# This handles the case where admin correction changes fire→police, etc.
|
||||||
|
if incident_type and incident_type != inc.get("type"):
|
||||||
|
updates["type"] = incident_type
|
||||||
|
|
||||||
# Re-evaluate title when a substantive call (classified incident_type) brings new tags.
|
# Re-evaluate title when a substantive call (classified incident_type) brings new tags.
|
||||||
# Routine status calls (type=None) do not clobber the title.
|
# Routine status calls (type=None) do not clobber the title.
|
||||||
if incident_type:
|
if incident_type:
|
||||||
content_tags = [t for t in tags if t != "auto-generated"]
|
content_tags = [t for t in tags if t != "auto-generated"]
|
||||||
primary_tag = content_tags[0].replace("-", " ").title() if content_tags else None
|
primary_tag = _tag_to_title(content_tags[0]) if content_tags else None
|
||||||
tg_label = (
|
tg_label = (
|
||||||
talkgroup_name
|
talkgroup_name
|
||||||
or (f"TGID {talkgroup_id}" if talkgroup_id else inc.get("title", "").split(" — ")[-1])
|
or (f"TGID {talkgroup_id}" if talkgroup_id else inc.get("title", "").split(" — ")[-1])
|
||||||
)
|
)
|
||||||
if primary_tag and best_location:
|
if primary_tag and best_location and primary_tag.lower() != best_location.lower():
|
||||||
updates["title"] = f"{primary_tag} at {best_location}"
|
updates["title"] = f"{primary_tag} at {best_location}"
|
||||||
elif primary_tag and tg_label:
|
elif primary_tag and tg_label:
|
||||||
updates["title"] = f"{primary_tag} — {tg_label}"
|
updates["title"] = f"{primary_tag} — {tg_label}"
|
||||||
elif primary_tag:
|
elif primary_tag:
|
||||||
updates["title"] = primary_tag
|
updates["title"] = primary_tag
|
||||||
|
|
||||||
|
# Signal-based auto-resolve: every tracked unit has cleared, none still active.
|
||||||
|
# Requires at least one unit to have explicitly signalled back-in-service so we
|
||||||
|
# don't fire on incidents where units were never tracked (no unit mentions at all).
|
||||||
|
if units_cleared and not units_active:
|
||||||
|
updates["status"] = "resolved"
|
||||||
|
await fstore.doc_set("incidents", incident_id, updates)
|
||||||
|
logger.info(
|
||||||
|
f"Correlator: signal-resolved incident {incident_id} "
|
||||||
|
f"(call {call_id} — all {len(units_cleared)} unit(s) clear)"
|
||||||
|
)
|
||||||
|
await maybe_resolve_parent(incident_id)
|
||||||
|
return
|
||||||
|
|
||||||
await fstore.doc_set("incidents", incident_id, updates)
|
await fstore.doc_set("incidents", incident_id, updates)
|
||||||
logger.info(f"Correlator: linked call {call_id} to incident {incident_id}")
|
logger.info(f"Correlator: linked call {call_id} to incident {incident_id}")
|
||||||
|
|
||||||
@@ -341,17 +830,18 @@ async def _create_incident(
|
|||||||
|
|
||||||
# Build a descriptive title from tags + location when available
|
# Build a descriptive title from tags + location when available
|
||||||
content_tags = [t for t in tags if t != "auto-generated"]
|
content_tags = [t for t in tags if t != "auto-generated"]
|
||||||
primary_tag = content_tags[0].replace("-", " ").title() if content_tags else None
|
primary_tag = _tag_to_title(content_tags[0]) if content_tags else None
|
||||||
if primary_tag and location:
|
if primary_tag and location and primary_tag.lower() != location.lower():
|
||||||
title = f"{primary_tag} at {location}"
|
title = f"{primary_tag} at {location}"
|
||||||
elif primary_tag:
|
elif primary_tag:
|
||||||
title = f"{primary_tag} — {tg_label}"
|
title = f"{primary_tag} — {tg_label}"
|
||||||
else:
|
else:
|
||||||
title = f"{incident_type.title()} — {tg_label}"
|
title = f"{_tag_to_title(incident_type)} — {tg_label}"
|
||||||
|
|
||||||
doc = {
|
doc = {
|
||||||
"incident_id": incident_id,
|
"incident_id": incident_id,
|
||||||
"title": title,
|
"title": title,
|
||||||
|
"incident_type": "master", # structural role; "child" set on demotion
|
||||||
"type": incident_type,
|
"type": incident_type,
|
||||||
"status": "active",
|
"status": "active",
|
||||||
"location": location,
|
"location": location,
|
||||||
@@ -362,6 +852,8 @@ async def _create_incident(
|
|||||||
"system_ids": [system_id] if system_id else [],
|
"system_ids": [system_id] if system_id else [],
|
||||||
"tags": tags + ["auto-generated"],
|
"tags": tags + ["auto-generated"],
|
||||||
"units": call_units,
|
"units": call_units,
|
||||||
|
"units_active": list(call_units),
|
||||||
|
"units_cleared": [],
|
||||||
"vehicles": call_vehicles,
|
"vehicles": call_vehicles,
|
||||||
"severity": call_severity,
|
"severity": call_severity,
|
||||||
"summary": None,
|
"summary": None,
|
||||||
@@ -392,6 +884,190 @@ def _merge_embedding_vecs(inc: dict, call_embedding: list[float]) -> dict:
|
|||||||
return {"embedding": call_embedding, "embedding_count": 1}
|
return {"embedding": call_embedding, "embedding_count": 1}
|
||||||
|
|
||||||
|
|
||||||
|
async def _create_master_incident(
|
||||||
|
first_child_id: str,
|
||||||
|
second_child_id: str,
|
||||||
|
operational_type: str,
|
||||||
|
location: Optional[str],
|
||||||
|
location_coords: Optional[dict],
|
||||||
|
now: datetime,
|
||||||
|
) -> str:
|
||||||
|
"""
|
||||||
|
Create a master shell incident linking two child incidents.
|
||||||
|
The master owns no calls directly — it is a grouping record.
|
||||||
|
Returns the new master incident_id.
|
||||||
|
"""
|
||||||
|
master_id = str(uuid.uuid4())
|
||||||
|
doc = {
|
||||||
|
"incident_id": master_id,
|
||||||
|
"title": f"Multi-agency {operational_type} incident",
|
||||||
|
"incident_type": "master",
|
||||||
|
"type": operational_type,
|
||||||
|
"status": "active",
|
||||||
|
"location": location,
|
||||||
|
"location_coords": location_coords,
|
||||||
|
"child_incident_ids": [first_child_id, second_child_id],
|
||||||
|
"parent_incident_id": None,
|
||||||
|
"call_ids": [],
|
||||||
|
"talkgroup_ids": [],
|
||||||
|
"system_ids": [],
|
||||||
|
"tags": [],
|
||||||
|
"units": [],
|
||||||
|
"vehicles": [],
|
||||||
|
"severity": "unknown",
|
||||||
|
"summary": None,
|
||||||
|
"summary_stale": True,
|
||||||
|
"summary_last_run": None,
|
||||||
|
"embedding": None,
|
||||||
|
"embedding_count": 0,
|
||||||
|
"has_updates": False,
|
||||||
|
"started_at": now.isoformat(),
|
||||||
|
"updated_at": now.isoformat(),
|
||||||
|
}
|
||||||
|
await fstore.doc_set("incidents", master_id, doc, merge=False)
|
||||||
|
logger.info(f"Correlator: created master incident {master_id} linking {first_child_id} + {second_child_id}")
|
||||||
|
return master_id
|
||||||
|
|
||||||
|
|
||||||
|
async def _demote_to_child(incident_id: str, parent_id: str) -> None:
|
||||||
|
"""Demote a standalone master incident to a child by setting incident_type and parent reference."""
|
||||||
|
await fstore.doc_set("incidents", incident_id, {
|
||||||
|
"incident_type": "child",
|
||||||
|
"parent_incident_id": parent_id,
|
||||||
|
})
|
||||||
|
logger.info(f"Correlator: demoted incident {incident_id} → child of master {parent_id}")
|
||||||
|
|
||||||
|
|
||||||
|
async def _add_child_to_master(master_id: str, child_id: str, now: datetime) -> None:
|
||||||
|
"""Append a new child to an existing master's child_incident_ids list."""
|
||||||
|
master = await fstore.doc_get("incidents", master_id)
|
||||||
|
if not master:
|
||||||
|
return
|
||||||
|
children = list(master.get("child_incident_ids") or [])
|
||||||
|
if child_id not in children:
|
||||||
|
children.append(child_id)
|
||||||
|
updates: dict = {"child_incident_ids": children, "updated_at": now.isoformat()}
|
||||||
|
# Re-open a resolved master when a new child is added (retroactive link)
|
||||||
|
if master.get("status") == "resolved":
|
||||||
|
updates["has_updates"] = True
|
||||||
|
await fstore.doc_set("incidents", master_id, updates)
|
||||||
|
|
||||||
|
|
||||||
|
async def maybe_resolve_parent(incident_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Called after resolving a child incident.
|
||||||
|
If all siblings under the same master are also resolved, auto-resolve the master.
|
||||||
|
Safe to call on non-child incidents — exits immediately when there's no parent.
|
||||||
|
"""
|
||||||
|
inc = await fstore.doc_get("incidents", incident_id)
|
||||||
|
if not inc:
|
||||||
|
return
|
||||||
|
parent_id = inc.get("parent_incident_id")
|
||||||
|
if not parent_id:
|
||||||
|
return # standalone or already a master — nothing to propagate
|
||||||
|
|
||||||
|
parent = await fstore.doc_get("incidents", parent_id)
|
||||||
|
if not parent or parent.get("status") == "resolved":
|
||||||
|
return # master already closed
|
||||||
|
|
||||||
|
child_ids: list[str] = parent.get("child_incident_ids") or []
|
||||||
|
if not child_ids:
|
||||||
|
return
|
||||||
|
|
||||||
|
for cid in child_ids:
|
||||||
|
if cid == incident_id:
|
||||||
|
continue # the one we just resolved
|
||||||
|
child = await fstore.doc_get("incidents", cid)
|
||||||
|
if not child or child.get("status") != "resolved":
|
||||||
|
return # at least one sibling still active
|
||||||
|
|
||||||
|
# All children resolved — close the master
|
||||||
|
await fstore.doc_set("incidents", parent_id, {"status": "resolved"})
|
||||||
|
logger.info(
|
||||||
|
f"Auto-resolved master incident {parent_id} "
|
||||||
|
f"(all {len(child_ids)} child(ren) resolved)"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def _find_cross_system_parent(
|
||||||
|
system_id: str,
|
||||||
|
incident_type: Optional[str],
|
||||||
|
location: Optional[str],
|
||||||
|
location_coords: Optional[dict],
|
||||||
|
call_embedding: Optional[list],
|
||||||
|
recent: list[dict],
|
||||||
|
) -> Optional[dict]:
|
||||||
|
"""
|
||||||
|
Scan active incidents from OTHER systems for a cross-agency parent candidate.
|
||||||
|
|
||||||
|
Match criteria (need at least two signals firing together):
|
||||||
|
A. Road/route identifier overlap between the new call's location and the
|
||||||
|
incident's accumulated location_mentions. Any shared route number or
|
||||||
|
road name is a strong positive — two agencies don't randomly share the
|
||||||
|
same road name in the same window.
|
||||||
|
B. Content embedding similarity ≥ 0.78 (lower than same-system slow path
|
||||||
|
because we're linking, not merging).
|
||||||
|
C. Geocoded proximity — 1 km for static scenes (≤2 location_mentions),
|
||||||
|
3 km for dynamic/moving scenes (chase, expanding perimeter).
|
||||||
|
|
||||||
|
Returns the best matching incident (master or standalone), or None.
|
||||||
|
Child incidents are resolved to their parent before matching so we always
|
||||||
|
attach to the master level.
|
||||||
|
"""
|
||||||
|
best_inc: Optional[dict] = None
|
||||||
|
best_score = 0.0
|
||||||
|
|
||||||
|
for inc in recent:
|
||||||
|
# Only cross-system candidates
|
||||||
|
if system_id in (inc.get("system_ids") or []):
|
||||||
|
continue
|
||||||
|
if not _operational_types_compatible(incident_type, inc.get("type")):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# If the candidate is already a child, resolve to its parent
|
||||||
|
if inc.get("incident_type") == "child" and inc.get("parent_incident_id"):
|
||||||
|
parent = await fstore.doc_get("incidents", inc["parent_incident_id"])
|
||||||
|
if parent and parent.get("status") == "active":
|
||||||
|
inc = parent
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
|
||||||
|
score = 0.0
|
||||||
|
inc_mentions: list[str] = inc.get("location_mentions") or []
|
||||||
|
|
||||||
|
# Signal A — road/route identifier overlap (0.4).
|
||||||
|
# Shared route numbers are a strong signal but not conclusive alone.
|
||||||
|
if location and _location_mentions_road_overlap(location, inc_mentions):
|
||||||
|
score += 0.4
|
||||||
|
|
||||||
|
# Signal B — content embedding similarity ≥ 0.78 (0.3 flat bonus).
|
||||||
|
inc_embedding = inc.get("embedding")
|
||||||
|
if call_embedding and inc_embedding:
|
||||||
|
if _cosine_similarity(call_embedding, inc_embedding) >= 0.78:
|
||||||
|
score += 0.3
|
||||||
|
|
||||||
|
# Signal C — geocoded proximity (0.3).
|
||||||
|
# Dynamic scenes (3+ location mentions = chase/moving perimeter) use 3 km;
|
||||||
|
# static mutual aid (≤2 mentions) uses 1 km.
|
||||||
|
inc_coords = inc.get("location_coords")
|
||||||
|
if location_coords and inc_coords:
|
||||||
|
dist_km = _haversine_km(
|
||||||
|
location_coords["lat"], location_coords["lng"],
|
||||||
|
inc_coords["lat"], inc_coords["lng"],
|
||||||
|
)
|
||||||
|
radius = 3.0 if len(inc_mentions) >= 3 else 1.0
|
||||||
|
if dist_km <= radius:
|
||||||
|
score += 0.3
|
||||||
|
|
||||||
|
# threshold = 0.5 → requires at least two signals (A+B, A+C, or B+C).
|
||||||
|
# No single signal alone can clear the bar.
|
||||||
|
if score >= 0.5 and score > best_score:
|
||||||
|
best_score = score
|
||||||
|
best_inc = inc
|
||||||
|
|
||||||
|
return best_inc
|
||||||
|
|
||||||
|
|
||||||
def _cosine_similarity(a: list[float], b: list[float]) -> float:
|
def _cosine_similarity(a: list[float], b: list[float]) -> float:
|
||||||
import numpy as np
|
import numpy as np
|
||||||
va, vb = np.array(a, dtype=float), np.array(b, dtype=float)
|
va, vb = np.array(a, dtype=float), np.array(b, dtype=float)
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
"""
|
"""
|
||||||
GPT-4o-mini intelligence extraction from call transcripts.
|
GPT-4o-mini intelligence extraction from call transcripts.
|
||||||
|
|
||||||
Sends the transcript to GPT-4o mini with a tight JSON schema prompt.
|
Sends the transcript to GPT-4o-mini with a structured prompt that detects
|
||||||
Returns structured data: incident type, tags, location, vehicles, units, severity.
|
whether the recording contains one or multiple distinct scenes (back-to-back
|
||||||
|
dispatch conversations on a busy channel). Returns a list of scene dicts —
|
||||||
|
one per detected incident. Most calls produce a single scene.
|
||||||
|
|
||||||
Falls back gracefully if the API is unavailable or returns malformed output.
|
Falls back gracefully if the API is unavailable or returns malformed output.
|
||||||
"""
|
"""
|
||||||
@@ -13,30 +15,41 @@ from typing import Optional
|
|||||||
from app.internal.logger import logger
|
from app.internal.logger import logger
|
||||||
from app.internal import firestore as fstore
|
from app.internal import firestore as fstore
|
||||||
|
|
||||||
_PROMPT_TEMPLATE = """You are analyzing a P25 public safety radio recording. The audio was transcribed by Whisper through a digital radio vocoder, which introduces errors. Each numbered transmission is a separate PTT press from a different radio. Extract structured information and respond ONLY with a single valid JSON object — no markdown, no explanation.
|
_PROMPT_TEMPLATE = """You are analyzing a P25 public safety radio recording. The audio was transcribed by Whisper through a digital radio vocoder, which introduces errors. Each numbered transmission is a separate PTT press from a different radio.
|
||||||
|
|
||||||
Schema:
|
SCENE DETECTION:
|
||||||
{{
|
A busy dispatch channel sometimes captures back-to-back conversations about multiple concurrent incidents in a single recording. Detect whether this recording contains ONE scene (all transmissions relate to a single event) or MULTIPLE scenes (clearly distinct dispatch conversations with different units being assigned, different locations, different event types). Assign short status transmissions (10-4, en route, acknowledgements) with no clear scene context to the most recent scene before them in the list.
|
||||||
"incident_type": one of "fire" | "ems" | "police" | "accident" | "other" | "unknown",
|
|
||||||
"tags": [list of specific descriptive tags, max 6, e.g. "two-car mva", "property-damage-only", "working fire", "shots-fired"],
|
Always respond with the scenes array, even for a single scene.
|
||||||
"location": "most specific location string found, or empty string",
|
|
||||||
"vehicles": [vehicle descriptions mentioned, e.g. "Hyundai Tucson", "black sedan"],
|
Response format — a JSON object with a "scenes" array. Each scene:
|
||||||
"units": [unit IDs or officer numbers mentioned, e.g. "Unit 511", "Car 4"],
|
segment_indices: list of 0-based indices into the numbered transmissions (or null if no segments)
|
||||||
"severity": one of "minor" | "moderate" | "major" | "unknown",
|
incident_type: one of "fire" | "ems" | "police" | "accident" | "other" | "unknown"
|
||||||
"resolved": true if this call explicitly signals the incident is over ("Code 4", "in custody", "all clear", "fire out", "patient transported", "GOA", "scene clear", "10-42", "negative contact", "clear the scene"), false otherwise,
|
tags: list of specific descriptive tags, max 6, e.g. "two-car mva", "working fire", "shots-fired"
|
||||||
"transcript_corrected": "corrected full transcript string, or null if no corrections needed"
|
location: most specific location string found, or empty string
|
||||||
}}
|
vehicles: list of vehicle descriptions mentioned
|
||||||
|
units: list of unit IDs or officer numbers explicitly mentioned
|
||||||
|
cleared_units: list of unit IDs that explicitly signal back-in-service or available in this recording
|
||||||
|
severity: one of "minor" | "moderate" | "major" | "unknown"
|
||||||
|
resolved: true if this scene explicitly signals incident closure, false otherwise
|
||||||
|
reassignment: true if dispatch is actively pulling a unit away from their current assignment to respond to a new, different call — e.g. "Baker, can you clear and respond to...", "Adam, break from that and go to...". False if the unit is simply reporting in, updating status, or continuing their current assignment.
|
||||||
|
transcript_corrected: corrected text for this scene's transmissions only, or null
|
||||||
|
|
||||||
Rules:
|
Rules:
|
||||||
- location: prefer intersections > addresses > mile markers > route+town > route alone > town alone. Empty string if none.
|
- location: prefer intersections > addresses > mile markers > route+town > route alone > town alone. Empty string if none.
|
||||||
- tags: be specific and lowercase, hyphenated. Do not repeat incident_type as a tag.
|
- tags: describe WHAT happened, not WHERE. Specific, lowercase, hyphenated. Do not use location names, road names, talkgroup names, or place names as tags (wrong: "lower-macy's", "canvas-route-6", "route-202"; right: "suspect-search", "shoplifting", "vehicle-pursuit"). Do not repeat incident_type as a tag.
|
||||||
- units: only identifiers explicitly mentioned, not inferred.
|
- units: only identifiers explicitly mentioned, not inferred.
|
||||||
- Do not invent details not present in the transcript.
|
- Do not invent details not present in the transcript.
|
||||||
- transcript_corrected: fix only clear STT errors caused by vocoder distortion (e.g. "Several" → "10-4", misheard street names, garbled unit IDs). Use the back-and-forth context between transmissions to resolve ambiguities. Keep all radio language as-is — do NOT decode codes into plain English. Return null if the transcript looks accurate.
|
- incident_type: let the talkgroup channel be your primary signal. Use "fire" ONLY if the talkgroup is clearly a fire/rescue channel OR the transcript explicitly describes active fire, smoke, flames, or structure fire activation. Police or EMS referencing a fire scene → use "police" or "ems". When uncertain, prefer "other" over "fire".
|
||||||
|
- ten_codes: interpret radio codes using the department reference provided below. Do not guess codes not listed.
|
||||||
|
- resolved: true only when the scene explicitly signals "Code 4", "all clear", "10-42", "in custody", "patient transported", "fire out", "GOA", "negative contact", "scene clear".
|
||||||
|
- cleared_units: only include units that explicitly stated their own back-in-service status in this recording (e.g. "Unit 7, 10-8", "Baker-1 available", "E-14 back in service", or the department ten-code for available/back-in-service listed above). Silence or absence of a unit is NOT clearance. A scene-wide Code 4 belongs in resolved=true, not here — cleared_units is for individual unit availability signals only.
|
||||||
|
- reassignment: only true when a unit is explicitly being pulled to a completely new call or location. A unit going en route to their first dispatch is NOT a reassignment. Routine status updates, acknowledgements, and scene updates are NOT reassignments.
|
||||||
|
- transcript_corrected: fix only clear STT/vocoder errors (e.g. "Several" → "10-4", misheard street names, garbled unit IDs). Keep all radio language as-is — do NOT decode codes into plain English. Return null if accurate.
|
||||||
|
|
||||||
System: {system_id}
|
System: {system_id}
|
||||||
Talkgroup: {talkgroup_name}
|
Talkgroup: {talkgroup_name}
|
||||||
{transcript_block}"""
|
{ten_codes_block}{vocabulary_block}{transcript_block}"""
|
||||||
|
|
||||||
# Nominatim viewbox half-width in degrees (~11 km at mid-latitudes)
|
# Nominatim viewbox half-width in degrees (~11 km at mid-latitudes)
|
||||||
_GEO_DELTA = 0.1
|
_GEO_DELTA = 0.1
|
||||||
@@ -54,7 +67,14 @@ _TG_SUFFIX_RE = re.compile(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
async def extract_tags(
|
def _build_ten_codes_block(ten_codes: dict[str, str]) -> str:
|
||||||
|
if not ten_codes:
|
||||||
|
return ""
|
||||||
|
lines = "\n".join(f" {code}: {meaning}" for code, meaning in sorted(ten_codes.items()))
|
||||||
|
return f"Department ten-codes:\n{lines}\n\n"
|
||||||
|
|
||||||
|
|
||||||
|
async def extract_scenes(
|
||||||
call_id: str,
|
call_id: str,
|
||||||
transcript: str,
|
transcript: str,
|
||||||
talkgroup_name: Optional[str] = None,
|
talkgroup_name: Optional[str] = None,
|
||||||
@@ -63,77 +83,134 @@ async def extract_tags(
|
|||||||
segments: Optional[list[dict]] = None,
|
segments: Optional[list[dict]] = None,
|
||||||
node_id: Optional[str] = None,
|
node_id: Optional[str] = None,
|
||||||
preserve_transcript_correction: bool = False,
|
preserve_transcript_correction: bool = False,
|
||||||
) -> tuple[list[str], Optional[str], Optional[str], Optional[dict], bool]:
|
) -> list[dict]:
|
||||||
"""
|
"""
|
||||||
Extract incident tags, type, location, corrected transcript, and closure signal via GPT-4o mini.
|
Split the transcript into one or more scenes and extract structured
|
||||||
Geocodes the extracted location string via Nominatim using the node's position as bias.
|
intelligence for each. Most calls return a single scene; a busy dispatch
|
||||||
|
channel capturing back-to-back conversations returns multiple.
|
||||||
|
|
||||||
Returns:
|
Each scene dict contains:
|
||||||
(tags, primary_type, location_str, location_coords, resolved)
|
tags, incident_type, location, location_coords, resolved,
|
||||||
where location_coords is {"lat": float, "lng": float} or None,
|
severity, vehicles, units, transcript_corrected,
|
||||||
and resolved is True when the transcript signals incident closure.
|
segment_indices, embedding
|
||||||
|
|
||||||
Side-effect: updates calls/{call_id} in Firestore with tags, location,
|
Side-effect: updates calls/{call_id} in Firestore with merged tags,
|
||||||
location_coords, vehicles, units, severity, transcript_corrected; also stores embedding.
|
location (primary scene), units/vehicles, severity, embedding, and
|
||||||
|
optionally transcript_corrected.
|
||||||
"""
|
"""
|
||||||
result = await asyncio.to_thread(
|
vocabulary: list[str] = []
|
||||||
_sync_extract, transcript, talkgroup_name, talkgroup_id, system_id, segments
|
ten_codes: dict[str, str] = {}
|
||||||
|
if system_id:
|
||||||
|
# Single cached read — vocabulary and ten_codes live on the same document.
|
||||||
|
system_doc = await fstore.doc_get_cached("systems", system_id)
|
||||||
|
if system_doc:
|
||||||
|
vocabulary = system_doc.get("vocabulary") or []
|
||||||
|
ten_codes = system_doc.get("ten_codes") or {}
|
||||||
|
|
||||||
|
raw_scenes: list[dict] = await asyncio.to_thread(
|
||||||
|
_sync_extract,
|
||||||
|
transcript, talkgroup_name, talkgroup_id, system_id, segments, vocabulary, ten_codes,
|
||||||
)
|
)
|
||||||
|
|
||||||
tags: list[str] = result.get("tags") or []
|
if not raw_scenes:
|
||||||
incident_type: Optional[str] = result.get("incident_type") or None
|
return []
|
||||||
location: Optional[str] = result.get("location") or None
|
|
||||||
vehicles: list[str] = result.get("vehicles") or []
|
|
||||||
units: list[str] = result.get("units") or []
|
|
||||||
severity: str = result.get("severity") or "unknown"
|
|
||||||
resolved: bool = bool(result.get("resolved", False))
|
|
||||||
transcript_corrected: Optional[str] = result.get("transcript_corrected") or None
|
|
||||||
|
|
||||||
if incident_type in ("unknown", "other", ""):
|
# Resolve node position once for geocoding all scenes
|
||||||
incident_type = None
|
node_lat: Optional[float] = None
|
||||||
|
node_lon: Optional[float] = None
|
||||||
# Geocode the location string if we have one and a node to bias toward
|
if node_id:
|
||||||
location_coords: Optional[dict] = None
|
node_doc = await fstore.doc_get_cached("nodes", node_id)
|
||||||
if location and node_id:
|
|
||||||
node_doc = await fstore.doc_get("nodes", node_id)
|
|
||||||
if node_doc:
|
if node_doc:
|
||||||
node_lat = node_doc.get("lat")
|
node_lat = node_doc.get("lat")
|
||||||
node_lon = node_doc.get("lon")
|
node_lon = node_doc.get("lon")
|
||||||
if node_lat is not None and node_lon is not None:
|
|
||||||
state = await _get_node_state(node_id, node_lat, node_lon)
|
|
||||||
muni = _municipality_from_tg(talkgroup_name)
|
|
||||||
hint_parts = [p for p in [muni, state] if p]
|
|
||||||
query = f"{location}, {', '.join(hint_parts)}" if hint_parts else location
|
|
||||||
location_coords = await _geocode_location(query, node_lat, node_lon)
|
|
||||||
|
|
||||||
# Store embedding alongside structured data
|
processed: list[dict] = []
|
||||||
embedding = await asyncio.to_thread(_sync_embed, _embed_text(transcript, incident_type))
|
for scene in raw_scenes:
|
||||||
|
tags: list[str] = scene.get("tags") or []
|
||||||
|
incident_type: Optional[str] = scene.get("incident_type") or None
|
||||||
|
location: Optional[str] = scene.get("location") or None
|
||||||
|
vehicles: list[str] = scene.get("vehicles") or []
|
||||||
|
units: list[str] = scene.get("units") or []
|
||||||
|
cleared_units: list[str] = scene.get("cleared_units") or []
|
||||||
|
severity: str = scene.get("severity") or "unknown"
|
||||||
|
resolved: bool = bool(scene.get("resolved", False))
|
||||||
|
reassignment: bool = bool(scene.get("reassignment", False))
|
||||||
|
transcript_corrected: Optional[str]= scene.get("transcript_corrected") or None
|
||||||
|
segment_indices: Optional[list] = scene.get("segment_indices")
|
||||||
|
|
||||||
updates: dict = {"tags": tags, "severity": severity}
|
if incident_type in ("unknown", "other", ""):
|
||||||
if location:
|
incident_type = None
|
||||||
updates["location"] = location
|
|
||||||
if location_coords:
|
# Geocode this scene's location
|
||||||
updates["location_coords"] = location_coords
|
location_coords: Optional[dict] = None
|
||||||
if vehicles:
|
if location and node_lat is not None and node_lon is not None:
|
||||||
updates["vehicles"] = vehicles
|
state = await _get_node_state(node_id, node_lat, node_lon)
|
||||||
if units:
|
muni = _municipality_from_tg(talkgroup_name)
|
||||||
updates["units"] = units
|
hint_parts = [p for p in [muni, state] if p]
|
||||||
if embedding:
|
query = f"{location}, {', '.join(hint_parts)}" if hint_parts else location
|
||||||
updates["embedding"] = embedding
|
location_coords = await _geocode_location(query, node_lat, node_lon)
|
||||||
if transcript_corrected and not preserve_transcript_correction:
|
|
||||||
updates["transcript_corrected"] = transcript_corrected
|
# Embed this scene's content
|
||||||
|
scene_text = _build_scene_embed_text(
|
||||||
|
transcript, segments, segment_indices, incident_type, transcript_corrected
|
||||||
|
)
|
||||||
|
embedding = await asyncio.to_thread(_sync_embed, scene_text)
|
||||||
|
|
||||||
|
processed.append({
|
||||||
|
"tags": tags,
|
||||||
|
"incident_type": incident_type,
|
||||||
|
"location": location,
|
||||||
|
"location_coords": location_coords,
|
||||||
|
"vehicles": vehicles,
|
||||||
|
"units": units,
|
||||||
|
"cleared_units": cleared_units,
|
||||||
|
"severity": severity,
|
||||||
|
"resolved": resolved,
|
||||||
|
"reassignment": reassignment,
|
||||||
|
"transcript_corrected": transcript_corrected,
|
||||||
|
"segment_indices": segment_indices,
|
||||||
|
"embedding": embedding,
|
||||||
|
})
|
||||||
|
|
||||||
|
# Merge across scenes for the call-level Firestore document.
|
||||||
|
# Primary scene (first) owns location, severity, transcript_corrected.
|
||||||
|
# Tags/units/vehicles are union-merged from all scenes.
|
||||||
|
primary = processed[0]
|
||||||
|
all_tags = list(dict.fromkeys(t for s in processed for t in s["tags"]))
|
||||||
|
all_units = list(dict.fromkeys(u for s in processed for u in s["units"]))
|
||||||
|
all_vehicles = list(dict.fromkeys(v for s in processed for v in s["vehicles"]))
|
||||||
|
all_cleared = list(dict.fromkeys(u for s in processed for u in s["cleared_units"]))
|
||||||
|
|
||||||
|
updates: dict = {"tags": all_tags, "severity": primary["severity"]}
|
||||||
|
if primary["location"]:
|
||||||
|
updates["location"] = primary["location"]
|
||||||
|
if primary["location_coords"]:
|
||||||
|
updates["location_coords"] = primary["location_coords"]
|
||||||
|
if all_units:
|
||||||
|
updates["units"] = all_units
|
||||||
|
if all_cleared:
|
||||||
|
updates["cleared_units"] = all_cleared
|
||||||
|
if all_vehicles:
|
||||||
|
updates["vehicles"] = all_vehicles
|
||||||
|
if primary["embedding"]:
|
||||||
|
updates["embedding"] = primary["embedding"]
|
||||||
|
if primary["transcript_corrected"] and not preserve_transcript_correction:
|
||||||
|
updates["transcript_corrected"] = primary["transcript_corrected"]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await fstore.doc_set("calls", call_id, updates)
|
await fstore.doc_set("calls", call_id, updates)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Could not save intelligence for call {call_id}: {e}")
|
logger.warning(f"Could not save intelligence for call {call_id}: {e}")
|
||||||
|
|
||||||
logger.info(
|
scene_summary = (
|
||||||
f"Intelligence: call {call_id} → type={incident_type}, "
|
f"{len(processed)} scene(s): "
|
||||||
f"tags={tags}, location={location!r}, coords={location_coords}, severity={severity}, "
|
+ ", ".join(
|
||||||
f"corrected={transcript_corrected is not None}"
|
f"[{s['incident_type'] or 'unclassified'} tags={s['tags'][:2]}]"
|
||||||
|
for s in processed
|
||||||
|
)
|
||||||
)
|
)
|
||||||
return tags, incident_type, location, location_coords, resolved
|
logger.info(f"Intelligence: call {call_id} → {scene_summary}")
|
||||||
|
return processed
|
||||||
|
|
||||||
|
|
||||||
async def _geocode_location(
|
async def _geocode_location(
|
||||||
@@ -213,7 +290,6 @@ def _municipality_from_tg(tg_name: Optional[str]) -> Optional[str]:
|
|||||||
if not tg_name:
|
if not tg_name:
|
||||||
return None
|
return None
|
||||||
cleaned = _TG_SUFFIX_RE.sub("", tg_name).strip()
|
cleaned = _TG_SUFFIX_RE.sub("", tg_name).strip()
|
||||||
# Discard if nothing left, purely numeric, or a short all-caps abbreviation (e.g. "WC", "TAC")
|
|
||||||
if not cleaned or cleaned.isdigit() or (len(cleaned) <= 3 and cleaned.isupper()):
|
if not cleaned or cleaned.isdigit() or (len(cleaned) <= 3 and cleaned.isupper()):
|
||||||
return None
|
return None
|
||||||
return cleaned
|
return cleaned
|
||||||
@@ -227,26 +303,48 @@ def _build_transcript_block(transcript: str, segments: Optional[list[dict]]) ->
|
|||||||
return f"Transcript:\n{transcript}"
|
return f"Transcript:\n{transcript}"
|
||||||
|
|
||||||
|
|
||||||
|
def _build_scene_embed_text(
|
||||||
|
transcript: str,
|
||||||
|
segments: Optional[list[dict]],
|
||||||
|
segment_indices: Optional[list[int]],
|
||||||
|
incident_type: Optional[str],
|
||||||
|
transcript_corrected: Optional[str],
|
||||||
|
) -> str:
|
||||||
|
"""Build the text string to embed for a specific scene."""
|
||||||
|
prefix = f"[{incident_type}] " if incident_type else ""
|
||||||
|
if transcript_corrected:
|
||||||
|
return f"{prefix}{transcript_corrected}"
|
||||||
|
if segments and segment_indices:
|
||||||
|
texts = [segments[i]["text"] for i in segment_indices if i < len(segments)]
|
||||||
|
return f"{prefix}{' '.join(texts)}"
|
||||||
|
return f"{prefix}{transcript}"
|
||||||
|
|
||||||
|
|
||||||
def _sync_extract(
|
def _sync_extract(
|
||||||
transcript: str,
|
transcript: str,
|
||||||
talkgroup_name: Optional[str],
|
talkgroup_name: Optional[str],
|
||||||
talkgroup_id: Optional[int],
|
talkgroup_id: Optional[int],
|
||||||
system_id: Optional[str],
|
system_id: Optional[str],
|
||||||
segments: Optional[list[dict]],
|
segments: Optional[list[dict]],
|
||||||
) -> dict:
|
vocabulary: Optional[list[str]] = None,
|
||||||
"""Call GPT-4o mini and parse the JSON response."""
|
ten_codes: Optional[dict[str, str]] = None,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Call GPT-4o-mini and return a list of scene dicts."""
|
||||||
from app.config import settings
|
from app.config import settings
|
||||||
from openai import OpenAI
|
from openai import OpenAI
|
||||||
|
|
||||||
if not settings.openai_api_key:
|
if not settings.openai_api_key:
|
||||||
logger.warning("OPENAI_API_KEY not set — intelligence extraction disabled.")
|
logger.warning("OPENAI_API_KEY not set — intelligence extraction disabled.")
|
||||||
return {}
|
return []
|
||||||
|
|
||||||
|
from app.internal.vocabulary_learner import build_gpt_vocab_block
|
||||||
tg = f"{talkgroup_name} (TGID {talkgroup_id})" if talkgroup_id else (talkgroup_name or "unknown")
|
tg = f"{talkgroup_name} (TGID {talkgroup_id})" if talkgroup_id else (talkgroup_name or "unknown")
|
||||||
prompt = _PROMPT_TEMPLATE.format(
|
prompt = _PROMPT_TEMPLATE.format(
|
||||||
transcript_block=_build_transcript_block(transcript, segments),
|
transcript_block=_build_transcript_block(transcript, segments),
|
||||||
talkgroup_name=tg,
|
talkgroup_name=tg,
|
||||||
system_id=system_id or "unknown",
|
system_id=system_id or "unknown",
|
||||||
|
ten_codes_block=_build_ten_codes_block(ten_codes or {}),
|
||||||
|
vocabulary_block=build_gpt_vocab_block(vocabulary or []),
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -256,13 +354,22 @@ def _sync_extract(
|
|||||||
messages=[{"role": "user", "content": prompt}],
|
messages=[{"role": "user", "content": prompt}],
|
||||||
response_format={"type": "json_object"},
|
response_format={"type": "json_object"},
|
||||||
)
|
)
|
||||||
return json.loads(response.choices[0].message.content)
|
raw = json.loads(response.choices[0].message.content)
|
||||||
|
|
||||||
|
# New format: {"scenes": [...]}
|
||||||
|
if "scenes" in raw and isinstance(raw["scenes"], list):
|
||||||
|
return raw["scenes"]
|
||||||
|
|
||||||
|
# Fallback: GPT returned the old flat single-scene format
|
||||||
|
logger.warning("GPT returned flat format instead of scenes array — wrapping")
|
||||||
|
return [raw]
|
||||||
|
|
||||||
except json.JSONDecodeError as e:
|
except json.JSONDecodeError as e:
|
||||||
logger.warning(f"GPT-4o mini returned non-JSON: {e}")
|
logger.warning(f"GPT-4o-mini returned non-JSON: {e}")
|
||||||
return {}
|
return []
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"GPT-4o mini extraction failed: {e}")
|
logger.warning(f"GPT-4o-mini extraction failed: {e}")
|
||||||
return {}
|
return []
|
||||||
|
|
||||||
|
|
||||||
def _sync_embed(text: str) -> Optional[list[float]]:
|
def _sync_embed(text: str) -> Optional[list[float]]:
|
||||||
@@ -280,8 +387,3 @@ def _sync_embed(text: str) -> Optional[list[float]]:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Embedding generation failed: {e}")
|
logger.warning(f"Embedding generation failed: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def _embed_text(transcript: str, incident_type: Optional[str]) -> str:
|
|
||||||
prefix = f"[{incident_type}] " if incident_type else ""
|
|
||||||
return f"{prefix}{transcript}"
|
|
||||||
|
|||||||
@@ -109,10 +109,11 @@ class MQTTHandler:
|
|||||||
updates["status"] = "online"
|
updates["status"] = "online"
|
||||||
await fstore.doc_update("nodes", node_id, updates)
|
await fstore.doc_update("nodes", node_id, updates)
|
||||||
|
|
||||||
# Release any orphaned Discord token when the node explicitly reports disconnected
|
# NOTE: discord_connected in checkins is informational only — do NOT release the
|
||||||
if payload.get("discord_connected") is False:
|
# token here. The bot watchdog reconnects on transient Discord drops, so a single
|
||||||
from app.routers.tokens import release_token
|
# checkin with discord_connected=False during a brief reconnect window would
|
||||||
await release_token(node_id)
|
# incorrectly free the token while the bot is still active. Token release is
|
||||||
|
# handled exclusively by the discord_leave command and the node offline sweeper.
|
||||||
|
|
||||||
# ------------------------------------------------------------------
|
# ------------------------------------------------------------------
|
||||||
# Status update
|
# Status update
|
||||||
@@ -143,8 +144,8 @@ class MQTTHandler:
|
|||||||
if not call_id:
|
if not call_id:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Look up assigned system for this node
|
# Look up assigned system for this node (cached — assignment rarely changes)
|
||||||
node = await fstore.doc_get("nodes", node_id)
|
node = await fstore.doc_get_cached("nodes", node_id)
|
||||||
system_id = node.get("assigned_system_id") if node else None
|
system_id = node.get("assigned_system_id") if node else None
|
||||||
|
|
||||||
started_at_raw = payload.get("started_at")
|
started_at_raw = payload.get("started_at")
|
||||||
@@ -157,7 +158,7 @@ class MQTTHandler:
|
|||||||
# Prefer the name from OP25 metadata; fall back to the system config
|
# Prefer the name from OP25 metadata; fall back to the system config
|
||||||
tgid_name = payload.get("tgid_name") or ""
|
tgid_name = payload.get("tgid_name") or ""
|
||||||
if not tgid_name and system_id and payload.get("tgid"):
|
if not tgid_name and system_id and payload.get("tgid"):
|
||||||
system_doc = await fstore.doc_get("systems", system_id)
|
system_doc = await fstore.doc_get_cached("systems", system_id)
|
||||||
if system_doc:
|
if system_doc:
|
||||||
tgid_int = int(payload["tgid"])
|
tgid_int = int(payload["tgid"])
|
||||||
for tg in system_doc.get("config", {}).get("talkgroups", []):
|
for tg in system_doc.get("config", {}).get("talkgroups", []):
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from app.config import settings
|
|||||||
from app.internal.logger import logger
|
from app.internal.logger import logger
|
||||||
from app.internal import firestore as fstore
|
from app.internal import firestore as fstore
|
||||||
|
|
||||||
SWEEP_INTERVAL = 30 # seconds
|
SWEEP_INTERVAL = 90 # seconds — matches node_offline_threshold; no gain in checking faster
|
||||||
|
|
||||||
|
|
||||||
async def sweeper_loop():
|
async def sweeper_loop():
|
||||||
|
|||||||
@@ -0,0 +1,128 @@
|
|||||||
|
"""
|
||||||
|
Re-correlation sweep.
|
||||||
|
|
||||||
|
Runs every summary_interval_minutes (same tick as the summarizer). Each pass
|
||||||
|
finds calls that are:
|
||||||
|
- recently ended (ended_at within the last recorrelation_scan_minutes)
|
||||||
|
- still orphaned (incident_id is null)
|
||||||
|
|
||||||
|
and re-runs the incident correlator against currently-active incidents, using
|
||||||
|
the call's own started_at as the time anchor so the window is correct regardless
|
||||||
|
of when the sweep fires.
|
||||||
|
|
||||||
|
Never creates new incidents — link-only. Zero LLM tokens (uses pre-computed
|
||||||
|
talkgroup strings, haversine math, and stored embeddings).
|
||||||
|
"""
|
||||||
|
import asyncio
|
||||||
|
from datetime import datetime, timezone, timedelta
|
||||||
|
from typing import Optional
|
||||||
|
from app.internal.logger import logger
|
||||||
|
from app.internal import firestore as fstore
|
||||||
|
from app.config import settings
|
||||||
|
|
||||||
|
|
||||||
|
async def recorrelation_loop() -> None:
|
||||||
|
interval = settings.summary_interval_minutes * 60
|
||||||
|
logger.info(
|
||||||
|
f"Re-correlation sweep started — "
|
||||||
|
f"interval: {settings.summary_interval_minutes}m, "
|
||||||
|
f"scan window: {settings.recorrelation_scan_minutes}m"
|
||||||
|
)
|
||||||
|
while True:
|
||||||
|
await asyncio.sleep(interval)
|
||||||
|
try:
|
||||||
|
await _run_sweep_pass()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Re-correlation sweep failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
async def _run_sweep_pass() -> None:
|
||||||
|
cutoff = datetime.now(timezone.utc) - timedelta(minutes=settings.recorrelation_scan_minutes)
|
||||||
|
|
||||||
|
# Server-side range query: only calls that ended within the scan window.
|
||||||
|
# Filter incident_id=null client-side (Firestore can't query for missing fields).
|
||||||
|
# This keeps the fetched set small regardless of total collection size.
|
||||||
|
recent_ended = await fstore.collection_where("calls", [
|
||||||
|
("status", "==", "ended"),
|
||||||
|
("ended_at", ">=", cutoff),
|
||||||
|
])
|
||||||
|
# corr_path="unlinked" is written after MAX_SWEEP_ATTEMPTS failures.
|
||||||
|
# Allows a few retries so a welfare-check call can link to an escalation
|
||||||
|
# incident that is created a few minutes later, without sweeping 30× forever.
|
||||||
|
MAX_SWEEP_ATTEMPTS = 3
|
||||||
|
orphans = [
|
||||||
|
c for c in recent_ended
|
||||||
|
if not c.get("incident_ids") and not c.get("incident_id")
|
||||||
|
and not c.get("corr_path") # skip calls already exhausted
|
||||||
|
and c.get("corr_sweep_count", 0) < MAX_SWEEP_ATTEMPTS
|
||||||
|
]
|
||||||
|
|
||||||
|
if not orphans:
|
||||||
|
return
|
||||||
|
|
||||||
|
logger.info(f"Re-correlation sweep: {len(orphans)} orphaned call(s) to check")
|
||||||
|
linked = 0
|
||||||
|
for call in orphans:
|
||||||
|
if await _recorrelate_orphan(call):
|
||||||
|
linked += 1
|
||||||
|
|
||||||
|
if linked:
|
||||||
|
logger.info(f"Re-correlation sweep: linked {linked}/{len(orphans)} orphaned call(s)")
|
||||||
|
|
||||||
|
|
||||||
|
async def _recorrelate_orphan(call: dict) -> bool:
|
||||||
|
"""
|
||||||
|
Attempt to link a single orphaned call to an existing incident.
|
||||||
|
Returns True if a match was found and the call was linked.
|
||||||
|
"""
|
||||||
|
from app.internal import incident_correlator
|
||||||
|
|
||||||
|
call_id = call.get("call_id")
|
||||||
|
started_at = _parse_dt(call.get("started_at"))
|
||||||
|
if not call_id or not started_at:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# All data needed for correlation was stored by the first-pass extraction.
|
||||||
|
incident_id = await incident_correlator.correlate_call(
|
||||||
|
call_id = call_id,
|
||||||
|
node_id = call.get("node_id", ""),
|
||||||
|
system_id = call.get("system_id"),
|
||||||
|
talkgroup_id = call.get("talkgroup_id"),
|
||||||
|
talkgroup_name = call.get("talkgroup_name"),
|
||||||
|
tags = call.get("tags") or [],
|
||||||
|
incident_type = call.get("incident_type"),
|
||||||
|
location = call.get("location"),
|
||||||
|
location_coords= call.get("location_coords"),
|
||||||
|
cleared_units = call.get("cleared_units") or [],
|
||||||
|
reference_time = started_at, # anchor window to when the call happened
|
||||||
|
create_if_new = False, # never create — link-only
|
||||||
|
)
|
||||||
|
|
||||||
|
if incident_id:
|
||||||
|
await fstore.doc_set("calls", call_id, {"incident_ids": [incident_id]})
|
||||||
|
logger.info(
|
||||||
|
f"Re-correlation: linked orphaned call {call_id} → incident {incident_id}"
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Increment the attempt counter. Once MAX_SWEEP_ATTEMPTS is reached the
|
||||||
|
# orphan filter above will stop picking this call up, and we write
|
||||||
|
# corr_path="unlinked" as a permanent tombstone.
|
||||||
|
attempts = call.get("corr_sweep_count", 0) + 1
|
||||||
|
update: dict = {"corr_sweep_count": attempts}
|
||||||
|
if attempts >= 3:
|
||||||
|
update["corr_path"] = "unlinked"
|
||||||
|
await fstore.doc_set("calls", call_id, update)
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_dt(value) -> Optional[datetime]:
|
||||||
|
if not value:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
dt = datetime.fromisoformat(str(value).replace("Z", "+00:00"))
|
||||||
|
if dt.tzinfo is None:
|
||||||
|
dt = dt.replace(tzinfo=timezone.utc)
|
||||||
|
return dt
|
||||||
|
except Exception:
|
||||||
|
return None
|
||||||
@@ -16,13 +16,18 @@ from app.config import settings
|
|||||||
|
|
||||||
|
|
||||||
async def summarizer_loop() -> None:
|
async def summarizer_loop() -> None:
|
||||||
|
from app.internal.feature_flags import get_flags
|
||||||
interval = settings.summary_interval_minutes * 60
|
interval = settings.summary_interval_minutes * 60
|
||||||
logger.info(f"Summarizer started — interval: {settings.summary_interval_minutes}m")
|
logger.info(f"Summarizer started — interval: {settings.summary_interval_minutes}m")
|
||||||
while True:
|
while True:
|
||||||
await asyncio.sleep(interval)
|
await asyncio.sleep(interval)
|
||||||
try:
|
try:
|
||||||
await _run_summary_pass()
|
flags = await get_flags()
|
||||||
await _resolve_stale_incidents()
|
if flags["summaries_enabled"]:
|
||||||
|
await _run_summary_pass()
|
||||||
|
await _resolve_stale_incidents()
|
||||||
|
else:
|
||||||
|
logger.info("Summaries disabled — skipping summary pass and stale incident sweep")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Summarizer pass failed: {e}")
|
logger.error(f"Summarizer pass failed: {e}")
|
||||||
|
|
||||||
@@ -97,6 +102,8 @@ async def _resolve_stale_incidents() -> None:
|
|||||||
idle_minutes = (now - updated_dt).total_seconds() / 60
|
idle_minutes = (now - updated_dt).total_seconds() / 60
|
||||||
if idle_minutes > settings.incident_auto_resolve_minutes:
|
if idle_minutes > settings.incident_auto_resolve_minutes:
|
||||||
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
||||||
|
from app.internal.incident_correlator import maybe_resolve_parent
|
||||||
|
await maybe_resolve_parent(incident_id)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Auto-resolved stale incident {incident_id} "
|
f"Auto-resolved stale incident {incident_id} "
|
||||||
f"(idle {idle_minutes:.0f}m)"
|
f"(idle {idle_minutes:.0f}m)"
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ async def transcribe_call(
|
|||||||
call_id: str,
|
call_id: str,
|
||||||
gcs_uri: str,
|
gcs_uri: str,
|
||||||
talkgroup_name: Optional[str] = None,
|
talkgroup_name: Optional[str] = None,
|
||||||
|
system_id: Optional[str] = None,
|
||||||
) -> tuple[Optional[str], list[dict]]:
|
) -> tuple[Optional[str], list[dict]]:
|
||||||
"""
|
"""
|
||||||
Transcribe audio at the given GCS URI and store the result in Firestore.
|
Transcribe audio at the given GCS URI and store the result in Firestore.
|
||||||
@@ -39,8 +40,17 @@ async def transcribe_call(
|
|||||||
if not gcs_uri or not gcs_uri.startswith("gs://"):
|
if not gcs_uri or not gcs_uri.startswith("gs://"):
|
||||||
return None, []
|
return None, []
|
||||||
|
|
||||||
|
# Load vocabulary for this system (empty list if none yet)
|
||||||
|
vocabulary: list[str] = []
|
||||||
|
if system_id:
|
||||||
|
from app.internal.vocabulary_learner import get_vocabulary
|
||||||
|
vocab_data = await get_vocabulary(system_id)
|
||||||
|
vocabulary = vocab_data.get("vocabulary") or []
|
||||||
|
|
||||||
try:
|
try:
|
||||||
transcript, segments = await asyncio.to_thread(_sync_transcribe, gcs_uri, talkgroup_name)
|
transcript, segments = await asyncio.to_thread(
|
||||||
|
_sync_transcribe, gcs_uri, talkgroup_name, vocabulary
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Transcription failed for call {call_id}: {e}")
|
logger.warning(f"Transcription failed for call {call_id}: {e}")
|
||||||
return None, []
|
return None, []
|
||||||
@@ -61,7 +71,11 @@ async def transcribe_call(
|
|||||||
return transcript, segments
|
return transcript, segments
|
||||||
|
|
||||||
|
|
||||||
def _sync_transcribe(gcs_uri: str, talkgroup_name: Optional[str] = None) -> tuple[Optional[str], list[dict]]:
|
def _sync_transcribe(
|
||||||
|
gcs_uri: str,
|
||||||
|
talkgroup_name: Optional[str] = None,
|
||||||
|
vocabulary: Optional[list[str]] = None,
|
||||||
|
) -> tuple[Optional[str], list[dict]]:
|
||||||
"""Download audio from GCS and transcribe with OpenAI Whisper."""
|
"""Download audio from GCS and transcribe with OpenAI Whisper."""
|
||||||
from google.cloud import storage as gcs
|
from google.cloud import storage as gcs
|
||||||
from google.oauth2 import service_account
|
from google.oauth2 import service_account
|
||||||
@@ -94,7 +108,10 @@ def _sync_transcribe(gcs_uri: str, talkgroup_name: Optional[str] = None) -> tupl
|
|||||||
try:
|
try:
|
||||||
blob.download_to_filename(tmp_path)
|
blob.download_to_filename(tmp_path)
|
||||||
|
|
||||||
prompt = (f"Talkgroup: {talkgroup_name}. " + _WHISPER_PROMPT) if talkgroup_name else _WHISPER_PROMPT
|
from app.internal.vocabulary_learner import build_whisper_vocab_prompt
|
||||||
|
vocab_prefix = build_whisper_vocab_prompt(vocabulary or [])
|
||||||
|
tg_prefix = f"Talkgroup: {talkgroup_name}. " if talkgroup_name else ""
|
||||||
|
prompt = tg_prefix + vocab_prefix + _WHISPER_PROMPT
|
||||||
|
|
||||||
openai_client = OpenAI(api_key=settings.openai_api_key)
|
openai_client = OpenAI(api_key=settings.openai_api_key)
|
||||||
with open(tmp_path, "rb") as f:
|
with open(tmp_path, "rb") as f:
|
||||||
@@ -104,13 +121,31 @@ def _sync_transcribe(gcs_uri: str, talkgroup_name: Optional[str] = None) -> tupl
|
|||||||
language="en",
|
language="en",
|
||||||
prompt=prompt,
|
prompt=prompt,
|
||||||
response_format="verbose_json",
|
response_format="verbose_json",
|
||||||
|
temperature=0,
|
||||||
)
|
)
|
||||||
text = response.text.strip() or None
|
# Filter hallucinated segments. Two sources of hallucination in P25 recordings:
|
||||||
|
#
|
||||||
|
# 1. Trailing silence / static — Whisper fills silence past real content with
|
||||||
|
# sequential radio codes (10-4, 10-5...). Clamped by audio duration.
|
||||||
|
#
|
||||||
|
# 2. Leading silence — OP25 recordings typically have a short silence at the
|
||||||
|
# start before the first PTT press. Whisper sometimes hallucinates filler
|
||||||
|
# words or codes over this silence. Detected via no_speech_prob > 0.8
|
||||||
|
# (Whisper's own confidence that a segment contains no real speech).
|
||||||
|
audio_duration: float = getattr(response, "duration", None) or float("inf")
|
||||||
segments = [
|
segments = [
|
||||||
{"start": round(s.start, 2), "end": round(s.end, 2), "text": s.text.strip()}
|
{"start": round(s.start, 2), "end": round(s.end, 2), "text": s.text.strip()}
|
||||||
for s in (response.segments or [])
|
for s in (response.segments or [])
|
||||||
if s.text.strip()
|
if s.text.strip()
|
||||||
|
and s.start < audio_duration
|
||||||
|
and getattr(s, "no_speech_prob", 0.0) < 0.8
|
||||||
]
|
]
|
||||||
|
# Reconstruct text from non-hallucinated segments only so the two stay
|
||||||
|
# in sync. If every segment was filtered (e.g. pure static or repeated
|
||||||
|
# prompt-word hallucination like "Standby. Standby. Standby..."), text
|
||||||
|
# becomes None which prevents the intelligence pipeline from running on
|
||||||
|
# hallucinated content.
|
||||||
|
text = " ".join(s["text"] for s in segments) or None
|
||||||
return text, segments
|
return text, segments
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -0,0 +1,437 @@
|
|||||||
|
"""
|
||||||
|
Per-system vocabulary learning for STT accuracy improvement.
|
||||||
|
|
||||||
|
Three mechanisms:
|
||||||
|
1. Bootstrap — one-shot GPT-4o call generates local knowledge at system setup:
|
||||||
|
agencies + abbreviations, unit naming, streets, acronyms.
|
||||||
|
2. Correction — diffs admin transcript edits, extracts corrected tokens → vocabulary.
|
||||||
|
3. Induction — background loop samples N tokens of transcripts per system,
|
||||||
|
asks GPT-4o-mini to propose new terms → queued as pending for review.
|
||||||
|
|
||||||
|
Firestore schema additions on system documents:
|
||||||
|
vocabulary: list[str] — approved terms; injected into Whisper + GPT prompts
|
||||||
|
vocabulary_pending: list[dict] — induction suggestions awaiting admin review
|
||||||
|
each: {term, source, added_at}
|
||||||
|
vocabulary_bootstrapped: bool — bootstrap has been run at least once
|
||||||
|
"""
|
||||||
|
import asyncio
import difflib
import json
import random
from datetime import datetime, timedelta, timezone
from typing import Optional

from app.internal.logger import logger
from app.internal import firestore as fstore
from app.config import settings
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Prompt templates
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
_BOOTSTRAP_PROMPT = """\
|
||||||
|
You are building a radio vocabulary dictionary to improve speech-to-text accuracy for a P25 \
|
||||||
|
public-safety radio monitoring system in a specific area. The STT model has no local knowledge, \
|
||||||
|
so common terms like "YVAC" get transcribed as "why vac", "5-baker" as "5 acre", etc.
|
||||||
|
|
||||||
|
System name: {system_name}
|
||||||
|
System type: {system_type}
|
||||||
|
Area context: {area_hint}
|
||||||
|
|
||||||
|
Return ONLY a JSON object:
|
||||||
|
{{"vocabulary": [list of strings]}}
|
||||||
|
|
||||||
|
Include terms you are confident about for this area:
|
||||||
|
- Agency names and their radio abbreviations (e.g. "YVAC" = Yorktown Volunteer Ambulance Corps)
|
||||||
|
- Unit ID examples using the local naming convention (e.g. "5-baker", "5-charlie", "1-david";
|
||||||
|
many departments use APCO phonetics: adam, baker, charles, david, edward, frank, george,
|
||||||
|
henry, ida, john, king, lincoln, mary, nora, ocean, paul, queen, robert, sam, tom, union,
|
||||||
|
victor, william, x-ray, young, zebra)
|
||||||
|
- Major routes, roads, and key intersections
|
||||||
|
- Local landmarks and geographic references dispatchers use
|
||||||
|
- Agency-specific codes that differ from standard APCO
|
||||||
|
|
||||||
|
Return a flat list of strings — abbreviations, proper names, unit IDs, street names.
|
||||||
|
Do NOT include common English words. Max 80 terms. Only include what you are confident is \
|
||||||
|
accurate for this specific area; return fewer terms rather than guessing."""
|
||||||
|
|
||||||
|
_INDUCTION_PROMPT = """\
|
||||||
|
You are analyzing P25 emergency radio transcripts to find vocabulary terms that should be \
|
||||||
|
added to improve future speech-to-text accuracy for this system.
|
||||||
|
|
||||||
|
System: {system_name}
|
||||||
|
Existing approved vocabulary (do not re-propose these): {existing_vocab}
|
||||||
|
|
||||||
|
Sampled transcripts:
|
||||||
|
{transcript_block}
|
||||||
|
|
||||||
|
Find terms that are LIKELY STT errors or local terms missing from the vocabulary:
|
||||||
|
- Unit IDs that appear garbled (e.g. "5 acre" → "5-baker")
|
||||||
|
- Agency acronyms spelled out phonetically (e.g. "why vac" → "YVAC")
|
||||||
|
- Street names or locations that look misspelled or oddly transcribed
|
||||||
|
- Callsigns or local codes not yet in the vocabulary
|
||||||
|
|
||||||
|
Return ONLY a JSON object:
|
||||||
|
{{"new_terms": ["term1", "term2", ...]}}
|
||||||
|
|
||||||
|
Only include high-confidence additions not already in existing vocabulary.
|
||||||
|
Return {{"new_terms": []}} if nothing new is found."""
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Public API
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
async def bootstrap_system_vocabulary(system_id: str) -> list[str]:
    """
    Run the one-shot GPT-4o bootstrap that seeds a system's vocabulary with
    local knowledge (agencies, unit naming, streets, acronyms).

    Merges generated terms into ``system.vocabulary`` (case-insensitive
    dedup), flags the system as ``vocabulary_bootstrapped``, and returns
    only the newly added terms. Returns [] when the system is unknown or
    the GPT call yields nothing.
    """
    record = await fstore.doc_get("systems", system_id)
    if not record:
        logger.warning(f"Vocabulary bootstrap: system {system_id} not found")
        return []

    name = record.get("name", "Unknown")
    kind = record.get("type", "P25")

    # Derive an area hint from up to 8 configured talkgroup names.
    configured = record.get("config", {}).get("talkgroups", [])
    labels = [tg.get("name", "") for tg in configured if tg.get("name")][:8]
    if labels:
        hint = f"Talkgroups include: {', '.join(labels)}"
    else:
        hint = "Unknown area"

    generated = await asyncio.to_thread(_sync_bootstrap, name, kind, hint)
    if not generated:
        return []

    # Case-insensitive dedup against the terms already on the document.
    current = record.get("vocabulary") or []
    seen = {t.lower() for t in current}
    fresh = [t for t in generated if t.lower() not in seen]
    combined = list(dict.fromkeys(current + fresh))

    await fstore.doc_set("systems", system_id, {
        "vocabulary": combined,
        "vocabulary_bootstrapped": True,
    })
    logger.info(
        f"Vocabulary bootstrap: {len(fresh)} term(s) generated for system {system_id} "
        f"({name})"
    )
    return fresh
|
||||||
|
|
||||||
|
|
||||||
|
async def learn_from_correction(system_id: str, original: str, corrected: str) -> None:
    """
    Learn vocabulary from an admin transcript edit.

    Token-diffs ``original`` against ``corrected`` and appends any genuinely
    new tokens to the system's approved vocabulary. No-op when either text
    is empty, the system is unknown, or the diff yields nothing new.
    """
    if not (system_id and original and corrected):
        return

    candidates = _diff_new_terms(original, corrected)
    if not candidates:
        return

    record = await fstore.doc_get("systems", system_id)
    if not record:
        return

    current = record.get("vocabulary") or []
    known = {t.lower() for t in current}
    additions = [t for t in candidates if t.lower() not in known]
    if not additions:
        return

    await fstore.doc_set(
        "systems", system_id, {"vocabulary": list(dict.fromkeys(current + additions))}
    )
    logger.info(
        f"Vocabulary: learned {len(additions)} term(s) from correction on system {system_id}: "
        f"{additions}"
    )
|
||||||
|
|
||||||
|
|
||||||
|
async def approve_pending_term(system_id: str, term: str) -> None:
    """Promote a pending induction suggestion into the approved vocabulary."""
    record = await fstore.doc_get("systems", system_id)
    if not record:
        return
    # The term leaves the pending queue whether or not it is newly approved.
    queue = [p for p in (record.get("vocabulary_pending") or []) if p["term"] != term]
    approved = record.get("vocabulary") or []
    if term.lower() not in {t.lower() for t in approved}:
        approved = list(dict.fromkeys(approved + [term]))
    await fstore.doc_set("systems", system_id, {
        "vocabulary": approved,
        "vocabulary_pending": queue,
    })
|
||||||
|
|
||||||
|
|
||||||
|
async def dismiss_pending_term(system_id: str, term: str) -> None:
    """Drop a pending suggestion without promoting it to the vocabulary."""
    record = await fstore.doc_get("systems", system_id)
    if not record:
        return
    remaining = [p for p in (record.get("vocabulary_pending") or []) if p["term"] != term]
    await fstore.doc_set("systems", system_id, {"vocabulary_pending": remaining})
|
||||||
|
|
||||||
|
|
||||||
|
async def add_term(system_id: str, term: str) -> None:
    """
    Manually add a term to the approved vocabulary.

    The term is whitespace-stripped once, up front, so the duplicate check
    and the stored value agree (previously the membership test used the
    unstripped term while the append used ``term.strip()``, letting
    ``" YVAC "`` slip past an existing ``"YVAC"``, and an all-whitespace
    term was stored as ``""``). Membership is case-insensitive; the
    caller's casing is preserved on write. Writes only when the vocabulary
    actually changes; silently no-ops for unknown systems or empty terms.
    """
    term = term.strip()
    if not term:
        return
    system_doc = await fstore.doc_get("systems", system_id)
    if not system_doc:
        return
    vocab = system_doc.get("vocabulary") or []
    if term.lower() in {t.lower() for t in vocab}:
        return  # case-insensitive duplicate — nothing to write
    vocab = list(dict.fromkeys(vocab + [term]))
    await fstore.doc_set("systems", system_id, {"vocabulary": vocab})
|
||||||
|
|
||||||
|
|
||||||
|
async def remove_term(system_id: str, term: str) -> None:
    """Delete a term (case-insensitively) from the approved vocabulary."""
    record = await fstore.doc_get("systems", system_id)
    if not record:
        return
    needle = term.lower()
    kept = [t for t in (record.get("vocabulary") or []) if t.lower() != needle]
    await fstore.doc_set("systems", system_id, {"vocabulary": kept})
|
||||||
|
|
||||||
|
|
||||||
|
async def get_vocabulary(system_id: str) -> dict:
    """Return vocabulary + pending terms for a system (reads the 5-min TTL cache)."""
    record = await fstore.doc_get_cached("systems", system_id) or {}
    # Missing document degrades to the empty/default shape rather than erroring.
    return {
        "vocabulary": record.get("vocabulary") or [],
        "vocabulary_pending": record.get("vocabulary_pending") or [],
        "vocabulary_bootstrapped": record.get("vocabulary_bootstrapped", False),
    }
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Prompt-injection helpers (called by transcription.py and intelligence.py)
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def build_whisper_vocab_prompt(vocabulary: list[str]) -> str:
    """
    Render a vocabulary list for Whisper prompt injection.

    Whisper's prompt acts as a context prior with a ~224-token cap; the base
    prompt already consumes ~70 tokens, so this keeps a greedy prefix of
    terms fitting a ~150-token (≈550-char) budget. Returns "" when nothing
    fits (or the vocabulary is empty).
    """
    budget = 550
    picked: list[str] = []
    spent = 0
    for entry in vocabulary or []:
        price = len(entry) + 2  # +2 accounts for the ", " separator
        if spent + price > budget:
            break  # stop at first overflow — preserves priority order
        picked.append(entry)
        spent += price
    if not picked:
        return ""
    return ", ".join(picked) + ". "
|
||||||
|
|
||||||
|
|
||||||
|
def build_gpt_vocab_block(vocabulary: list[str]) -> str:
    """Render the approved vocabulary as one line for GPT extraction prompts."""
    if vocabulary:
        return f"Known local terms: {', '.join(vocabulary)}\n"
    return ""
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Background induction loop
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
async def vocabulary_induction_loop() -> None:
    """
    Background loop: every N hours run one induction pass over all systems,
    unless the vocabulary-learning feature flag is off. Failures are logged
    and never terminate the loop.
    """
    from app.internal.feature_flags import get_flags

    sleep_s = settings.vocabulary_induction_interval_hours * 3600
    logger.info(
        f"Vocabulary induction loop started — "
        f"interval: {settings.vocabulary_induction_interval_hours}h, "
        f"sample budget: {settings.vocabulary_induction_sample_tokens} tokens"
    )
    while True:
        await asyncio.sleep(sleep_s)
        try:
            enabled = (await get_flags())["vocabulary_learning_enabled"]
            if not enabled:
                logger.info("Vocabulary learning disabled — skipping induction pass")
                continue
            await _run_induction_pass()
        except Exception as e:
            logger.error(f"Vocabulary induction pass failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
async def _run_induction_pass() -> None:
    """Run induction once for every known system, isolating per-system failures."""
    systems = await fstore.collection_list("systems")
    if not systems:
        return
    logger.info(f"Vocabulary induction: processing {len(systems)} system(s)")
    for record in systems:
        sid = record.get("system_id")
        if not sid:
            continue
        try:
            await _induct_system(sid, record)
        except Exception as e:
            # One bad system must not block the rest of the pass.
            logger.warning(f"Induction failed for system {sid}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
async def _induct_system(system_id: str, system_doc: dict) -> None:
    """
    Sample random transcripts for a system and propose new vocabulary.

    Pulls recent calls, builds a random transcript sample up to the configured
    token budget, asks GPT-4o-mini (via ``_sync_induct``) for new terms, and
    queues anything not already approved or pending onto
    ``system.vocabulary_pending`` for admin review.
    """
    system_name = system_doc.get("name", "Unknown")
    existing_vocab: list[str] = system_doc.get("vocabulary") or []

    # Fetch calls from the last 7 days only — avoids scanning the entire history.
    # Active calls have ended_at=None and are excluded by the range filter automatically.
    # Needs a composite index on (system_id ASC, ended_at ASC).
    cutoff = datetime.now(timezone.utc) - timedelta(days=7)
    all_calls = await fstore.collection_where("calls", [
        ("system_id", "==", system_id),
        ("ended_at", ">=", cutoff),
    ])
    if not all_calls:
        return

    # Random sample up to the token budget (4 chars ≈ 1 token)
    random.shuffle(all_calls)
    char_budget = settings.vocabulary_induction_sample_tokens * 4
    transcript_block = ""
    sampled = 0
    for call in all_calls:
        # Prefer the admin-corrected transcript when one exists.
        text = call.get("transcript_corrected") or call.get("transcript") or ""
        if not text:
            continue
        if len(transcript_block) + len(text) > char_budget:
            break
        tg = call.get("talkgroup_name") or f"TGID {call.get('talkgroup_id', '?')}"
        transcript_block += f"[{tg}] {text}\n"
        sampled += 1

    if sampled < 3:
        return  # not enough data to learn from yet

    new_terms = await asyncio.to_thread(
        _sync_induct, system_name, existing_vocab, transcript_block
    )
    if not new_terms:
        return

    now = datetime.now(timezone.utc).isoformat()
    existing_pending: list[dict] = system_doc.get("vocabulary_pending") or []
    pending_lower = {p["term"].lower() for p in existing_pending}
    vocab_lower = {t.lower() for t in existing_vocab}

    # Queue only genuinely new terms: neither approved nor already pending.
    to_queue = [
        {"term": t, "source": "induction", "added_at": now}
        for t in new_terms
        if t.lower() not in vocab_lower and t.lower() not in pending_lower
    ]
    if not to_queue:
        return

    await fstore.doc_set("systems", system_id, {
        "vocabulary_pending": existing_pending + to_queue,
    })
    logger.info(
        f"Vocabulary induction: {len(to_queue)} new term(s) proposed for "
        f"system {system_id} ({system_name}): {[p['term'] for p in to_queue]}"
    )
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
# Internal sync helpers
|
||||||
|
# ─────────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
_STOP_WORDS = {
|
||||||
|
"the", "and", "for", "are", "was", "were", "this", "that", "with",
|
||||||
|
"have", "has", "had", "but", "not", "from", "they", "will", "what",
|
||||||
|
"can", "all", "been", "one", "two", "three", "four", "five", "six",
|
||||||
|
"you", "out", "who", "get", "her", "him", "his", "its", "our", "my",
|
||||||
|
"via", "per", "any", "now", "got", "she", "let", "did", "may", "yes",
|
||||||
|
"sir", "say", "see", "too", "off", "how", "put", "set", "try", "back",
|
||||||
|
"just", "like", "into", "than", "them", "then", "some", "also", "onto",
|
||||||
|
"went", "over", "copy", "okay", "unit", "post", "road", "lane", "going",
|
||||||
|
"being", "doing", "there", "their", "about", "would", "could", "should",
|
||||||
|
"route", "north", "south", "east", "west", "avenue", "street", "drive",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _diff_new_terms(original: str, corrected: str) -> list[str]:
|
||||||
|
"""
|
||||||
|
Token-level diff: find tokens in `corrected` that replaced or were inserted
|
||||||
|
relative to `original`. These are the admin's intended spellings — good
|
||||||
|
candidates for vocabulary.
|
||||||
|
"""
|
||||||
|
orig_tokens = original.split()
|
||||||
|
corr_tokens = corrected.split()
|
||||||
|
|
||||||
|
matcher = difflib.SequenceMatcher(None,
|
||||||
|
[t.lower() for t in orig_tokens],
|
||||||
|
[t.lower() for t in corr_tokens],
|
||||||
|
)
|
||||||
|
new_terms: list[str] = []
|
||||||
|
for tag, _i1, _i2, j1, j2 in matcher.get_opcodes():
|
||||||
|
if tag in ("insert", "replace"):
|
||||||
|
for tok in corr_tokens[j1:j2]:
|
||||||
|
clean = tok.strip(".,!?;:()'\"").strip("-")
|
||||||
|
if len(clean) >= 3 and clean.lower() not in _STOP_WORDS:
|
||||||
|
new_terms.append(clean)
|
||||||
|
|
||||||
|
return list(dict.fromkeys(new_terms))
|
||||||
|
|
||||||
|
|
||||||
|
def _sync_bootstrap(system_name: str, system_type: str, area_hint: str) -> list[str]:
    """
    Blocking GPT-4o call that generates bootstrap vocabulary for one system.

    Returns a list of stripped, non-empty term strings; [] when no API key is
    configured or when the call/parse fails (failure is logged, not raised).
    """
    from app.config import settings as cfg
    from openai import OpenAI

    if not cfg.openai_api_key:
        return []

    rendered = _BOOTSTRAP_PROMPT.format(
        system_name=system_name,
        system_type=system_type,
        area_hint=area_hint,
    )
    try:
        reply = OpenAI(api_key=cfg.openai_api_key).chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": rendered}],
            response_format={"type": "json_object"},
        )
        payload = json.loads(reply.choices[0].message.content)
        cleaned = (str(t).strip() for t in (payload.get("vocabulary") or []))
        return [t for t in cleaned if t]
    except Exception as e:
        logger.warning(f"Vocabulary bootstrap GPT call failed: {e}")
        return []
|
||||||
|
|
||||||
|
|
||||||
|
def _sync_induct(
    system_name: str, existing_vocab: list[str], transcript_block: str
) -> list[str]:
    """
    Blocking GPT-4o-mini call proposing new vocabulary from sampled transcripts.

    Existing vocabulary is capped at 80 terms and the transcript sample at
    8000 chars to bound prompt size. Returns stripped, non-empty terms; []
    when no API key is configured or the call/parse fails (logged, not raised).
    """
    from app.config import settings as cfg
    from openai import OpenAI

    if not cfg.openai_api_key:
        return []

    rendered = _INDUCTION_PROMPT.format(
        system_name=system_name,
        existing_vocab=", ".join(existing_vocab[:80]) if existing_vocab else "(none yet)",
        transcript_block=transcript_block[:8000],
    )
    try:
        reply = OpenAI(api_key=cfg.openai_api_key).chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": rendered}],
            response_format={"type": "json_object"},
        )
        payload = json.loads(reply.choices[0].message.content)
        cleaned = (str(t).strip() for t in (payload.get("new_terms") or []))
        return [t for t in cleaned if t]
    except Exception as e:
        logger.warning(f"Vocabulary induction GPT call failed: {e}")
        return []
|
||||||
+10
-3
@@ -6,9 +6,11 @@ from app.internal.logger import logger
|
|||||||
from app.internal.mqtt_handler import mqtt_handler
|
from app.internal.mqtt_handler import mqtt_handler
|
||||||
from app.internal.node_sweeper import sweeper_loop
|
from app.internal.node_sweeper import sweeper_loop
|
||||||
from app.internal.summarizer import summarizer_loop
|
from app.internal.summarizer import summarizer_loop
|
||||||
|
from app.internal.vocabulary_learner import vocabulary_induction_loop
|
||||||
|
from app.internal.recorrelation_sweep import recorrelation_loop
|
||||||
from app.config import settings
|
from app.config import settings
|
||||||
from app.internal.auth import require_firebase_token, require_service_or_firebase_token
|
from app.internal.auth import require_firebase_token, require_service_or_firebase_token
|
||||||
from app.routers import nodes, systems, calls, upload, tokens, incidents, alerts
|
from app.routers import nodes, systems, calls, upload, tokens, incidents, alerts, admin
|
||||||
from app.internal import firestore as fstore
|
from app.internal import firestore as fstore
|
||||||
|
|
||||||
|
|
||||||
@@ -35,14 +37,18 @@ async def lifespan(app: FastAPI):
|
|||||||
await _release_orphaned_tokens()
|
await _release_orphaned_tokens()
|
||||||
|
|
||||||
await mqtt_handler.connect()
|
await mqtt_handler.connect()
|
||||||
sweeper_task = asyncio.create_task(sweeper_loop())
|
sweeper_task = asyncio.create_task(sweeper_loop())
|
||||||
summarizer_task = asyncio.create_task(summarizer_loop())
|
summarizer_task = asyncio.create_task(summarizer_loop())
|
||||||
|
induction_task = asyncio.create_task(vocabulary_induction_loop())
|
||||||
|
recorrelation_task = asyncio.create_task(recorrelation_loop())
|
||||||
|
|
||||||
yield # --- app running ---
|
yield # --- app running ---
|
||||||
|
|
||||||
logger.info("DRB C2 Core shutting down.")
|
logger.info("DRB C2 Core shutting down.")
|
||||||
sweeper_task.cancel()
|
sweeper_task.cancel()
|
||||||
summarizer_task.cancel()
|
summarizer_task.cancel()
|
||||||
|
induction_task.cancel()
|
||||||
|
recorrelation_task.cancel()
|
||||||
await mqtt_handler.disconnect()
|
await mqtt_handler.disconnect()
|
||||||
|
|
||||||
|
|
||||||
@@ -63,6 +69,7 @@ app.include_router(tokens.router, dependencies=[Depends(require_service_or_fi
|
|||||||
app.include_router(incidents.router, dependencies=[Depends(require_service_or_firebase_token)])
|
app.include_router(incidents.router, dependencies=[Depends(require_service_or_firebase_token)])
|
||||||
app.include_router(alerts.router, dependencies=[Depends(require_service_or_firebase_token)])
|
app.include_router(alerts.router, dependencies=[Depends(require_service_or_firebase_token)])
|
||||||
app.include_router(upload.router) # auth is per-node, handled inline
|
app.include_router(upload.router) # auth is per-node, handled inline
|
||||||
|
app.include_router(admin.router) # auth is per-endpoint (read: firebase, write: admin)
|
||||||
|
|
||||||
|
|
||||||
@app.get("/health")
|
@app.get("/health")
|
||||||
|
|||||||
@@ -33,12 +33,14 @@ class SystemRecord(BaseModel):
|
|||||||
name: str
|
name: str
|
||||||
type: str # P25 / DMR / NBFM
|
type: str # P25 / DMR / NBFM
|
||||||
config: Dict[str, Any] = {} # OP25-compatible config blob
|
config: Dict[str, Any] = {} # OP25-compatible config blob
|
||||||
|
ten_codes: Dict[str, str] = {} # {"10-10": "Commercial Alarm", ...}
|
||||||
|
|
||||||
|
|
||||||
class SystemCreate(BaseModel):
|
class SystemCreate(BaseModel):
|
||||||
name: str
|
name: str
|
||||||
type: str
|
type: str
|
||||||
config: Dict[str, Any] = {}
|
config: Dict[str, Any] = {}
|
||||||
|
ten_codes: Dict[str, str] = {}
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
@@ -56,11 +58,11 @@ class CallRecord(BaseModel):
|
|||||||
started_at: datetime
|
started_at: datetime
|
||||||
ended_at: Optional[datetime] = None
|
ended_at: Optional[datetime] = None
|
||||||
audio_url: Optional[str] = None
|
audio_url: Optional[str] = None
|
||||||
transcript: Optional[str] = None # populated later by STT
|
transcript: Optional[str] = None # populated later by STT
|
||||||
incident_id: Optional[str] = None # populated later by intelligence layer
|
incident_ids: List[str] = [] # one per scene detected in the recording
|
||||||
location: Optional[Dict[str, float]] = None # {lat, lng}
|
location: Optional[Dict[str, float]] = None # {lat, lng}
|
||||||
tags: List[str] = []
|
tags: List[str] = []
|
||||||
status: str = "active" # active / ended
|
status: str = "active" # active / ended
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|||||||
@@ -0,0 +1,17 @@
|
|||||||
|
from fastapi import APIRouter, Depends
|
||||||
|
from app.internal.auth import require_admin_token, require_firebase_token
|
||||||
|
from app.internal.feature_flags import get_flags, set_flags
|
||||||
|
|
||||||
|
router = APIRouter(prefix="/admin", tags=["admin"])
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/features")
async def get_feature_flags(_=Depends(require_firebase_token)):
    """Return the current AI feature flag state. Any authenticated user can read."""
    flags = await get_flags()
    return flags
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/features")
async def update_feature_flags(body: dict, _=Depends(require_admin_token)):
    """Update one or more AI feature flags. Admin only."""
    updated = await set_flags(body)
    return updated
|
||||||
@@ -83,6 +83,35 @@ async def patch_transcript(
|
|||||||
"embedding": None,
|
"embedding": None,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
# Unlink from ALL current incidents so re-correlation starts clean.
|
||||||
|
# Handles both old single incident_id and new incident_ids list.
|
||||||
|
old_ids: list[str] = call.get("incident_ids") or (
|
||||||
|
[call["incident_id"]] if call.get("incident_id") else []
|
||||||
|
)
|
||||||
|
for old_incident_id in old_ids:
|
||||||
|
old_incident = await fstore.doc_get("incidents", old_incident_id)
|
||||||
|
if old_incident:
|
||||||
|
remaining = [c for c in (old_incident.get("call_ids") or []) if c != call_id]
|
||||||
|
if remaining:
|
||||||
|
await fstore.doc_set("incidents", old_incident_id, {
|
||||||
|
"call_ids": remaining,
|
||||||
|
"summary_stale": True,
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
await fstore.doc_set("incidents", old_incident_id, {
|
||||||
|
"call_ids": [],
|
||||||
|
"status": "resolved",
|
||||||
|
"summary_stale": True,
|
||||||
|
})
|
||||||
|
await fstore.doc_set("calls", call_id, {"incident_ids": [], "incident_id": None})
|
||||||
|
|
||||||
|
# Learn from the correction: diff original → corrected and add new tokens to vocabulary
|
||||||
|
system_id = call.get("system_id")
|
||||||
|
original_text = call.get("transcript_corrected") or call.get("transcript") or ""
|
||||||
|
if system_id and original_text:
|
||||||
|
from app.internal.vocabulary_learner import learn_from_correction
|
||||||
|
await learn_from_correction(system_id, original_text, body.transcript)
|
||||||
|
|
||||||
from app.routers.upload import _run_extraction_pipeline
|
from app.routers.upload import _run_extraction_pipeline
|
||||||
background_tasks.add_task(
|
background_tasks.add_task(
|
||||||
_run_extraction_pipeline,
|
_run_extraction_pipeline,
|
||||||
|
|||||||
@@ -53,12 +53,24 @@ async def send_command(node_id: str, cmd: CommandPayload):
|
|||||||
payload = cmd.model_dump(exclude_none=True)
|
payload = cmd.model_dump(exclude_none=True)
|
||||||
|
|
||||||
if cmd.action == "discord_join":
|
if cmd.action == "discord_join":
|
||||||
preferred = payload.pop("preferred_token_id", None)
|
# Resolve system doc once — used for preferred token and presence name.
|
||||||
|
system_doc = None
|
||||||
|
system_id = node.get("assigned_system_id")
|
||||||
|
if system_id:
|
||||||
|
system_doc = await fstore.doc_get_cached("systems", system_id)
|
||||||
|
|
||||||
|
# Explicit preferred_token_id in the request beats the system-level preference.
|
||||||
|
preferred = payload.pop("preferred_token_id", None) or (system_doc or {}).get("preferred_token_id")
|
||||||
token = await assign_token(node_id, preferred_token_id=preferred)
|
token = await assign_token(node_id, preferred_token_id=preferred)
|
||||||
if not token:
|
if not token:
|
||||||
raise HTTPException(503, "No Discord bot tokens available in the pool.")
|
raise HTTPException(503, "No Discord bot tokens available in the pool.")
|
||||||
payload["token"] = token
|
payload["token"] = token
|
||||||
|
|
||||||
|
# Pass system name so the bot can set its Discord presence on join.
|
||||||
|
system_name = (system_doc or {}).get("name")
|
||||||
|
if system_name:
|
||||||
|
payload["system_name"] = system_name
|
||||||
|
|
||||||
elif cmd.action == "discord_leave":
|
elif cmd.action == "discord_leave":
|
||||||
await release_token(node_id)
|
await release_token(node_id)
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,26 @@
|
|||||||
import uuid
|
import uuid
|
||||||
from fastapi import APIRouter, HTTPException
|
from fastapi import APIRouter, HTTPException
|
||||||
|
from pydantic import BaseModel
|
||||||
|
from typing import Dict, Optional
|
||||||
from app.models import SystemCreate, SystemRecord
|
from app.models import SystemCreate, SystemRecord
|
||||||
from app.internal import firestore as fstore
|
from app.internal import firestore as fstore
|
||||||
|
|
||||||
router = APIRouter(prefix="/systems", tags=["systems"])
|
router = APIRouter(prefix="/systems", tags=["systems"])
|
||||||
|
|
||||||
|
|
||||||
|
class VocabularyTermBody(BaseModel):
    """Request body carrying a single vocabulary term for add/remove endpoints."""

    term: str
|
||||||
|
|
||||||
|
|
||||||
|
class TenCodesBody(BaseModel):
    """Request body: full replacement ten-code mapping (e.g. {"10-10": "Commercial Alarm"})."""

    ten_codes: Dict[str, str]
|
||||||
|
|
||||||
|
|
||||||
|
class AiFlagsBody(BaseModel):
    """Per-system AI flag overrides; None (or omitted) means inherit the global flag."""

    stt_enabled: Optional[bool] = None
    correlation_enabled: Optional[bool] = None
|
||||||
|
|
||||||
|
|
||||||
@router.get("")
|
@router.get("")
|
||||||
async def list_systems():
|
async def list_systems():
|
||||||
return await fstore.collection_list("systems")
|
return await fstore.collection_list("systems")
|
||||||
@@ -42,3 +57,115 @@ async def delete_system(system_id: str):
|
|||||||
if not existing:
|
if not existing:
|
||||||
raise HTTPException(404, f"System '{system_id}' not found.")
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
await fstore.doc_delete("systems", system_id)
|
await fstore.doc_delete("systems", system_id)
|
||||||
|
|
||||||
|
|
||||||
|
# ── Per-system AI flag overrides ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
@router.put("/{system_id}/ai-flags")
async def update_system_ai_flags(system_id: str, body: AiFlagsBody):
    """
    Set per-system AI flag overrides. Only fields included in the body are
    written; omitted fields remain unchanged (or absent, meaning inherit global).
    Pass null to clear an override and fall back to the global flag.
    """
    system = await fstore.doc_get("systems", system_id)
    if not system:
        raise HTTPException(404, f"System '{system_id}' not found.")

    overrides: dict = system.get("ai_flags") or {}
    for key, val in body.model_dump(exclude_unset=True).items():
        if val is None:
            # Explicit null clears the override so the global flag applies again.
            overrides.pop(key, None)
        else:
            overrides[key] = val

    await fstore.doc_update("systems", system_id, {"ai_flags": overrides})
    return {"ok": True, "ai_flags": overrides}
|
||||||
|
|
||||||
|
|
||||||
|
# ── Ten-codes endpoints ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@router.get("/{system_id}/ten-codes")
async def get_ten_codes(system_id: str):
    """Return the ten-code dictionary for a system."""
    system = await fstore.doc_get("systems", system_id)
    if not system:
        raise HTTPException(404, f"System '{system_id}' not found.")
    codes = system.get("ten_codes") or {}
    return {"ten_codes": codes}
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/{system_id}/ten-codes")
async def update_ten_codes(system_id: str, body: TenCodesBody):
    """Replace the ten-code dictionary for a system."""
    if not await fstore.doc_get("systems", system_id):
        raise HTTPException(404, f"System '{system_id}' not found.")
    await fstore.doc_update("systems", system_id, {"ten_codes": body.ten_codes})
    return {"ok": True, "ten_codes": body.ten_codes}
|
||||||
|
|
||||||
|
|
||||||
|
# ── Vocabulary endpoints ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@router.get("/{system_id}/vocabulary")
|
||||||
|
async def get_vocabulary(system_id: str):
|
||||||
|
"""Return approved vocabulary and pending induction suggestions."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import get_vocabulary as _get
|
||||||
|
return await _get(system_id)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{system_id}/vocabulary/bootstrap", status_code=202)
|
||||||
|
async def bootstrap_vocabulary(system_id: str):
|
||||||
|
"""Trigger a one-shot GPT-4o bootstrap to seed the vocabulary from local knowledge."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import bootstrap_system_vocabulary
|
||||||
|
terms = await bootstrap_system_vocabulary(system_id)
|
||||||
|
return {"added": len(terms), "terms": terms}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{system_id}/vocabulary/terms")
|
||||||
|
async def add_vocabulary_term(system_id: str, body: VocabularyTermBody):
|
||||||
|
"""Manually add a term to the approved vocabulary."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import add_term
|
||||||
|
await add_term(system_id, body.term.strip())
|
||||||
|
return {"ok": True}
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/{system_id}/vocabulary/terms")
|
||||||
|
async def remove_vocabulary_term(system_id: str, body: VocabularyTermBody):
|
||||||
|
"""Remove a term from the approved vocabulary."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import remove_term
|
||||||
|
await remove_term(system_id, body.term)
|
||||||
|
return {"ok": True}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{system_id}/vocabulary/pending/approve")
|
||||||
|
async def approve_pending(system_id: str, body: VocabularyTermBody):
|
||||||
|
"""Move a pending induction suggestion into the approved vocabulary."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import approve_pending_term
|
||||||
|
await approve_pending_term(system_id, body.term)
|
||||||
|
return {"ok": True}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{system_id}/vocabulary/pending/dismiss")
|
||||||
|
async def dismiss_pending(system_id: str, body: VocabularyTermBody):
|
||||||
|
"""Dismiss a pending induction suggestion without adding it."""
|
||||||
|
existing = await fstore.doc_get("systems", system_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, f"System '{system_id}' not found.")
|
||||||
|
from app.internal.vocabulary_learner import dismiss_pending_term
|
||||||
|
await dismiss_pending_term(system_id, body.term)
|
||||||
|
return {"ok": True}
|
||||||
|
|||||||
@@ -60,6 +60,34 @@ async def flush_tokens():
|
|||||||
return {"released": len(results)}
|
return {"released": len(results)}
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/{token_id}/prefer/{system_id}", status_code=200)
|
||||||
|
async def set_preferred_system(token_id: str, system_id: str):
|
||||||
|
"""
|
||||||
|
Mark this token as the preferred bot for a system.
|
||||||
|
When a discord_join is issued for any node in that system, this token
|
||||||
|
is tried first before falling back to the general pool.
|
||||||
|
Pass system_id="_none" to clear the preference.
|
||||||
|
"""
|
||||||
|
existing = await fstore.doc_get("bot_tokens", token_id)
|
||||||
|
if not existing:
|
||||||
|
raise HTTPException(404, "Token not found.")
|
||||||
|
if system_id == "_none":
|
||||||
|
# Clear any existing preference on the system that pointed to this token.
|
||||||
|
system_doc = await fstore.doc_get("systems", existing.get("preferred_for_system_id", ""))
|
||||||
|
if system_doc:
|
||||||
|
await fstore.doc_set("systems", existing["preferred_for_system_id"], {"preferred_token_id": None})
|
||||||
|
await fstore.doc_set("bot_tokens", token_id, {"preferred_for_system_id": None})
|
||||||
|
return {"ok": True, "preferred_for_system_id": None}
|
||||||
|
|
||||||
|
system_doc = await fstore.doc_get("systems", system_id)
|
||||||
|
if not system_doc:
|
||||||
|
raise HTTPException(404, "System not found.")
|
||||||
|
# Set preference on both sides for easy lookup in either direction.
|
||||||
|
await fstore.doc_set("systems", system_id, {"preferred_token_id": token_id})
|
||||||
|
await fstore.doc_set("bot_tokens", token_id, {"preferred_for_system_id": system_id})
|
||||||
|
return {"ok": True, "preferred_for_system_id": system_id}
|
||||||
|
|
||||||
|
|
||||||
@router.delete("/{token_id}", status_code=204)
|
@router.delete("/{token_id}", status_code=204)
|
||||||
async def delete_token(token_id: str):
|
async def delete_token(token_id: str):
|
||||||
existing = await fstore.doc_get("bot_tokens", token_id)
|
existing = await fstore.doc_get("bot_tokens", token_id)
|
||||||
|
|||||||
@@ -96,35 +96,54 @@ async def _run_extraction_pipeline(
|
|||||||
"""Run steps 2-4 of the intelligence pipeline using an existing transcript."""
|
"""Run steps 2-4 of the intelligence pipeline using an existing transcript."""
|
||||||
from app.internal import intelligence, incident_correlator, alerter
|
from app.internal import intelligence, incident_correlator, alerter
|
||||||
|
|
||||||
tags, incident_type, location, location_coords, resolved = await intelligence.extract_tags(
|
# Step 2: Scene detection + intelligence extraction.
|
||||||
|
# Returns one scene per distinct incident detected in the recording.
|
||||||
|
scenes = await intelligence.extract_scenes(
|
||||||
call_id, transcript, talkgroup_name,
|
call_id, transcript, talkgroup_name,
|
||||||
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
|
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
|
||||||
node_id=node_id,
|
node_id=node_id,
|
||||||
preserve_transcript_correction=preserve_transcript_correction,
|
preserve_transcript_correction=preserve_transcript_correction,
|
||||||
)
|
)
|
||||||
|
|
||||||
incident_id = await incident_correlator.correlate_call(
|
# Step 3: Correlate each scene to an incident independently.
|
||||||
call_id=call_id,
|
incident_ids: list[str] = []
|
||||||
node_id=node_id,
|
all_tags: list[str] = []
|
||||||
system_id=system_id,
|
for scene in scenes:
|
||||||
talkgroup_id=talkgroup_id,
|
all_tags.extend(scene["tags"])
|
||||||
talkgroup_name=talkgroup_name,
|
# When dispatch is pulling a unit to a NEW call (reassignment), suppress unit
|
||||||
tags=tags,
|
# overlap so the new scene doesn't chain into the unit's previous incident.
|
||||||
incident_type=incident_type,
|
corr_units = [] if scene.get("reassignment") else scene.get("units")
|
||||||
location=location,
|
incident_id = await incident_correlator.correlate_call(
|
||||||
location_coords=location_coords,
|
call_id=call_id,
|
||||||
)
|
node_id=node_id,
|
||||||
|
system_id=system_id,
|
||||||
|
talkgroup_id=talkgroup_id,
|
||||||
|
talkgroup_name=talkgroup_name,
|
||||||
|
tags=scene["tags"],
|
||||||
|
incident_type=scene["incident_type"],
|
||||||
|
location=scene["location"],
|
||||||
|
location_coords=scene["location_coords"],
|
||||||
|
units=corr_units,
|
||||||
|
vehicles=scene.get("vehicles"),
|
||||||
|
cleared_units=scene.get("cleared_units"),
|
||||||
|
)
|
||||||
|
if incident_id and incident_id not in incident_ids:
|
||||||
|
incident_ids.append(incident_id)
|
||||||
|
if scene["resolved"] and incident_id:
|
||||||
|
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
||||||
|
await incident_correlator.maybe_resolve_parent(incident_id)
|
||||||
|
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
|
||||||
|
|
||||||
if resolved and incident_id:
|
if incident_ids:
|
||||||
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
await fstore.doc_set("calls", call_id, {"incident_ids": incident_ids})
|
||||||
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
|
|
||||||
|
|
||||||
|
# Step 4: Alert dispatch — run once with merged tags from all scenes.
|
||||||
await alerter.check_and_dispatch(
|
await alerter.check_and_dispatch(
|
||||||
call_id=call_id,
|
call_id=call_id,
|
||||||
node_id=node_id,
|
node_id=node_id,
|
||||||
talkgroup_id=talkgroup_id,
|
talkgroup_id=talkgroup_id,
|
||||||
talkgroup_name=talkgroup_name,
|
talkgroup_name=talkgroup_name,
|
||||||
tags=tags,
|
tags=list(dict.fromkeys(all_tags)),
|
||||||
transcript=transcript,
|
transcript=transcript,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -140,48 +159,101 @@ async def _run_intelligence_pipeline(
|
|||||||
"""
|
"""
|
||||||
Post-upload intelligence pipeline (runs as a background task):
|
Post-upload intelligence pipeline (runs as a background task):
|
||||||
1. Transcribe audio via Google STT
|
1. Transcribe audio via Google STT
|
||||||
2. Extract tags/incident type from transcript
|
2. Detect scenes + extract intelligence (one result per incident in recording)
|
||||||
3. Correlate with existing incidents (or create new one)
|
3. Correlate each scene with existing incidents (or create new ones)
|
||||||
4. Check alert rules and dispatch notifications
|
4. Check alert rules and dispatch notifications
|
||||||
"""
|
"""
|
||||||
from app.internal import transcription, intelligence, incident_correlator, alerter
|
from app.internal import transcription, intelligence, incident_correlator, alerter
|
||||||
|
from app.internal.feature_flags import get_flags
|
||||||
|
|
||||||
|
flags = await get_flags()
|
||||||
|
|
||||||
|
# Resolve per-system overrides: system flag=False beats global flag=True,
|
||||||
|
# but global flag=False beats everything (master switch).
|
||||||
|
system_ai_flags: dict = {}
|
||||||
|
if system_id:
|
||||||
|
sys_doc = await fstore.doc_get_cached("systems", system_id)
|
||||||
|
system_ai_flags = (sys_doc or {}).get("ai_flags") or {}
|
||||||
|
|
||||||
|
def _flag(name: str) -> bool:
|
||||||
|
if not flags[name]: # global master off
|
||||||
|
return False
|
||||||
|
return system_ai_flags.get(name, True) # system override, default inherit
|
||||||
|
|
||||||
transcript: Optional[str] = None
|
transcript: Optional[str] = None
|
||||||
segments: list[dict] = []
|
segments: list[dict] = []
|
||||||
|
|
||||||
# Step 1: Transcription
|
# Step 1: Transcription
|
||||||
if gcs_uri:
|
if gcs_uri:
|
||||||
transcript, segments = await transcription.transcribe_call(call_id, gcs_uri, talkgroup_name)
|
if _flag("stt_enabled"):
|
||||||
|
transcript, segments = await transcription.transcribe_call(
|
||||||
|
call_id, gcs_uri, talkgroup_name, system_id=system_id
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
scope = "globally" if not flags["stt_enabled"] else f"system {system_id}"
|
||||||
|
logger.info(f"STT disabled ({scope}) — skipping transcription for call {call_id}")
|
||||||
|
|
||||||
# Step 2: Intelligence extraction
|
# Step 2: Scene detection + intelligence extraction
|
||||||
tags: list[str] = []
|
scenes: list[dict] = []
|
||||||
incident_type: Optional[str] = None
|
if _flag("correlation_enabled"):
|
||||||
location: Optional[str] = None
|
if transcript:
|
||||||
location_coords: Optional[dict] = None
|
scenes = await intelligence.extract_scenes(
|
||||||
resolved: bool = False
|
call_id, transcript, talkgroup_name,
|
||||||
if transcript:
|
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
|
||||||
tags, incident_type, location, location_coords, resolved = await intelligence.extract_tags(
|
node_id=node_id,
|
||||||
call_id, transcript, talkgroup_name,
|
)
|
||||||
talkgroup_id=talkgroup_id, system_id=system_id, segments=segments,
|
else:
|
||||||
node_id=node_id,
|
scope = "globally" if not flags["correlation_enabled"] else f"system {system_id}"
|
||||||
)
|
logger.info(f"Correlation disabled ({scope}) — skipping scene extraction and correlation for call {call_id}")
|
||||||
|
|
||||||
# Step 3: Incident correlation (always runs — unclassified calls can still link via talkgroup)
|
# Step 3: Correlate each scene independently.
|
||||||
incident_id = await incident_correlator.correlate_call(
|
# A single recording can produce multiple incidents on a busy channel.
|
||||||
call_id=call_id,
|
incident_ids: list[str] = []
|
||||||
node_id=node_id,
|
all_tags: list[str] = []
|
||||||
system_id=system_id,
|
if flags["correlation_enabled"]:
|
||||||
talkgroup_id=talkgroup_id,
|
for scene in scenes:
|
||||||
talkgroup_name=talkgroup_name,
|
all_tags.extend(scene["tags"])
|
||||||
tags=tags,
|
corr_units = [] if scene.get("reassignment") else scene.get("units")
|
||||||
incident_type=incident_type,
|
incident_id = await incident_correlator.correlate_call(
|
||||||
location=location,
|
call_id=call_id,
|
||||||
location_coords=location_coords,
|
node_id=node_id,
|
||||||
)
|
system_id=system_id,
|
||||||
|
talkgroup_id=talkgroup_id,
|
||||||
|
talkgroup_name=talkgroup_name,
|
||||||
|
tags=scene["tags"],
|
||||||
|
incident_type=scene["incident_type"],
|
||||||
|
location=scene["location"],
|
||||||
|
location_coords=scene["location_coords"],
|
||||||
|
units=corr_units,
|
||||||
|
vehicles=scene.get("vehicles"),
|
||||||
|
cleared_units=scene.get("cleared_units"),
|
||||||
|
)
|
||||||
|
if incident_id and incident_id not in incident_ids:
|
||||||
|
incident_ids.append(incident_id)
|
||||||
|
if scene["resolved"] and incident_id:
|
||||||
|
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
||||||
|
await incident_correlator.maybe_resolve_parent(incident_id)
|
||||||
|
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
|
||||||
|
|
||||||
if resolved and incident_id:
|
# Correlator also runs for calls with no scenes (unclassified) to attempt
|
||||||
await fstore.doc_set("incidents", incident_id, {"status": "resolved"})
|
# talkgroup-based linking even when no transcript could be produced.
|
||||||
logger.info(f"Auto-resolved incident {incident_id} (LLM closure detection)")
|
if not scenes:
|
||||||
|
incident_id = await incident_correlator.correlate_call(
|
||||||
|
call_id=call_id,
|
||||||
|
node_id=node_id,
|
||||||
|
system_id=system_id,
|
||||||
|
talkgroup_id=talkgroup_id,
|
||||||
|
talkgroup_name=talkgroup_name,
|
||||||
|
tags=[],
|
||||||
|
incident_type=None,
|
||||||
|
location=None,
|
||||||
|
location_coords=None,
|
||||||
|
)
|
||||||
|
if incident_id:
|
||||||
|
incident_ids.append(incident_id)
|
||||||
|
|
||||||
|
if incident_ids:
|
||||||
|
await fstore.doc_set("calls", call_id, {"incident_ids": incident_ids})
|
||||||
|
|
||||||
# Step 4: Alert dispatch (always runs — talkgroup ID rules don't need a transcript)
|
# Step 4: Alert dispatch (always runs — talkgroup ID rules don't need a transcript)
|
||||||
await alerter.check_and_dispatch(
|
await alerter.check_and_dispatch(
|
||||||
@@ -189,6 +261,6 @@ async def _run_intelligence_pipeline(
|
|||||||
node_id=node_id,
|
node_id=node_id,
|
||||||
talkgroup_id=talkgroup_id,
|
talkgroup_id=talkgroup_id,
|
||||||
talkgroup_name=talkgroup_name,
|
talkgroup_name=talkgroup_name,
|
||||||
tags=tags,
|
tags=list(dict.fromkeys(all_tags)),
|
||||||
transcript=transcript,
|
transcript=transcript,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -0,0 +1,135 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import { useAuth } from "@/components/AuthProvider";
|
||||||
|
import { c2api } from "@/lib/c2api";
|
||||||
|
import { useEffect, useState } from "react";
|
||||||
|
import { useRouter } from "next/navigation";
|
||||||
|
|
||||||
|
// Global AI feature flags returned by /feature-flags. Each is a master
// switch; per-system overrides (ai_flags) are resolved server-side.
interface FeatureFlags {
  stt_enabled: boolean;
  correlation_enabled: boolean;
  summaries_enabled: boolean;
  vocabulary_learning_enabled: boolean;
}

// Display metadata for each flag toggle on the admin page.
// NOTE(review): this label says "OpenAI Whisper" but the backend pipeline
// docs describe transcription via Google STT — confirm which provider is
// actually in use before trusting the label.
const FLAG_META: { key: keyof FeatureFlags; label: string; description: string }[] = [
  {
    key: "stt_enabled",
    label: "Speech-to-Text (Whisper)",
    description: "Transcribe call audio via OpenAI Whisper. When off, calls are recorded and stored but no transcript is generated.",
  },
  {
    key: "correlation_enabled",
    label: "Incident Correlation",
    description: "Run scene extraction and incident correlation on each call. When off, calls are logged but not linked to incidents.",
  },
  {
    key: "summaries_enabled",
    label: "Incident Summaries",
    description: "Generate AI summaries for active incidents on each summarizer pass. Auto-resolve sweep is also paused when off.",
  },
  {
    key: "vocabulary_learning_enabled",
    label: "Vocabulary Learning",
    description: "Run the background vocabulary induction loop that proposes new STT terms from recent transcripts.",
  },
];
|
||||||
|
|
||||||
|
/**
 * Small pill-style on/off switch.
 *
 * Purely presentational: reports the flipped value through `onChange` and
 * leaves state ownership to the caller. `disabled` dims the control and
 * blocks clicks (used while a save is in flight).
 */
function Toggle({
  enabled,
  onChange,
  disabled,
}: {
  enabled: boolean;
  onChange: (val: boolean) => void;
  disabled: boolean;
}) {
  return (
    <button
      onClick={() => onChange(!enabled)}
      disabled={disabled}
      className={`relative inline-flex h-6 w-11 items-center rounded-full transition-colors focus:outline-none disabled:opacity-50 ${
        enabled ? "bg-indigo-600" : "bg-gray-700"
      }`}
    >
      <span
        className={`inline-block h-4 w-4 rounded-full bg-white shadow transition-transform ${
          enabled ? "translate-x-6" : "translate-x-1"
        }`}
      />
    </button>
  );
}
|
||||||
|
|
||||||
|
/**
 * Admin-only page for viewing and toggling global AI feature flags.
 *
 * Non-admin users are redirected to /dashboard. Flags are fetched once on
 * mount; each toggle issues a partial update ({ [key]: value }) and replaces
 * local state with the server's response, so the UI always reflects the
 * persisted values.
 */
export default function AdminPage() {
  const { isAdmin } = useAuth();
  const router = useRouter();

  const [flags, setFlags] = useState<FeatureFlags | null>(null);
  const [loading, setLoading] = useState(true);
  // Key of the flag currently being saved (disables just that toggle), or null.
  const [saving, setSaving] = useState<string | null>(null);
  const [error, setError] = useState<string | null>(null);

  useEffect(() => {
    // Guard: bounce non-admins before fetching anything.
    if (!isAdmin) {
      router.replace("/dashboard");
      return;
    }
    c2api.getFeatureFlags()
      .then((f) => setFlags(f as unknown as FeatureFlags))
      .catch((e) => setError(String(e)))
      .finally(() => setLoading(false));
  }, [isAdmin, router]);

  // Persist a single flag change; server response is the source of truth.
  async function handleToggle(key: keyof FeatureFlags, value: boolean) {
    if (!flags) return;
    setSaving(key);
    setError(null);
    try {
      const updated = await c2api.setFeatureFlags({ [key]: value });
      setFlags(updated as unknown as FeatureFlags);
    } catch (e) {
      setError(String(e));
    } finally {
      setSaving(null);
    }
  }

  // Render nothing while the redirect effect runs for non-admins.
  if (!isAdmin) return null;

  return (
    <div className="max-w-2xl space-y-8">
      <h1 className="text-white text-xl font-bold font-mono">Admin</h1>

      <section className="space-y-3">
        <h2 className="text-sm font-mono text-gray-400 uppercase tracking-wider">AI Features</h2>

        {error && (
          <div className="bg-red-950 border border-red-800 rounded-lg p-3">
            <p className="text-red-400 text-sm font-mono">{error}</p>
          </div>
        )}

        {loading ? (
          <p className="text-gray-500 text-sm font-mono">Loading…</p>
        ) : (
          <div className="bg-gray-900 border border-gray-800 rounded-xl divide-y divide-gray-800">
            {FLAG_META.map(({ key, label, description }) => (
              <div key={key} className="flex items-center justify-between gap-4 px-5 py-4">
                <div className="min-w-0">
                  <p className="text-white text-sm font-semibold">{label}</p>
                  <p className="text-gray-500 text-xs mt-0.5 leading-snug">{description}</p>
                </div>
                <Toggle
                  enabled={flags?.[key] ?? true}
                  onChange={(val) => handleToggle(key, val)}
                  disabled={saving === key}
                />
              </div>
            ))}
          </div>
        )}
      </section>
    </div>
  );
}
|
||||||
@@ -197,7 +197,7 @@ export default function AlertsPage() {
|
|||||||
const unacked = alerts.filter((a) => !a.acknowledged);
|
const unacked = alerts.filter((a) => !a.acknowledged);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="p-6 max-w-7xl mx-auto space-y-6">
|
<div className="space-y-6">
|
||||||
<div className="flex items-center gap-4">
|
<div className="flex items-center gap-4">
|
||||||
<h1 className="text-white text-xl font-bold font-mono">Alerts</h1>
|
<h1 className="text-white text-xl font-bold font-mono">Alerts</h1>
|
||||||
{unacked.length > 0 && (
|
{unacked.length > 0 && (
|
||||||
|
|||||||
+179
-11
@@ -1,10 +1,69 @@
|
|||||||
"use client";
|
"use client";
|
||||||
|
|
||||||
import { useState } from "react";
|
import { useState, useMemo } from "react";
|
||||||
import { useCalls } from "@/lib/useCalls";
|
import { useCalls } from "@/lib/useCalls";
|
||||||
import { useSystems } from "@/lib/useSystems";
|
import { useSystems } from "@/lib/useSystems";
|
||||||
import { CallRow } from "@/components/CallRow";
|
import { CallRow } from "@/components/CallRow";
|
||||||
import { useAuth } from "@/components/AuthProvider";
|
import { useAuth } from "@/components/AuthProvider";
|
||||||
|
import type { CallRecord } from "@/lib/types";
|
||||||
|
|
||||||
|
const inputCls =
|
||||||
|
"bg-gray-800 border border-gray-700 rounded-lg px-3 py-1.5 text-sm text-white font-mono " +
|
||||||
|
"placeholder:text-gray-600 focus:outline-none focus:border-indigo-500 w-full";
|
||||||
|
|
||||||
|
function filterCalls(calls: CallRecord[], filters: Filters): CallRecord[] {
|
||||||
|
const q = filters.query.trim().toLowerCase();
|
||||||
|
const tgid = filters.tgid.trim();
|
||||||
|
const fromMs = filters.dateFrom ? new Date(filters.dateFrom).getTime() : null;
|
||||||
|
const toMs = filters.dateTo ? new Date(filters.dateTo + "T23:59:59").getTime() : null;
|
||||||
|
|
||||||
|
return calls.filter((c) => {
|
||||||
|
// System filter
|
||||||
|
if (filters.systemId && c.system_id !== filters.systemId) return false;
|
||||||
|
|
||||||
|
// TGID filter (exact match on the number)
|
||||||
|
if (tgid && String(c.talkgroup_id ?? "") !== tgid) return false;
|
||||||
|
|
||||||
|
// Date range
|
||||||
|
const ts = new Date(c.started_at).getTime();
|
||||||
|
if (fromMs !== null && ts < fromMs) return false;
|
||||||
|
if (toMs !== null && ts > toMs) return false;
|
||||||
|
|
||||||
|
// Free-text: talkgroup name, node_id, transcript, tags
|
||||||
|
if (q) {
|
||||||
|
const hay = [
|
||||||
|
c.talkgroup_name ?? "",
|
||||||
|
c.node_id,
|
||||||
|
c.transcript ?? "",
|
||||||
|
c.transcript_corrected ?? "",
|
||||||
|
...(c.tags ?? []),
|
||||||
|
].join(" ").toLowerCase();
|
||||||
|
if (!hay.includes(q)) return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
interface Filters {
|
||||||
|
query: string;
|
||||||
|
tgid: string;
|
||||||
|
systemId: string;
|
||||||
|
dateFrom: string;
|
||||||
|
dateTo: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
const DEFAULT_FILTERS: Filters = {
|
||||||
|
query: "",
|
||||||
|
tgid: "",
|
||||||
|
systemId: "",
|
||||||
|
dateFrom: "",
|
||||||
|
dateTo: "",
|
||||||
|
};
|
||||||
|
|
||||||
|
function isActive(f: Filters) {
|
||||||
|
return f.query || f.tgid || f.systemId || f.dateFrom || f.dateTo;
|
||||||
|
}
|
||||||
|
|
||||||
export default function CallsPage() {
|
export default function CallsPage() {
|
||||||
const [limitCount, setLimitCount] = useState(100);
|
const [limitCount, setLimitCount] = useState(100);
|
||||||
@@ -13,22 +72,128 @@ export default function CallsPage() {
|
|||||||
const { isAdmin } = useAuth();
|
const { isAdmin } = useAuth();
|
||||||
const systemMap = Object.fromEntries(systems.map((s) => [s.system_id, s]));
|
const systemMap = Object.fromEntries(systems.map((s) => [s.system_id, s]));
|
||||||
|
|
||||||
const active = calls.filter((c) => c.status === "active");
|
const [filters, setFilters] = useState<Filters>(DEFAULT_FILTERS);
|
||||||
const ended = calls.filter((c) => c.status === "ended");
|
const [showFilters, setShowFilters] = useState(false);
|
||||||
|
|
||||||
|
function set<K extends keyof Filters>(key: K, value: string) {
|
||||||
|
setFilters((f) => ({ ...f, [key]: value }));
|
||||||
|
}
|
||||||
|
|
||||||
|
const active = calls.filter((c) => c.status === "active");
|
||||||
|
const ended = calls.filter((c) => c.status === "ended");
|
||||||
|
const filtered = useMemo(() => filterCalls(ended, filters), [ended, filters]);
|
||||||
|
|
||||||
|
const activeFilters = isActive(filters);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6">
|
<div className="space-y-6">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<h1 className="text-xl font-bold text-white font-mono">Calls</h1>
|
<h1 className="text-xl font-bold text-white font-mono">Calls</h1>
|
||||||
<span className="text-xs text-gray-500 font-mono">{calls.length} loaded</span>
|
<div className="flex items-center gap-3">
|
||||||
|
<span className="text-xs text-gray-500 font-mono">{calls.length} loaded</span>
|
||||||
|
<button
|
||||||
|
onClick={() => setShowFilters((v) => !v)}
|
||||||
|
className={`text-xs font-mono px-3 py-1.5 rounded-lg border transition-colors ${
|
||||||
|
activeFilters
|
||||||
|
? "border-indigo-600 bg-indigo-950 text-indigo-300"
|
||||||
|
: "border-gray-700 bg-gray-900 text-gray-400 hover:text-gray-200"
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{showFilters ? "Hide filters" : "Filter"}
|
||||||
|
{activeFilters && " •"}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Filter bar */}
|
||||||
|
{showFilters && (
|
||||||
|
<div className="bg-gray-900 border border-gray-800 rounded-xl p-4 space-y-3">
|
||||||
|
<div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-3">
|
||||||
|
{/* Text search */}
|
||||||
|
<div className="lg:col-span-2">
|
||||||
|
<label className="text-xs text-gray-500 block mb-1">Search (talkgroup, node, transcript, tags)</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={filters.query}
|
||||||
|
onChange={(e) => set("query", e.target.value)}
|
||||||
|
placeholder="fire, Engine 5, dispatch…"
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* TGID */}
|
||||||
|
<div>
|
||||||
|
<label className="text-xs text-gray-500 block mb-1">Talkgroup ID</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
value={filters.tgid}
|
||||||
|
onChange={(e) => set("tgid", e.target.value)}
|
||||||
|
placeholder="e.g. 9048"
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* System */}
|
||||||
|
<div>
|
||||||
|
<label className="text-xs text-gray-500 block mb-1">System</label>
|
||||||
|
<select
|
||||||
|
value={filters.systemId}
|
||||||
|
onChange={(e) => set("systemId", e.target.value)}
|
||||||
|
className={inputCls}
|
||||||
|
>
|
||||||
|
<option value="">All systems</option>
|
||||||
|
{systems.map((s) => (
|
||||||
|
<option key={s.system_id} value={s.system_id}>{s.name}</option>
|
||||||
|
))}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Date from */}
|
||||||
|
<div>
|
||||||
|
<label className="text-xs text-gray-500 block mb-1">From date</label>
|
||||||
|
<input
|
||||||
|
type="date"
|
||||||
|
value={filters.dateFrom}
|
||||||
|
onChange={(e) => set("dateFrom", e.target.value)}
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Date to */}
|
||||||
|
<div>
|
||||||
|
<label className="text-xs text-gray-500 block mb-1">To date</label>
|
||||||
|
<input
|
||||||
|
type="date"
|
||||||
|
value={filters.dateTo}
|
||||||
|
onChange={(e) => set("dateTo", e.target.value)}
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{activeFilters && (
|
||||||
|
<div className="flex items-center justify-between pt-1">
|
||||||
|
<p className="text-xs text-gray-500 font-mono">
|
||||||
|
{filtered.length} of {ended.length} calls match
|
||||||
|
</p>
|
||||||
|
<button
|
||||||
|
onClick={() => setFilters(DEFAULT_FILTERS)}
|
||||||
|
className="text-xs text-gray-500 hover:text-gray-300 font-mono transition-colors"
|
||||||
|
>
|
||||||
|
Clear all
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Live calls — never filtered */}
|
||||||
{active.length > 0 && (
|
{active.length > 0 && (
|
||||||
<section>
|
<section>
|
||||||
<h2 className="text-sm font-semibold text-orange-400 uppercase tracking-wider mb-3">
|
<h2 className="text-sm font-semibold text-orange-400 uppercase tracking-wider mb-3">
|
||||||
Live ({active.length})
|
Live ({active.length})
|
||||||
</h2>
|
</h2>
|
||||||
<div className="overflow-x-auto">
|
<div className="bg-gray-900 border border-gray-800 rounded-xl overflow-hidden">
|
||||||
<table className="w-full text-sm">
|
<table className="w-full text-sm">
|
||||||
<thead>
|
<thead>
|
||||||
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
||||||
@@ -51,17 +216,20 @@ export default function CallsPage() {
|
|||||||
</section>
|
</section>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* History */}
|
||||||
<section>
|
<section>
|
||||||
<h2 className="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">
|
<h2 className="text-sm font-semibold text-gray-400 uppercase tracking-wider mb-3">
|
||||||
History
|
History{activeFilters && <span className="ml-2 text-indigo-400">({filtered.length} filtered)</span>}
|
||||||
</h2>
|
</h2>
|
||||||
{loading ? (
|
{loading ? (
|
||||||
<p className="text-gray-600 text-sm font-mono">Loading…</p>
|
<p className="text-gray-600 text-sm font-mono">Loading…</p>
|
||||||
) : ended.length === 0 ? (
|
) : filtered.length === 0 ? (
|
||||||
<p className="text-gray-600 text-sm font-mono">No calls recorded yet.</p>
|
<p className="text-gray-600 text-sm font-mono">
|
||||||
|
{activeFilters ? "No calls match the current filters." : "No calls recorded yet."}
|
||||||
|
</p>
|
||||||
) : (
|
) : (
|
||||||
<>
|
<>
|
||||||
<div className="overflow-x-auto">
|
<div className="bg-gray-900 border border-gray-800 rounded-xl overflow-hidden">
|
||||||
<table className="w-full text-sm">
|
<table className="w-full text-sm">
|
||||||
<thead>
|
<thead>
|
||||||
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
||||||
@@ -74,13 +242,13 @@ export default function CallsPage() {
|
|||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody>
|
<tbody>
|
||||||
{ended.map((c) => (
|
{filtered.map((c) => (
|
||||||
<CallRow key={c.call_id} call={c} systemName={systemMap[c.system_id ?? ""]?.name} isAdmin={isAdmin} />
|
<CallRow key={c.call_id} call={c} systemName={systemMap[c.system_id ?? ""]?.name} isAdmin={isAdmin} />
|
||||||
))}
|
))}
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
</div>
|
</div>
|
||||||
{ended.length >= limitCount && (
|
{!activeFilters && ended.length >= limitCount && (
|
||||||
<button
|
<button
|
||||||
onClick={() => setLimitCount((n) => n + 100)}
|
onClick={() => setLimitCount((n) => n + 100)}
|
||||||
className="mt-4 text-sm text-indigo-400 hover:text-indigo-300 font-mono transition-colors"
|
className="mt-4 text-sm text-indigo-400 hover:text-indigo-300 font-mono transition-colors"
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ export default function DashboardPage() {
|
|||||||
{calls.length === 0 ? (
|
{calls.length === 0 ? (
|
||||||
<p className="text-gray-600 text-sm font-mono">No calls recorded yet.</p>
|
<p className="text-gray-600 text-sm font-mono">No calls recorded yet.</p>
|
||||||
) : (
|
) : (
|
||||||
<div className="overflow-x-auto">
|
<div className="bg-gray-900 border border-gray-800 rounded-xl overflow-hidden">
|
||||||
<table className="w-full text-sm">
|
<table className="w-full text-sm">
|
||||||
<thead>
|
<thead>
|
||||||
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
<tr className="text-xs text-gray-500 uppercase tracking-wider border-b border-gray-800">
|
||||||
|
|||||||
@@ -4,6 +4,105 @@
|
|||||||
|
|
||||||
@import 'leaflet/dist/leaflet.css';
|
@import 'leaflet/dist/leaflet.css';
|
||||||
|
|
||||||
|
/* ── Base ─────────────────────────────────────────────────────────────────── */
|
||||||
html, body {
|
html, body {
|
||||||
@apply bg-gray-950 text-gray-100 font-mono;
|
@apply bg-gray-950 text-gray-100 font-mono;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ── Light mode overrides ─────────────────────────────────────────────────── */
|
||||||
|
/*
|
||||||
|
* The app's components use hardcoded dark-palette Tailwind classes (bg-gray-9xx,
|
||||||
|
* text-gray-xxx). Rather than adding dark: prefixes everywhere, we remap those
|
||||||
|
* classes here when the html element doesn't carry the .dark class.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Structural backgrounds */
|
||||||
|
html:not(.dark),
|
||||||
|
html:not(.dark) body { background-color: #f1f5f9; color: #0f172a; }
|
||||||
|
html:not(.dark) .bg-gray-950 { background-color: #f1f5f9 !important; }
|
||||||
|
html:not(.dark) .bg-gray-950\/95 { background-color: rgba(241,245,249,0.95) !important; }
|
||||||
|
html:not(.dark) .bg-gray-900 { background-color: #ffffff !important; }
|
||||||
|
html:not(.dark) .bg-gray-900\/60 { background-color: rgba(255,255,255,0.85) !important; }
|
||||||
|
html:not(.dark) .bg-gray-900\/50 { background-color: rgba(255,255,255,0.75) !important; }
|
||||||
|
html:not(.dark) .bg-gray-900\/30 { background-color: rgba(255,255,255,0.50) !important; }
|
||||||
|
html:not(.dark) .bg-gray-800 { background-color: #f1f5f9 !important; }
|
||||||
|
html:not(.dark) .bg-gray-800\/40 { background-color: rgba(241,245,249,0.60) !important; }
|
||||||
|
html:not(.dark) .bg-gray-800\/30 { background-color: rgba(241,245,249,0.50) !important; }
|
||||||
|
html:not(.dark) .bg-gray-700 { background-color: #e2e8f0 !important; }
|
||||||
|
|
||||||
|
/* Borders */
|
||||||
|
html:not(.dark) .border-gray-800 { border-color: #e2e8f0 !important; }
|
||||||
|
html:not(.dark) .border-gray-700 { border-color: #cbd5e1 !important; }
|
||||||
|
html:not(.dark) .divide-gray-800 > * + * { border-color: #e2e8f0 !important; }
|
||||||
|
|
||||||
|
/* Text */
|
||||||
|
html:not(.dark) .text-white { color: #0f172a !important; }
|
||||||
|
html:not(.dark) .text-gray-100 { color: #1e293b !important; }
|
||||||
|
html:not(.dark) .text-gray-300 { color: #334155 !important; }
|
||||||
|
html:not(.dark) .text-gray-400 { color: #475569 !important; }
|
||||||
|
html:not(.dark) .text-gray-500 { color: #64748b !important; }
|
||||||
|
html:not(.dark) .text-gray-600 { color: #94a3b8 !important; }
|
||||||
|
|
||||||
|
/* Hover states */
|
||||||
|
html:not(.dark) .hover\:bg-gray-900:hover { background-color: #f8fafc !important; }
|
||||||
|
html:not(.dark) .hover\:bg-gray-900\/50:hover { background-color: rgba(255,255,255,0.75) !important; }
|
||||||
|
html:not(.dark) .hover\:bg-gray-800:hover { background-color: #f1f5f9 !important; }
|
||||||
|
html:not(.dark) .hover\:bg-gray-700:hover { background-color: #e2e8f0 !important; }
|
||||||
|
html:not(.dark) .active\:bg-gray-800:active { background-color: #f1f5f9 !important; }
|
||||||
|
|
||||||
|
/* Hover text */
|
||||||
|
html:not(.dark) .hover\:text-gray-300:hover { color: #334155 !important; }
|
||||||
|
html:not(.dark) .hover\:text-gray-200:hover { color: #1e293b !important; }
|
||||||
|
|
||||||
|
/* ── Accent badge palette (dark → light) ─────────────────────────────────── */
|
||||||
|
|
||||||
|
/* Fire / Error */
|
||||||
|
html:not(.dark) .bg-red-900 { background-color: #fef2f2 !important; }
|
||||||
|
html:not(.dark) .bg-red-950 { background-color: #fff1f2 !important; }
|
||||||
|
html:not(.dark) .text-red-300 { color: #b91c1c !important; }
|
||||||
|
html:not(.dark) .text-red-400 { color: #dc2626 !important; }
|
||||||
|
html:not(.dark) .border-red-800 { border-color: #fca5a5 !important; }
|
||||||
|
|
||||||
|
/* Police */
|
||||||
|
html:not(.dark) .bg-blue-900 { background-color: #eff6ff !important; }
|
||||||
|
html:not(.dark) .bg-blue-950 { background-color: #eff6ff !important; }
|
||||||
|
html:not(.dark) .text-blue-300 { color: #1d4ed8 !important; }
|
||||||
|
html:not(.dark) .border-blue-800 { border-color: #93c5fd !important; }
|
||||||
|
|
||||||
|
/* EMS */
|
||||||
|
html:not(.dark) .bg-yellow-900 { background-color: #fefce8 !important; }
|
||||||
|
html:not(.dark) .bg-yellow-950 { background-color: #fefce8 !important; }
|
||||||
|
html:not(.dark) .text-yellow-300 { color: #a16207 !important; }
|
||||||
|
html:not(.dark) .text-yellow-400 { color: #ca8a04 !important; }
|
||||||
|
|
||||||
|
/* Accident / Recording */
|
||||||
|
html:not(.dark) .bg-orange-900 { background-color: #fff7ed !important; }
|
||||||
|
html:not(.dark) .bg-orange-950 { background-color: #fff7ed !important; }
|
||||||
|
html:not(.dark) .text-orange-300 { color: #c2410c !important; }
|
||||||
|
html:not(.dark) .text-orange-400 { color: #ea580c !important; }
|
||||||
|
html:not(.dark) .border-orange-800 { border-color: #fdba74 !important; }
|
||||||
|
|
||||||
|
/* Active / Online */
|
||||||
|
html:not(.dark) .bg-green-900 { background-color: #f0fdf4 !important; }
|
||||||
|
html:not(.dark) .bg-green-950 { background-color: #f0fdf4 !important; }
|
||||||
|
html:not(.dark) .text-green-300 { color: #15803d !important; }
|
||||||
|
html:not(.dark) .text-green-400 { color: #16a34a !important; }
|
||||||
|
html:not(.dark) .border-green-800 { border-color: #86efac !important; }
|
||||||
|
|
||||||
|
/* Unconfigured / Info */
|
||||||
|
html:not(.dark) .bg-indigo-950 { background-color: #eef2ff !important; }
|
||||||
|
html:not(.dark) .bg-indigo-900 { background-color: #eef2ff !important; }
|
||||||
|
html:not(.dark) .text-indigo-300 { color: #4338ca !important; }
|
||||||
|
html:not(.dark) .text-indigo-400 { color: #6366f1 !important; }
|
||||||
|
html:not(.dark) .border-indigo-800 { border-color: #a5b4fc !important; }
|
||||||
|
|
||||||
|
/* ── Form inputs ─────────────────────────────────────────────────────────── */
|
||||||
|
html:not(.dark) input:not([type="submit"]):not([type="button"]):not([type="reset"]),
|
||||||
|
html:not(.dark) select,
|
||||||
|
html:not(.dark) textarea {
|
||||||
|
color: #0f172a;
|
||||||
|
}
|
||||||
|
html:not(.dark) input::placeholder,
|
||||||
|
html:not(.dark) textarea::placeholder {
|
||||||
|
color: #94a3b8;
|
||||||
|
}
|
||||||
|
|||||||
@@ -15,6 +15,22 @@ const TYPE_COLORS: Record<string, string> = {
|
|||||||
other: "bg-gray-800 text-gray-300",
|
other: "bg-gray-800 text-gray-300",
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const SEVERITY_COLORS: Record<string, string> = {
|
||||||
|
major: "bg-red-950 text-red-400",
|
||||||
|
moderate: "bg-orange-950 text-orange-400",
|
||||||
|
minor: "bg-gray-800 text-gray-400",
|
||||||
|
};
|
||||||
|
|
||||||
|
function severityBadge(severity: string | null | undefined) {
|
||||||
|
if (!severity || severity === "unknown") return null;
|
||||||
|
const cls = SEVERITY_COLORS[severity] ?? "bg-gray-800 text-gray-400";
|
||||||
|
return (
|
||||||
|
<span className={`text-xs font-mono px-2 py-0.5 rounded-full capitalize ${cls}`}>
|
||||||
|
{severity}
|
||||||
|
</span>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
function typeBadge(type: string | null) {
|
function typeBadge(type: string | null) {
|
||||||
const cls = TYPE_COLORS[type ?? "other"] ?? TYPE_COLORS.other;
|
const cls = TYPE_COLORS[type ?? "other"] ?? TYPE_COLORS.other;
|
||||||
return (
|
return (
|
||||||
@@ -51,6 +67,7 @@ function IncidentRow({ incident, isAdmin, onResolve }: {
|
|||||||
{incident.status}
|
{incident.status}
|
||||||
</span>
|
</span>
|
||||||
</td>
|
</td>
|
||||||
|
<td className="px-4 py-3">{severityBadge(incident.severity)}</td>
|
||||||
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{incident.call_ids.length}</td>
|
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{incident.call_ids.length}</td>
|
||||||
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{fmtTime(incident.started_at)}</td>
|
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{fmtTime(incident.started_at)}</td>
|
||||||
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{fmtTime(incident.updated_at)}</td>
|
<td className="px-4 py-3 text-gray-400 text-xs font-mono">{fmtTime(incident.updated_at)}</td>
|
||||||
@@ -167,9 +184,12 @@ function IncidentCards({ incidents, isAdmin, onResolve }: {
|
|||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
<p className="text-white text-sm font-semibold leading-snug">{inc.title ?? "—"}</p>
|
<p className="text-white text-sm font-semibold leading-snug">{inc.title ?? "—"}</p>
|
||||||
<p className="text-gray-500 text-xs mt-1 font-mono">
|
<div className="flex items-center gap-2 mt-1">
|
||||||
{fmtTime(inc.started_at)} · {inc.call_ids.length} call{inc.call_ids.length !== 1 ? "s" : ""}
|
{severityBadge(inc.severity)}
|
||||||
</p>
|
<p className="text-gray-500 text-xs font-mono">
|
||||||
|
{fmtTime(inc.started_at)} · {inc.call_ids.length} call{inc.call_ids.length !== 1 ? "s" : ""}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
@@ -196,6 +216,7 @@ function IncidentTable({ incidents, isAdmin, onResolve }: {
|
|||||||
<th className="px-4 py-3">Type</th>
|
<th className="px-4 py-3">Type</th>
|
||||||
<th className="px-4 py-3">Title</th>
|
<th className="px-4 py-3">Title</th>
|
||||||
<th className="px-4 py-3">Status</th>
|
<th className="px-4 py-3">Status</th>
|
||||||
|
<th className="px-4 py-3">Severity</th>
|
||||||
<th className="px-4 py-3">Calls</th>
|
<th className="px-4 py-3">Calls</th>
|
||||||
<th className="px-4 py-3">Started</th>
|
<th className="px-4 py-3">Started</th>
|
||||||
<th className="px-4 py-3">Updated</th>
|
<th className="px-4 py-3">Updated</th>
|
||||||
@@ -232,7 +253,7 @@ export default function IncidentsPage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="p-6 max-w-7xl mx-auto space-y-8">
|
<div className="space-y-8">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<div className="flex items-center gap-3">
|
<div className="flex items-center gap-3">
|
||||||
<h1 className="text-white text-xl font-bold font-mono">Incidents</h1>
|
<h1 className="text-white text-xl font-bold font-mono">Incidents</h1>
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
import type { Metadata } from "next";
|
import type { Metadata } from "next";
|
||||||
import { Nav } from "@/components/Nav";
|
import { Nav } from "@/components/Nav";
|
||||||
import { AuthProvider } from "@/components/AuthProvider";
|
import { AuthProvider } from "@/components/AuthProvider";
|
||||||
|
import { ThemeProvider } from "@/components/ThemeProvider";
|
||||||
import "./globals.css";
|
import "./globals.css";
|
||||||
|
|
||||||
export const metadata: Metadata = {
|
export const metadata: Metadata = {
|
||||||
@@ -10,12 +11,18 @@ export const metadata: Metadata = {
|
|||||||
|
|
||||||
export default function RootLayout({ children }: { children: React.ReactNode }) {
|
export default function RootLayout({ children }: { children: React.ReactNode }) {
|
||||||
return (
|
return (
|
||||||
<html lang="en" className="dark">
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
{/* Prevent flash of wrong theme before React hydrates */}
|
||||||
|
<script dangerouslySetInnerHTML={{ __html: `(function(){try{var t=localStorage.getItem('drb-theme');if(t!=='light')document.documentElement.classList.add('dark');}catch(e){}})();` }} />
|
||||||
|
</head>
|
||||||
<body className="min-h-screen bg-gray-950">
|
<body className="min-h-screen bg-gray-950">
|
||||||
<AuthProvider>
|
<ThemeProvider>
|
||||||
<Nav />
|
<AuthProvider>
|
||||||
<main className="p-6">{children}</main>
|
<Nav />
|
||||||
</AuthProvider>
|
<main className="max-w-screen-2xl mx-auto px-4 md:px-6 py-6">{children}</main>
|
||||||
|
</AuthProvider>
|
||||||
|
</ThemeProvider>
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -50,26 +50,14 @@ export default function MapPage() {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-4">
|
<div className="space-y-4">
|
||||||
<div className="flex items-center justify-between">
|
<h1 className="text-xl font-bold text-white font-mono">Map</h1>
|
||||||
<h1 className="text-xl font-bold text-white font-mono">Map</h1>
|
|
||||||
<div className="flex items-center gap-4 text-xs font-mono text-gray-400">
|
|
||||||
<span><span className="text-green-400">●</span> Online</span>
|
|
||||||
<span><span className="text-orange-400 animate-pulse">●</span> Recording</span>
|
|
||||||
<span><span className="text-indigo-400">●</span> Unconfigured</span>
|
|
||||||
<span><span className="text-gray-600">●</span> Offline</span>
|
|
||||||
<span className="border-l border-gray-700 pl-4"><span className="text-red-500">■</span> Fire</span>
|
|
||||||
<span><span className="text-blue-500">■</span> Police</span>
|
|
||||||
<span><span className="text-yellow-500">■</span> EMS</span>
|
|
||||||
<span><span className="text-orange-500">■</span> Accident</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{loading ? (
|
{loading ? (
|
||||||
<div className="flex items-center justify-center h-96 text-gray-600 font-mono text-sm">
|
<div className="flex items-center justify-center h-96 text-gray-600 font-mono text-sm">
|
||||||
Loading map…
|
Loading map…
|
||||||
</div>
|
</div>
|
||||||
) : (
|
) : (
|
||||||
<div style={{ height: "calc(100vh - 280px)", minHeight: "400px" }}>
|
<div className="h-[50vh] sm:h-[65vh] min-h-[400px]">
|
||||||
<MapView nodes={nodes} activeCalls={activeCalls} incidents={incidents} />
|
<MapView nodes={nodes} activeCalls={activeCalls} incidents={incidents} />
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
import { useState } from "react";
|
import { useState } from "react";
|
||||||
import { useSystems } from "@/lib/useSystems";
|
import { useSystems } from "@/lib/useSystems";
|
||||||
import { c2api } from "@/lib/c2api";
|
import { c2api } from "@/lib/c2api";
|
||||||
import type { SystemRecord } from "@/lib/types";
|
import type { SystemRecord, VocabularyPendingTerm } from "@/lib/types";
|
||||||
|
|
||||||
// ── P25 structured config types ──────────────────────────────────────────────
|
// ── P25 structured config types ──────────────────────────────────────────────
|
||||||
|
|
||||||
@@ -433,6 +433,280 @@ function SystemForm({
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ── Per-system AI flags panel ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
interface SystemAiFlags {
|
||||||
|
stt_enabled?: boolean;
|
||||||
|
correlation_enabled?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
function AiFlagsPanel({ systemId, initial }: { systemId: string; initial: SystemAiFlags }) {
|
||||||
|
const [flags, setFlags] = useState<SystemAiFlags>(initial);
|
||||||
|
const [saving, setSaving] = useState<string | null>(null);
|
||||||
|
const [open, setOpen] = useState(false);
|
||||||
|
|
||||||
|
async function handleToggle(key: keyof SystemAiFlags, value: boolean) {
|
||||||
|
setSaving(key);
|
||||||
|
try {
|
||||||
|
const result = await c2api.setSystemAiFlags(systemId, { [key]: value });
|
||||||
|
setFlags(result.ai_flags as SystemAiFlags);
|
||||||
|
} finally {
|
||||||
|
setSaving(null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleClear(key: keyof SystemAiFlags) {
|
||||||
|
setSaving(key);
|
||||||
|
try {
|
||||||
|
const result = await c2api.setSystemAiFlags(systemId, { [key]: null });
|
||||||
|
setFlags(result.ai_flags as SystemAiFlags);
|
||||||
|
} finally {
|
||||||
|
setSaving(null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const rows: { key: keyof SystemAiFlags; label: string }[] = [
|
||||||
|
{ key: "stt_enabled", label: "Speech-to-Text" },
|
||||||
|
{ key: "correlation_enabled", label: "Incident Correlation" },
|
||||||
|
];
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="mt-3 border-t border-gray-800 pt-3">
|
||||||
|
<button
|
||||||
|
onClick={() => setOpen((v) => !v)}
|
||||||
|
className="text-xs text-gray-500 hover:text-gray-300 font-mono transition-colors flex items-center gap-1"
|
||||||
|
>
|
||||||
|
<span>{open ? "▲" : "▼"}</span>
|
||||||
|
<span>AI Flags</span>
|
||||||
|
{(flags.stt_enabled === false || flags.correlation_enabled === false) && (
|
||||||
|
<span className="ml-1.5 text-yellow-600 font-bold">!</span>
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{open && (
|
||||||
|
<div className="mt-3 space-y-2 font-mono text-xs">
|
||||||
|
{rows.map(({ key, label }) => {
|
||||||
|
const override = flags[key];
|
||||||
|
const isSet = override !== undefined;
|
||||||
|
const isOn = override !== false;
|
||||||
|
return (
|
||||||
|
<div key={key} className="flex items-center gap-3">
|
||||||
|
<button
|
||||||
|
onClick={() => handleToggle(key, !isOn)}
|
||||||
|
disabled={saving === key}
|
||||||
|
className={`relative inline-flex h-5 w-9 items-center rounded-full transition-colors disabled:opacity-50 ${
|
||||||
|
isOn ? "bg-indigo-600" : "bg-gray-700"
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
<span className={`inline-block h-3.5 w-3.5 rounded-full bg-white shadow transition-transform ${isOn ? "translate-x-4" : "translate-x-0.5"}`} />
|
||||||
|
</button>
|
||||||
|
<span className="text-gray-300 flex-1">{label}</span>
|
||||||
|
{isSet ? (
|
||||||
|
<button
|
||||||
|
onClick={() => handleClear(key)}
|
||||||
|
disabled={saving === key}
|
||||||
|
className="text-gray-600 hover:text-gray-400 transition-colors"
|
||||||
|
title="Clear override (inherit global)"
|
||||||
|
>
|
||||||
|
reset
|
||||||
|
</button>
|
||||||
|
) : (
|
||||||
|
<span className="text-gray-700">inherits global</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
})}
|
||||||
|
<p className="text-gray-700 pt-1">Overrides apply on top of global AI flags. "reset" restores global default.</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ── Vocabulary panel ──────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
function VocabularyPanel({ systemId }: { systemId: string }) {
|
||||||
|
const [vocab, setVocab] = useState<string[] | null>(null);
|
||||||
|
const [pending, setPending] = useState<VocabularyPendingTerm[]>([]);
|
||||||
|
const [bootstrapped, setBootstrapped] = useState(false);
|
||||||
|
const [loading, setLoading] = useState(false);
|
||||||
|
const [bootstrapping, setBootstrapping] = useState(false);
|
||||||
|
const [newTerm, setNewTerm] = useState("");
|
||||||
|
const [adding, setAdding] = useState(false);
|
||||||
|
const [open, setOpen] = useState(false);
|
||||||
|
|
||||||
|
async function load() {
|
||||||
|
if (vocab !== null) return; // already loaded
|
||||||
|
setLoading(true);
|
||||||
|
try {
|
||||||
|
const data = await c2api.getVocabulary(systemId);
|
||||||
|
setVocab(data.vocabulary);
|
||||||
|
setPending(data.vocabulary_pending);
|
||||||
|
setBootstrapped(data.vocabulary_bootstrapped);
|
||||||
|
} finally {
|
||||||
|
setLoading(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function toggle() {
|
||||||
|
if (!open) load();
|
||||||
|
setOpen((v) => !v);
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleBootstrap() {
|
||||||
|
setBootstrapping(true);
|
||||||
|
try {
|
||||||
|
const result = await c2api.bootstrapVocabulary(systemId);
|
||||||
|
const data = await c2api.getVocabulary(systemId);
|
||||||
|
setVocab(data.vocabulary);
|
||||||
|
setPending(data.vocabulary_pending);
|
||||||
|
setBootstrapped(true);
|
||||||
|
alert(`Bootstrap added ${result.added} term(s).`);
|
||||||
|
} finally {
|
||||||
|
setBootstrapping(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleAdd(e: React.FormEvent) {
|
||||||
|
e.preventDefault();
|
||||||
|
const term = newTerm.trim();
|
||||||
|
if (!term) return;
|
||||||
|
setAdding(true);
|
||||||
|
try {
|
||||||
|
await c2api.addVocabularyTerm(systemId, term);
|
||||||
|
setVocab((v) => (v ? [...v, term] : [term]));
|
||||||
|
setNewTerm("");
|
||||||
|
} finally {
|
||||||
|
setAdding(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleRemove(term: string) {
|
||||||
|
await c2api.removeVocabularyTerm(systemId, term);
|
||||||
|
setVocab((v) => (v ?? []).filter((t) => t !== term));
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleApprove(term: string) {
|
||||||
|
await c2api.approvePendingTerm(systemId, term);
|
||||||
|
setVocab((v) => (v ? [...v, term] : [term]));
|
||||||
|
setPending((p) => p.filter((t) => t.term !== term));
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleDismiss(term: string) {
|
||||||
|
await c2api.dismissPendingTerm(systemId, term);
|
||||||
|
setPending((p) => p.filter((t) => t.term !== term));
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="mt-3 border-t border-gray-800 pt-3">
|
||||||
|
<button
|
||||||
|
onClick={toggle}
|
||||||
|
className="text-xs text-gray-500 hover:text-gray-300 font-mono transition-colors flex items-center gap-1"
|
||||||
|
>
|
||||||
|
<span>{open ? "▲" : "▼"}</span>
|
||||||
|
<span>
|
||||||
|
Vocabulary
|
||||||
|
{vocab !== null && <span className="text-gray-600 ml-1">({vocab.length} terms{pending.length > 0 ? `, ${pending.length} pending` : ""})</span>}
|
||||||
|
</span>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{open && (
|
||||||
|
<div className="mt-3 space-y-3 font-mono text-xs">
|
||||||
|
{loading && <p className="text-gray-600 italic">Loading…</p>}
|
||||||
|
|
||||||
|
{!loading && vocab !== null && (
|
||||||
|
<>
|
||||||
|
{/* Bootstrap button */}
|
||||||
|
<div className="flex items-center gap-3">
|
||||||
|
<button
|
||||||
|
onClick={handleBootstrap}
|
||||||
|
disabled={bootstrapping}
|
||||||
|
className="bg-indigo-800 hover:bg-indigo-700 disabled:opacity-50 text-indigo-200 px-3 py-1.5 rounded-lg text-xs transition-colors"
|
||||||
|
>
|
||||||
|
{bootstrapping ? "Bootstrapping…" : bootstrapped ? "Re-bootstrap with AI" : "Bootstrap with AI"}
|
||||||
|
</button>
|
||||||
|
<span className="text-gray-600">GPT-4o generates local knowledge (agencies, units, streets)</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Approved vocabulary chips */}
|
||||||
|
<div>
|
||||||
|
<p className="text-gray-500 uppercase tracking-wider mb-1.5">Approved ({vocab.length})</p>
|
||||||
|
{vocab.length > 0 ? (
|
||||||
|
<div className="flex flex-wrap gap-1.5">
|
||||||
|
{vocab.map((term) => (
|
||||||
|
<span
|
||||||
|
key={term}
|
||||||
|
className="inline-flex items-center gap-1 bg-gray-800 text-gray-300 px-2 py-0.5 rounded-full"
|
||||||
|
>
|
||||||
|
{term}
|
||||||
|
<button
|
||||||
|
onClick={() => handleRemove(term)}
|
||||||
|
className="text-gray-600 hover:text-red-400 transition-colors leading-none"
|
||||||
|
>
|
||||||
|
×
|
||||||
|
</button>
|
||||||
|
</span>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<p className="text-gray-600 italic">No terms yet — bootstrap or add manually.</p>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Add term */}
|
||||||
|
<form onSubmit={handleAdd} className="flex gap-2">
|
||||||
|
<input
|
||||||
|
value={newTerm}
|
||||||
|
onChange={(e) => setNewTerm(e.target.value)}
|
||||||
|
placeholder="Add term (e.g. 5-baker, YVAC)"
|
||||||
|
className="flex-1 bg-gray-800 border border-gray-700 rounded-lg px-3 py-1.5 text-white text-xs font-mono focus:outline-none focus:border-indigo-500"
|
||||||
|
/>
|
||||||
|
<button
|
||||||
|
type="submit"
|
||||||
|
disabled={adding || !newTerm.trim()}
|
||||||
|
className="bg-gray-700 hover:bg-gray-600 disabled:opacity-50 text-gray-200 px-3 py-1.5 rounded-lg transition-colors"
|
||||||
|
>
|
||||||
|
Add
|
||||||
|
</button>
|
||||||
|
</form>
|
||||||
|
|
||||||
|
{/* Pending induction suggestions */}
|
||||||
|
{pending.length > 0 && (
|
||||||
|
<div>
|
||||||
|
<p className="text-gray-500 uppercase tracking-wider mb-1.5">
|
||||||
|
Induction suggestions ({pending.length})
|
||||||
|
</p>
|
||||||
|
<div className="space-y-1">
|
||||||
|
{pending.map((p) => (
|
||||||
|
<div key={p.term} className="flex items-center gap-2">
|
||||||
|
<span className="text-gray-300 flex-1">{p.term}</span>
|
||||||
|
<span className="text-gray-600">{p.source}</span>
|
||||||
|
<button
|
||||||
|
onClick={() => handleApprove(p.term)}
|
||||||
|
className="text-green-500 hover:text-green-400 transition-colors px-1"
|
||||||
|
>
|
||||||
|
✓
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
onClick={() => handleDismiss(p.term)}
|
||||||
|
className="text-gray-600 hover:text-red-400 transition-colors px-1"
|
||||||
|
>
|
||||||
|
✕
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
// ── Systems list page ─────────────────────────────────────────────────────────
|
// ── Systems list page ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
export default function SystemsPage() {
|
export default function SystemsPage() {
|
||||||
@@ -445,7 +719,7 @@ export default function SystemsPage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6 max-w-3xl">
|
<div className="space-y-6">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<h1 className="text-xl font-bold text-white font-mono">Systems</h1>
|
<h1 className="text-xl font-bold text-white font-mono">Systems</h1>
|
||||||
<button
|
<button
|
||||||
@@ -509,6 +783,8 @@ export default function SystemsPage() {
|
|||||||
Delete
|
Delete
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
|
<AiFlagsPanel systemId={s.system_id} initial={(s as unknown as { ai_flags?: SystemAiFlags }).ai_flags ?? {}} />
|
||||||
|
<VocabularyPanel systemId={s.system_id} />
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
})}
|
})}
|
||||||
|
|||||||
@@ -70,7 +70,7 @@ export default function TokensPage() {
|
|||||||
if (authLoading || !isAdmin) return null;
|
if (authLoading || !isAdmin) return null;
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6 max-w-2xl">
|
<div className="space-y-6">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<div>
|
<div>
|
||||||
<h1 className="text-xl font-bold text-white font-mono">Bot Token Pool</h1>
|
<h1 className="text-xl font-bold text-white font-mono">Bot Token Pool</h1>
|
||||||
|
|||||||
@@ -31,8 +31,13 @@ export function CallRow({ call, systemName, isAdmin }: Props) {
|
|||||||
const [editText, setEditText] = useState("");
|
const [editText, setEditText] = useState("");
|
||||||
const [saving, setSaving] = useState(false);
|
const [saving, setSaving] = useState(false);
|
||||||
const [saveError, setSaveError] = useState<string | null>(null);
|
const [saveError, setSaveError] = useState<string | null>(null);
|
||||||
|
// Resolve incident links: prefer new list, fall back to legacy single field.
|
||||||
|
const incidentIds: string[] = (call.incident_ids?.length ?? 0) > 0
|
||||||
|
? call.incident_ids
|
||||||
|
: call.incident_id ? [call.incident_id] : [];
|
||||||
|
|
||||||
const isActive = call.status === "active";
|
const isActive = call.status === "active";
|
||||||
const hasDetails = call.transcript || call.transcript_corrected || (call.tags && call.tags.length > 0) || call.incident_id;
|
const hasDetails = call.transcript || call.transcript_corrected || (call.tags && call.tags.length > 0) || incidentIds.length > 0 || call.audio_url;
|
||||||
const displayTranscript = (!showOriginal && call.transcript_corrected) ? call.transcript_corrected : call.transcript;
|
const displayTranscript = (!showOriginal && call.transcript_corrected) ? call.transcript_corrected : call.transcript;
|
||||||
const hasBoth = !!(call.transcript && call.transcript_corrected);
|
const hasBoth = !!(call.transcript && call.transcript_corrected);
|
||||||
const hasSegments = call.segments && call.segments.length > 1;
|
const hasSegments = call.segments && call.segments.length > 1;
|
||||||
@@ -82,19 +87,11 @@ export function CallRow({ call, systemName, isAdmin }: Props) {
|
|||||||
<span className="text-gray-500">{duration(call.started_at, call.ended_at)}</span>
|
<span className="text-gray-500">{duration(call.started_at, call.ended_at)}</span>
|
||||||
)}
|
)}
|
||||||
</td>
|
</td>
|
||||||
<td className="px-4 py-2">
|
<td className="px-4 py-2 text-xs">
|
||||||
{call.audio_url ? (
|
{call.audio_url ? (
|
||||||
<a
|
<span className="text-blue-400">▶</span>
|
||||||
href={call.audio_url}
|
|
||||||
target="_blank"
|
|
||||||
rel="noopener noreferrer"
|
|
||||||
onClick={(e) => e.stopPropagation()}
|
|
||||||
className="text-blue-400 hover:text-blue-300 text-xs"
|
|
||||||
>
|
|
||||||
audio
|
|
||||||
</a>
|
|
||||||
) : (
|
) : (
|
||||||
<span className="text-gray-700 text-xs">—</span>
|
<span className="text-gray-700">—</span>
|
||||||
)}
|
)}
|
||||||
</td>
|
</td>
|
||||||
<td className="px-4 py-2 text-gray-600 text-xs">
|
<td className="px-4 py-2 text-gray-600 text-xs">
|
||||||
@@ -105,6 +102,16 @@ export function CallRow({ call, systemName, isAdmin }: Props) {
|
|||||||
{expanded && hasDetails && (
|
{expanded && hasDetails && (
|
||||||
<tr className="bg-gray-900/60 border-b border-gray-800">
|
<tr className="bg-gray-900/60 border-b border-gray-800">
|
||||||
<td colSpan={7} className="px-6 py-3 space-y-2">
|
<td colSpan={7} className="px-6 py-3 space-y-2">
|
||||||
|
{/* Audio player */}
|
||||||
|
{call.audio_url && (
|
||||||
|
<audio
|
||||||
|
controls
|
||||||
|
src={call.audio_url}
|
||||||
|
className="w-full max-w-sm h-8"
|
||||||
|
onClick={(e) => e.stopPropagation()}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
|
||||||
{/* Tags */}
|
{/* Tags */}
|
||||||
{call.tags && call.tags.length > 0 && (
|
{call.tags && call.tags.length > 0 && (
|
||||||
<div className="flex flex-wrap gap-1">
|
<div className="flex flex-wrap gap-1">
|
||||||
@@ -119,14 +126,39 @@ export function CallRow({ call, systemName, isAdmin }: Props) {
|
|||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{/* Incident link */}
|
{/* Incident links — one per scene detected in the recording */}
|
||||||
{call.incident_id && (
|
{incidentIds.length > 0 && (
|
||||||
<p className="text-xs font-mono text-indigo-400">
|
<div className="flex flex-wrap gap-x-3 gap-y-0.5 text-xs font-mono text-indigo-400">
|
||||||
Incident:{" "}
|
<span className="text-gray-600">{incidentIds.length === 1 ? "Incident:" : "Incidents:"}</span>
|
||||||
<a href={`/incidents/${call.incident_id}`} className="underline hover:text-indigo-300">
|
{incidentIds.map((id) => (
|
||||||
{call.incident_id.slice(0, 8)}…
|
<a key={id} href={`/incidents/${id}`} className="underline hover:text-indigo-300">
|
||||||
</a>
|
{id.slice(0, 8)}…
|
||||||
</p>
|
</a>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Correlation debug — admin only */}
|
||||||
|
{isAdmin && call.corr_path && (
|
||||||
|
<div className="flex flex-wrap gap-x-3 gap-y-0.5 text-xs font-mono text-gray-600">
|
||||||
|
<span>corr:</span>
|
||||||
|
<span className="text-gray-400">{call.corr_path}</span>
|
||||||
|
{call.corr_incident_idle_min != null && (
|
||||||
|
<span>idle {call.corr_incident_idle_min}min</span>
|
||||||
|
)}
|
||||||
|
{call.corr_score != null && (
|
||||||
|
<span>sim={call.corr_score.toFixed(3)}</span>
|
||||||
|
)}
|
||||||
|
{call.corr_distance_km != null && (
|
||||||
|
<span>dist={call.corr_distance_km}km</span>
|
||||||
|
)}
|
||||||
|
{call.corr_shared_units != null && (
|
||||||
|
<span>{call.corr_shared_units} shared units</span>
|
||||||
|
)}
|
||||||
|
{call.corr_candidates != null && (
|
||||||
|
<span>{call.corr_candidates} candidates</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{/* Transcript */}
|
{/* Transcript */}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
"use client";
|
"use client";
|
||||||
|
|
||||||
import { MapContainer, TileLayer, Marker, Popup } from "react-leaflet";
|
import { MapContainer, TileLayer, Marker, Popup, LayersControl, FeatureGroup } from "react-leaflet";
|
||||||
import L from "leaflet";
|
import L from "leaflet";
|
||||||
import type { NodeRecord, CallRecord, IncidentRecord } from "@/lib/types";
|
import type { NodeRecord, CallRecord, IncidentRecord } from "@/lib/types";
|
||||||
|
|
||||||
@@ -59,7 +59,6 @@ export default function MapView({ nodes, activeCalls, incidents = [] }: Props) {
|
|||||||
activeCalls.map((c) => [c.node_id, c])
|
activeCalls.map((c) => [c.node_id, c])
|
||||||
);
|
);
|
||||||
|
|
||||||
// Only show incidents that have been geocoded (location_coords set by the server).
|
|
||||||
const plottedIncidents = incidents.flatMap((inc) =>
|
const plottedIncidents = incidents.flatMap((inc) =>
|
||||||
inc.location_coords
|
inc.location_coords
|
||||||
? [{ inc, pos: [inc.location_coords.lat, inc.location_coords.lng] as [number, number] }]
|
? [{ inc, pos: [inc.location_coords.lat, inc.location_coords.lng] as [number, number] }]
|
||||||
@@ -81,64 +80,104 @@ export default function MapView({ nodes, activeCalls, incidents = [] }: Props) {
|
|||||||
: 4;
|
: 4;
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<MapContainer
|
<div className="relative w-full h-full">
|
||||||
center={center}
|
<MapContainer
|
||||||
zoom={zoom}
|
center={center}
|
||||||
className="w-full h-full rounded-lg"
|
zoom={zoom}
|
||||||
style={{ background: "#111827" }}
|
className="w-full h-full rounded-lg"
|
||||||
>
|
style={{ background: "#111827" }}
|
||||||
<TileLayer
|
>
|
||||||
url="https://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}{r}.png"
|
<LayersControl position="topright">
|
||||||
attribution='© <a href="https://carto.com/">CARTO</a>'
|
{/* Base layers */}
|
||||||
/>
|
<LayersControl.BaseLayer checked name="Dark">
|
||||||
|
<TileLayer
|
||||||
|
url="https://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}{r}.png"
|
||||||
|
attribution='© <a href="https://carto.com/">CARTO</a>'
|
||||||
|
/>
|
||||||
|
</LayersControl.BaseLayer>
|
||||||
|
<LayersControl.BaseLayer name="Light">
|
||||||
|
<TileLayer
|
||||||
|
url="https://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}{r}.png"
|
||||||
|
attribution='© <a href="https://carto.com/">CARTO</a>'
|
||||||
|
/>
|
||||||
|
</LayersControl.BaseLayer>
|
||||||
|
<LayersControl.BaseLayer name="Streets">
|
||||||
|
<TileLayer
|
||||||
|
url="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png"
|
||||||
|
attribution='© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a>'
|
||||||
|
/>
|
||||||
|
</LayersControl.BaseLayer>
|
||||||
|
|
||||||
{/* Node markers */}
|
{/* Overlay: Nodes */}
|
||||||
{nodes.map((node) => (
|
<LayersControl.Overlay checked name="Nodes">
|
||||||
<Marker
|
<FeatureGroup>
|
||||||
key={node.node_id}
|
{nodes.map((node) => (
|
||||||
position={[node.lat, node.lon]}
|
<Marker
|
||||||
icon={nodeIcon(node.status)}
|
key={node.node_id}
|
||||||
>
|
position={[node.lat, node.lon]}
|
||||||
<Popup className="font-mono">
|
icon={nodeIcon(node.status)}
|
||||||
<div className="text-gray-900">
|
>
|
||||||
<p className="font-bold">{node.name}</p>
|
<Popup className="font-mono">
|
||||||
<p className="text-xs text-gray-500">{node.node_id}</p>
|
<div className="text-gray-900">
|
||||||
<p className="text-xs mt-1 capitalize">{node.status}</p>
|
<p className="font-bold">{node.name}</p>
|
||||||
{activeByNode[node.node_id] && (
|
<p className="text-xs text-gray-500">{node.node_id}</p>
|
||||||
<p className="text-xs text-orange-600 mt-1">
|
<p className="text-xs mt-1 capitalize">{node.status}</p>
|
||||||
● TG {activeByNode[node.node_id].talkgroup_id ?? "—"}{" "}
|
{activeByNode[node.node_id] && (
|
||||||
{activeByNode[node.node_id].talkgroup_name}
|
<p className="text-xs text-orange-600 mt-1">
|
||||||
</p>
|
● TG {activeByNode[node.node_id].talkgroup_id ?? "—"}{" "}
|
||||||
)}
|
{activeByNode[node.node_id].talkgroup_name}
|
||||||
</div>
|
</p>
|
||||||
</Popup>
|
)}
|
||||||
</Marker>
|
</div>
|
||||||
))}
|
</Popup>
|
||||||
|
</Marker>
|
||||||
|
))}
|
||||||
|
</FeatureGroup>
|
||||||
|
</LayersControl.Overlay>
|
||||||
|
|
||||||
{/* Incident markers — positioned at the node covering the incident's system */}
|
{/* Overlay: Active Incidents */}
|
||||||
{plottedIncidents.map(({ inc, pos }) => (
|
<LayersControl.Overlay checked name="Active Incidents">
|
||||||
<Marker
|
<FeatureGroup>
|
||||||
key={inc.incident_id}
|
{plottedIncidents.map(({ inc, pos }) => (
|
||||||
position={pos}
|
<Marker
|
||||||
icon={incidentIcon(inc.type)}
|
key={inc.incident_id}
|
||||||
>
|
position={pos}
|
||||||
<Popup className="font-mono">
|
icon={incidentIcon(inc.type)}
|
||||||
<div className="text-gray-900">
|
>
|
||||||
<p className="font-bold">{inc.title ?? "Incident"}</p>
|
<Popup className="font-mono">
|
||||||
<p className="text-xs capitalize" style={{ color: INCIDENT_COLORS[inc.type ?? "other"] ?? INCIDENT_COLORS.other }}>
|
<div className="text-gray-900">
|
||||||
{inc.type ?? "other"}
|
<p className="font-bold">{inc.title ?? "Incident"}</p>
|
||||||
</p>
|
<p className="text-xs capitalize" style={{ color: INCIDENT_COLORS[inc.type ?? "other"] ?? INCIDENT_COLORS.other }}>
|
||||||
<p className="text-xs mt-1 capitalize">{inc.status}</p>
|
{inc.type ?? "other"}
|
||||||
{inc.location && <p className="text-xs text-gray-600 mt-1">{inc.location}</p>}
|
</p>
|
||||||
<p className="text-xs text-gray-500">{inc.call_ids.length} call{inc.call_ids.length !== 1 ? "s" : ""}</p>
|
<p className="text-xs mt-1 capitalize">{inc.status}</p>
|
||||||
{inc.summary && <p className="text-xs mt-1">{inc.summary}</p>}
|
{inc.location && <p className="text-xs text-gray-600 mt-1">{inc.location}</p>}
|
||||||
<a href={`/incidents/${inc.incident_id}`} className="text-xs text-blue-600 hover:underline mt-1 block">
|
<p className="text-xs text-gray-500">{inc.call_ids.length} call{inc.call_ids.length !== 1 ? "s" : ""}</p>
|
||||||
View incident →
|
{inc.summary && <p className="text-xs mt-1">{inc.summary}</p>}
|
||||||
</a>
|
<a href={`/incidents/${inc.incident_id}`} className="text-xs text-blue-600 hover:underline mt-1 block">
|
||||||
</div>
|
View incident →
|
||||||
</Popup>
|
</a>
|
||||||
</Marker>
|
</div>
|
||||||
))}
|
</Popup>
|
||||||
</MapContainer>
|
</Marker>
|
||||||
|
))}
|
||||||
|
</FeatureGroup>
|
||||||
|
</LayersControl.Overlay>
|
||||||
|
</LayersControl>
|
||||||
|
</MapContainer>
|
||||||
|
|
||||||
|
{/* Legend overlay — inside the map wrapper, above tiles */}
|
||||||
|
<div className="absolute bottom-8 left-3 z-[1001] bg-gray-950/90 border border-gray-800 rounded-lg px-3 py-2 text-xs font-mono pointer-events-none space-y-1">
|
||||||
|
<div className="flex items-center gap-2"><span className="text-green-400">●</span> Online</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-orange-400">●</span> Recording</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-indigo-400">●</span> Unconfigured</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-gray-500">●</span> Offline</div>
|
||||||
|
<div className="border-t border-gray-800 my-0.5" />
|
||||||
|
<div className="flex items-center gap-2"><span className="text-red-500">■</span> Fire</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-blue-500">■</span> Police</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-yellow-500">■</span> EMS</div>
|
||||||
|
<div className="flex items-center gap-2"><span className="text-orange-500">■</span> Accident</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
+132
-32
@@ -1,10 +1,12 @@
|
|||||||
"use client";
|
"use client";
|
||||||
|
|
||||||
|
import { useState } from "react";
|
||||||
import Link from "next/link";
|
import Link from "next/link";
|
||||||
import { usePathname } from "next/navigation";
|
import { usePathname } from "next/navigation";
|
||||||
import { useUnconfiguredNodes } from "@/lib/useNodes";
|
import { useUnconfiguredNodes } from "@/lib/useNodes";
|
||||||
import { useUnacknowledgedAlerts } from "@/lib/useAlerts";
|
import { useUnacknowledgedAlerts } from "@/lib/useAlerts";
|
||||||
import { useAuth } from "@/components/AuthProvider";
|
import { useAuth } from "@/components/AuthProvider";
|
||||||
|
import { useTheme } from "@/components/ThemeProvider";
|
||||||
|
|
||||||
const links = [
|
const links = [
|
||||||
{ href: "/dashboard", label: "Dashboard" },
|
{ href: "/dashboard", label: "Dashboard" },
|
||||||
@@ -18,50 +20,148 @@ const links = [
|
|||||||
|
|
||||||
const adminLinks = [
|
const adminLinks = [
|
||||||
{ href: "/tokens", label: "Tokens" },
|
{ href: "/tokens", label: "Tokens" },
|
||||||
|
{ href: "/admin", label: "Admin" },
|
||||||
];
|
];
|
||||||
|
|
||||||
|
function SunIcon() {
|
||||||
|
return (
|
||||||
|
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
|
||||||
|
<circle cx="12" cy="12" r="5"/>
|
||||||
|
<line x1="12" y1="1" x2="12" y2="3"/>
|
||||||
|
<line x1="12" y1="21" x2="12" y2="23"/>
|
||||||
|
<line x1="4.22" y1="4.22" x2="5.64" y2="5.64"/>
|
||||||
|
<line x1="18.36" y1="18.36" x2="19.78" y2="19.78"/>
|
||||||
|
<line x1="1" y1="12" x2="3" y2="12"/>
|
||||||
|
<line x1="21" y1="12" x2="23" y2="12"/>
|
||||||
|
<line x1="4.22" y1="19.78" x2="5.64" y2="18.36"/>
|
||||||
|
<line x1="18.36" y1="5.64" x2="19.78" y2="4.22"/>
|
||||||
|
</svg>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function MoonIcon() {
|
||||||
|
return (
|
||||||
|
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
|
||||||
|
<path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z"/>
|
||||||
|
</svg>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
export function Nav() {
|
export function Nav() {
|
||||||
const { user, isAdmin, signOut } = useAuth();
|
const { user, isAdmin, signOut } = useAuth();
|
||||||
const pathname = usePathname();
|
const pathname = usePathname();
|
||||||
const { nodes: pending } = useUnconfiguredNodes();
|
const { nodes: pending } = useUnconfiguredNodes();
|
||||||
const unackedAlerts = useUnacknowledgedAlerts();
|
const unackedAlerts = useUnacknowledgedAlerts();
|
||||||
|
const { theme, toggle } = useTheme();
|
||||||
|
const [mobileOpen, setMobileOpen] = useState(false);
|
||||||
|
|
||||||
if (!user) return null;
|
if (!user) return null;
|
||||||
|
|
||||||
|
const allLinks = [...links, ...(isAdmin ? adminLinks : [])];
|
||||||
|
|
||||||
|
function navLinkClass(href: string) {
|
||||||
|
return `text-sm font-mono transition-colors shrink-0 ${
|
||||||
|
pathname.startsWith(href) ? "text-white" : "text-gray-500 hover:text-gray-300"
|
||||||
|
}`;
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<nav className="border-b border-gray-800 bg-gray-950 px-6 py-3 flex items-center gap-6 overflow-x-auto">
|
<nav className="sticky top-0 z-40 border-b border-gray-800 bg-gray-950/95 backdrop-blur">
|
||||||
<span className="font-mono font-bold text-white tracking-tight mr-4 shrink-0">DRB</span>
|
{/* Main bar */}
|
||||||
{[...links, ...(isAdmin ? adminLinks : [])].map(({ href, label }) => (
|
<div className="px-4 md:px-6 py-3 flex items-center gap-4 md:gap-6">
|
||||||
<Link
|
<span className="font-mono font-bold text-white tracking-tight shrink-0">DRB</span>
|
||||||
key={href}
|
|
||||||
href={href}
|
{/* Desktop links */}
|
||||||
className={`text-sm font-mono transition-colors shrink-0 ${
|
<div className="hidden md:flex items-center gap-6 overflow-x-auto">
|
||||||
pathname.startsWith(href)
|
{allLinks.map(({ href, label }) => (
|
||||||
? "text-white"
|
<Link key={href} href={href} className={navLinkClass(href)}>
|
||||||
: "text-gray-500 hover:text-gray-300"
|
{label}
|
||||||
}`}
|
{label === "Nodes" && pending.length > 0 && (
|
||||||
>
|
<span className="ml-1.5 inline-flex items-center justify-center w-4 h-4 rounded-full bg-yellow-500 text-gray-950 text-xs font-bold">
|
||||||
{label}
|
{pending.length}
|
||||||
{label === "Nodes" && pending.length > 0 && (
|
</span>
|
||||||
<span className="ml-1.5 inline-flex items-center justify-center w-4 h-4 rounded-full bg-yellow-500 text-gray-950 text-xs font-bold">
|
)}
|
||||||
{pending.length}
|
{label === "Alerts" && unackedAlerts.length > 0 && (
|
||||||
</span>
|
<span className="ml-1.5 inline-flex items-center justify-center min-w-[1rem] h-4 rounded-full bg-red-600 text-white text-xs font-bold px-1">
|
||||||
)}
|
{unackedAlerts.length}
|
||||||
{label === "Alerts" && unackedAlerts.length > 0 && (
|
</span>
|
||||||
<span className="ml-1.5 inline-flex items-center justify-center min-w-[1rem] h-4 rounded-full bg-red-600 text-white text-xs font-bold px-1">
|
)}
|
||||||
{unackedAlerts.length}
|
</Link>
|
||||||
</span>
|
))}
|
||||||
)}
|
</div>
|
||||||
</Link>
|
|
||||||
))}
|
<div className="ml-auto flex items-center gap-3 shrink-0">
|
||||||
<div className="ml-auto shrink-0">
|
{/* Theme toggle */}
|
||||||
<button
|
<button
|
||||||
onClick={signOut}
|
onClick={toggle}
|
||||||
className="text-sm font-mono text-gray-500 hover:text-gray-300 transition-colors"
|
className="text-gray-500 hover:text-gray-300 transition-colors"
|
||||||
>
|
title={theme === "dark" ? "Switch to light mode" : "Switch to dark mode"}
|
||||||
Sign out
|
>
|
||||||
</button>
|
{theme === "dark" ? <SunIcon /> : <MoonIcon />}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Sign out (desktop) */}
|
||||||
|
<button
|
||||||
|
onClick={signOut}
|
||||||
|
className="hidden md:block text-sm font-mono text-gray-500 hover:text-gray-300 transition-colors"
|
||||||
|
>
|
||||||
|
Sign out
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Hamburger (mobile) */}
|
||||||
|
<button
|
||||||
|
onClick={() => setMobileOpen((v) => !v)}
|
||||||
|
className="md:hidden text-gray-400 hover:text-gray-200 transition-colors p-1"
|
||||||
|
aria-label="Toggle menu"
|
||||||
|
>
|
||||||
|
{mobileOpen ? (
|
||||||
|
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round">
|
||||||
|
<line x1="18" y1="6" x2="6" y2="18"/><line x1="6" y1="6" x2="18" y2="18"/>
|
||||||
|
</svg>
|
||||||
|
) : (
|
||||||
|
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round">
|
||||||
|
<line x1="3" y1="12" x2="21" y2="12"/><line x1="3" y1="6" x2="21" y2="6"/><line x1="3" y1="18" x2="21" y2="18"/>
|
||||||
|
</svg>
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Mobile drawer */}
|
||||||
|
{mobileOpen && (
|
||||||
|
<div className="md:hidden border-t border-gray-800 bg-gray-950 px-4 py-3 flex flex-col gap-1">
|
||||||
|
{allLinks.map(({ href, label }) => (
|
||||||
|
<Link
|
||||||
|
key={href}
|
||||||
|
href={href}
|
||||||
|
onClick={() => setMobileOpen(false)}
|
||||||
|
className={`py-2 text-sm font-mono transition-colors flex items-center gap-2 ${
|
||||||
|
pathname.startsWith(href) ? "text-white" : "text-gray-500"
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{label}
|
||||||
|
{label === "Nodes" && pending.length > 0 && (
|
||||||
|
<span className="inline-flex items-center justify-center w-4 h-4 rounded-full bg-yellow-500 text-gray-950 text-xs font-bold">
|
||||||
|
{pending.length}
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
{label === "Alerts" && unackedAlerts.length > 0 && (
|
||||||
|
<span className="inline-flex items-center justify-center min-w-[1rem] h-4 rounded-full bg-red-600 text-white text-xs font-bold px-1">
|
||||||
|
{unackedAlerts.length}
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</Link>
|
||||||
|
))}
|
||||||
|
<div className="border-t border-gray-800 pt-3 mt-1">
|
||||||
|
<button
|
||||||
|
onClick={signOut}
|
||||||
|
className="text-sm font-mono text-gray-500 hover:text-gray-300 transition-colors"
|
||||||
|
>
|
||||||
|
Sign out
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</nav>
|
</nav>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,34 @@
|
|||||||
|
"use client";
|
||||||
|
|
||||||
|
import React, { createContext, useContext, useEffect, useState } from "react";
|
||||||
|
|
||||||
|
type Theme = "dark" | "light";
|
||||||
|
|
||||||
|
const ThemeContext = createContext<{ theme: Theme; toggle: () => void }>({
|
||||||
|
theme: "dark",
|
||||||
|
toggle: () => {},
|
||||||
|
});
|
||||||
|
|
||||||
|
export function useTheme() {
|
||||||
|
return useContext(ThemeContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ThemeProvider({ children }: { children: React.ReactNode }) {
|
||||||
|
const [theme, setTheme] = useState<Theme>("dark");
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
const saved = localStorage.getItem("drb-theme") as Theme | null;
|
||||||
|
if (saved === "light") setTheme("light");
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
document.documentElement.classList.toggle("dark", theme === "dark");
|
||||||
|
localStorage.setItem("drb-theme", theme);
|
||||||
|
}, [theme]);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ThemeContext.Provider value={{ theme, toggle: () => setTheme((t) => (t === "dark" ? "light" : "dark")) }}>
|
||||||
|
{children}
|
||||||
|
</ThemeContext.Provider>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -93,4 +93,39 @@ export const c2api = {
|
|||||||
// Node key management
|
// Node key management
|
||||||
reissueNodeKey: (nodeId: string) =>
|
reissueNodeKey: (nodeId: string) =>
|
||||||
request(`/nodes/${nodeId}/reissue-key`, { method: "POST" }),
|
request(`/nodes/${nodeId}/reissue-key`, { method: "POST" }),
|
||||||
|
|
||||||
|
// Ten-codes
|
||||||
|
getTenCodes: (systemId: string) =>
|
||||||
|
request<{ ten_codes: Record<string, string> }>(`/systems/${systemId}/ten-codes`),
|
||||||
|
updateTenCodes: (systemId: string, ten_codes: Record<string, string>) =>
|
||||||
|
request(`/systems/${systemId}/ten-codes`, { method: "PUT", body: JSON.stringify({ ten_codes }) }),
|
||||||
|
|
||||||
|
// Vocabulary
|
||||||
|
getVocabulary: (systemId: string) =>
|
||||||
|
request<{ vocabulary: string[]; vocabulary_pending: { term: string; source: "induction" | "correction"; added_at: string }[]; vocabulary_bootstrapped: boolean }>(
|
||||||
|
`/systems/${systemId}/vocabulary`
|
||||||
|
),
|
||||||
|
bootstrapVocabulary: (systemId: string) =>
|
||||||
|
request<{ added: number; terms: string[] }>(`/systems/${systemId}/vocabulary/bootstrap`, { method: "POST" }),
|
||||||
|
addVocabularyTerm: (systemId: string, term: string) =>
|
||||||
|
request(`/systems/${systemId}/vocabulary/terms`, { method: "POST", body: JSON.stringify({ term }) }),
|
||||||
|
removeVocabularyTerm: (systemId: string, term: string) =>
|
||||||
|
request(`/systems/${systemId}/vocabulary/terms`, { method: "DELETE", body: JSON.stringify({ term }) }),
|
||||||
|
approvePendingTerm: (systemId: string, term: string) =>
|
||||||
|
request(`/systems/${systemId}/vocabulary/pending/approve`, { method: "POST", body: JSON.stringify({ term }) }),
|
||||||
|
dismissPendingTerm: (systemId: string, term: string) =>
|
||||||
|
request(`/systems/${systemId}/vocabulary/pending/dismiss`, { method: "POST", body: JSON.stringify({ term }) }),
|
||||||
|
|
||||||
|
// Feature flags (admin)
|
||||||
|
getFeatureFlags: () =>
|
||||||
|
request<Record<string, boolean>>("/admin/features"),
|
||||||
|
setFeatureFlags: (flags: Record<string, boolean>) =>
|
||||||
|
request<Record<string, boolean>>("/admin/features", { method: "PUT", body: JSON.stringify(flags) }),
|
||||||
|
|
||||||
|
// Per-system AI flag overrides
|
||||||
|
setSystemAiFlags: (systemId: string, flags: { stt_enabled?: boolean | null; correlation_enabled?: boolean | null }) =>
|
||||||
|
request<{ ok: boolean; ai_flags: Record<string, boolean> }>(`/systems/${systemId}/ai-flags`, {
|
||||||
|
method: "PUT",
|
||||||
|
body: JSON.stringify(flags),
|
||||||
|
}),
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -13,11 +13,21 @@ export interface NodeRecord {
|
|||||||
approval_status: ApprovalStatus | null;
|
approval_status: ApprovalStatus | null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface VocabularyPendingTerm {
|
||||||
|
term: string;
|
||||||
|
source: "induction" | "correction";
|
||||||
|
added_at: string;
|
||||||
|
}
|
||||||
|
|
||||||
export interface SystemRecord {
|
export interface SystemRecord {
|
||||||
system_id: string;
|
system_id: string;
|
||||||
name: string;
|
name: string;
|
||||||
type: string; // P25 | DMR | NBFM
|
type: string; // P25 | DMR | NBFM
|
||||||
config: Record<string, unknown>;
|
config: Record<string, unknown>;
|
||||||
|
vocabulary?: string[];
|
||||||
|
vocabulary_pending?: VocabularyPendingTerm[];
|
||||||
|
vocabulary_bootstrapped?: boolean;
|
||||||
|
ten_codes?: Record<string, string>; // {"10-10": "Commercial Alarm", ...}
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface TranscriptSegment {
|
export interface TranscriptSegment {
|
||||||
@@ -39,10 +49,20 @@ export interface CallRecord {
|
|||||||
transcript: string | null;
|
transcript: string | null;
|
||||||
transcript_corrected: string | null;
|
transcript_corrected: string | null;
|
||||||
segments: TranscriptSegment[] | null;
|
segments: TranscriptSegment[] | null;
|
||||||
incident_id: string | null;
|
/** New: one entry per scene detected in the recording. */
|
||||||
|
incident_ids: string[];
|
||||||
|
/** Legacy field — present on calls recorded before the multi-scene migration. */
|
||||||
|
incident_id?: string | null;
|
||||||
location: string | null;
|
location: string | null;
|
||||||
tags: string[];
|
tags: string[];
|
||||||
status: "active" | "ended";
|
status: "active" | "ended";
|
||||||
|
// Correlation debug — written by the correlator, present after a call is linked
|
||||||
|
corr_path?: string | null;
|
||||||
|
corr_score?: number | null;
|
||||||
|
corr_distance_km?: number | null;
|
||||||
|
corr_incident_idle_min?: number | null;
|
||||||
|
corr_shared_units?: number | null;
|
||||||
|
corr_candidates?: number | null;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface IncidentRecord {
|
export interface IncidentRecord {
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ export function useCallsByIncident(incidentId: string | null) {
|
|||||||
const toISO = (v: any): string | null =>
|
const toISO = (v: any): string | null =>
|
||||||
v?.toDate?.()?.toISOString?.() ?? (typeof v === "string" ? v : null);
|
v?.toDate?.()?.toISOString?.() ?? (typeof v === "string" ? v : null);
|
||||||
|
|
||||||
const q = query(collection(db, "calls"), where("incident_id", "==", incidentId));
|
const q = query(collection(db, "calls"), where("incident_ids", "array-contains", incidentId));
|
||||||
unsubFirestore = onSnapshot(q, (snap) => {
|
unsubFirestore = onSnapshot(q, (snap) => {
|
||||||
const docs = snap.docs.map((d) => {
|
const docs = snap.docs.map((d) => {
|
||||||
const data = d.data();
|
const data = d.data();
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ const config: Config = {
|
|||||||
"./app/**/*.{ts,tsx}",
|
"./app/**/*.{ts,tsx}",
|
||||||
"./components/**/*.{ts,tsx}",
|
"./components/**/*.{ts,tsx}",
|
||||||
],
|
],
|
||||||
|
darkMode: ["class"],
|
||||||
theme: {
|
theme: {
|
||||||
extend: {
|
extend: {
|
||||||
fontFamily: {
|
fontFamily: {
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ FROM python:3.14-slim
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
COPY requirements.txt .
|
COPY requirements.txt .
|
||||||
RUN pip install --no-cache-dir -r requirements.txt
|
RUN pip install uv && uv pip install --system --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
COPY app/ ./app/
|
COPY app/ ./app/
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user