22 Commits

Author SHA1 Message Date

0a58624e50  Merge pull request 'Implement Call Recording for STT and Replay' (#3) from implement-call-recording into main (Reviewed-on: #3)  2026-01-03 19:38:04 -05:00
Logan Cusano  10554a2ff4  Properly add ffmpeg to the dockerfile install sequence  2026-01-03 19:32:20 -05:00
Logan Cusano  051eac88b0  Add call ID to the call metadata  2026-01-03 19:18:30 -05:00
Logan Cusano  d8190e307c  Standardize timestamps to UTC  2026-01-03 11:41:27 -05:00
Logan Cusano  83b995bfa5  Fix bootleg AI mistake  2026-01-03 03:12:57 -05:00
Logan Cusano  9e92da4e58  Replace http server vars with dedicated vars  2026-01-03 03:10:45 -05:00
Logan Cusano  0fe8194c39  fix upload url  2026-01-02 00:17:36 -05:00
Logan Cusano  8c106473cf  Move bucket upload to the c2 server and replaced with upload to c2 server  2025-12-30 03:01:31 -05:00
Logan Cusano  a5d5fa9de7  Install ffmpeg to test if that resolves issue with recording  2025-12-29 22:55:18 -05:00
Logan Cusano  a7de6bfb04  Fix the calls directory bug  2025-12-29 22:47:18 -05:00
Logan Cusano  3b98e3a72a  Add GCP to the requirements  2025-12-29 22:21:58 -05:00
Logan Cusano  41075a5950  init  2025-12-29 22:18:58 -05:00
de143a67fe  Merge pull request 'Implement Metadata Watcher' (#1) from metadata-watcher into main (Reviewed-on: #1)  2025-12-29 19:04:07 -05:00
Logan Cusano  ee9ce0e140  Add the radio ID to the metadata payload to track who is talking, not just what system  2025-12-29 19:02:51 -05:00
Logan Cusano  ca984be293  Implement debug logging into metadata watcher  2025-12-29 15:48:45 -05:00
Logan Cusano  b8ee991192  Update port in docker compose and update metadata watcher function to use correct OP25 endpoint  2025-12-29 15:23:18 -05:00
Logan Cusano  0a6b565651  Fix bug in op25 config where it would not create liquidsoap if saved config was loaded  2025-12-29 15:06:48 -05:00
Logan Cusano  269ce033eb  Updated op25 config functions  2025-12-29 14:09:53 -05:00
Logan Cusano  c481db6702  Update gitignore for configs  2025-12-29 14:09:26 -05:00
Logan Cusano  e740b46bfe  Add example env file  2025-12-29 13:52:23 -05:00
Logan Cusano  bae50463a7  Merge remote-tracking branch 'origin/main' into metadata-watcher  2025-12-29 03:42:07 -05:00
Logan Cusano  0d9feb2658  first attempt  2025-12-29 00:34:28 -05:00
7 changed files with 399 additions and 56 deletions

.env.example (new file)

@@ -0,0 +1,9 @@
+NODE_ID=
+MQTT_BROKER=
+ICECAST_SERVER=
+AUDIO_BUCKET=
+NODE_LAT=
+NODE_LONG=
+HTTP_SERVER_PROTOCOL=
+HTTP_SERVER_ADDRESS=
+HTTP_SERVER_PORT=

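For reference, a filled-in .env for a single node might look like the sketch below; every value is an illustrative placeholder (addresses, ports, and bucket name are assumptions, not project defaults).

NODE_ID=node-01
MQTT_BROKER=192.168.1.10
ICECAST_SERVER=192.168.1.10:8000
AUDIO_BUCKET=my-audio-bucket
NODE_LAT=40.7128
NODE_LONG=-74.0060
HTTP_SERVER_PROTOCOL=http
HTTP_SERVER_ADDRESS=192.168.1.10
HTTP_SERVER_PORT=8000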
.gitignore

@@ -2,4 +2,5 @@
 *.log
 *.db
 *.conf
-config/*
+configs/*
+*.json


@@ -7,7 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
 # Install system dependencies
 RUN apt-get update && \
     apt-get upgrade -y && \
-    apt-get install git pulseaudio pulseaudio-utils liquidsoap -y
+    apt-get install git pulseaudio pulseaudio-utils liquidsoap ffmpeg -y

 # Clone the boatbod op25 repository
 RUN git clone -b gr310 https://github.com/boatbod/op25 /op25

@@ -34,6 +34,9 @@ EXPOSE 8001 8081
 # Create and set up the configuration directory
 VOLUME ["/configs"]

+# Create the calls local cache directory
+VOLUME ["/calls"]
+
 # Set the working directory in the container
 WORKDIR /app


@@ -2,9 +2,11 @@ import csv
 import json
 import os
 import shutil
-from models.models import TalkgroupTag
+from pathlib import Path
+from models.models import TalkgroupTag, IcecastConfig
 from typing import List, Dict
 from internal.logger import create_logger
+from internal.liquidsoap_config_utils import generate_liquid_script

 LOGGER = create_logger(__name__)

@@ -28,8 +30,8 @@ def scan_local_library() -> List[Dict]:
             # Use trunking sysname or filename as the identifier
             sys_name = data.get("trunking", {}).get("sysname", filename.replace(".json", ""))
             library.append({
-                "name": sys_name,
-                "system_name": filename,
+                "system_name": sys_name,
+                "filename": filename,
                 "mode": "P25" if "trunking" in data else "NBFM"
             })
         except Exception as e:

@@ -44,16 +46,48 @@ def activate_config_from_library(system_name: str) -> bool:
     if not system_name.endswith(".json"):
         system_name += ".json"

-    src = os.path.join(CONFIG_DIR, system_name)
-    dst = os.path.join(CONFIG_DIR, "active.cfg.json")
+    config_path = Path(CONFIG_DIR)
+    src = config_path / system_name
+    dst = config_path / "active.cfg.json"

-    if not os.path.exists(src):
+    if not src.exists():
         LOGGER.error(f"Source config {system_name} not found in library.")
         return False

     try:
         shutil.copy2(src, dst)
         LOGGER.info(f"Activated config: {system_name}")
+
+        # Copy sidecar files (tags/whitelist) if they exist
+        src_tags = src.with_suffix(".tags.tsv")
+        if src_tags.exists():
+            shutil.copy2(src_tags, config_path / "active.cfg.tags.tsv")
+        src_whitelist = src.with_suffix(".whitelist.tsv")
+        if src_whitelist.exists():
+            shutil.copy2(src_whitelist, config_path / "active.cfg.whitelist.tsv")
+
+        # Generate Liquidsoap Script by reading the activated config
+        with open(dst, 'r') as f:
+            data = json.load(f)
+        if "trunking" in data and "metadata" in data:
+            streams = data.get("metadata", {}).get("streams", [])
+            if streams:
+                stream = streams[0]
+                address = stream.get("icecastServerAddress", "127.0.0.1:8000")
+                host, port = address.split(":") if ":" in address else (address, 8000)
+                ice_config = IcecastConfig(
+                    icecast_host=host,
+                    icecast_port=int(port),
+                    icecast_mountpoint=stream.get("icecastMountpoint", "/stream"),
+                    icecast_password=stream.get("icecastPass", "hackme"),
+                    icecast_description="OP25 Stream",
+                    icecast_genre="Scanner"
+                )
+                generate_liquid_script(ice_config)
+
         return True
     except Exception as e:
         LOGGER.error(f"Failed to copy config: {e}")

@@ -88,14 +122,16 @@ def get_current_active_config() -> Dict:
             return {}
     return {}

-def save_talkgroup_tags(talkgroup_tags: List[TalkgroupTag]) -> None:
-    with open(os.path.join(CONFIG_DIR, "active.cfg.tags.tsv"), 'w', newline='', encoding='utf-8') as file:
+def save_talkgroup_tags(talkgroup_tags: List[TalkgroupTag], prefix: str = "active.cfg") -> None:
+    filename = f"{prefix}.tags.tsv"
+    with open(os.path.join(CONFIG_DIR, filename), 'w', newline='', encoding='utf-8') as file:
         writer = csv.writer(file, delimiter='\t', lineterminator='\n')
         for tag in talkgroup_tags:
             writer.writerow([tag.tagDec, tag.talkgroup])

-def save_whitelist(talkgroup_tags: List[int]) -> None:
-    with open(os.path.join(CONFIG_DIR, "active.cfg.whitelist.tsv"), 'w', newline='', encoding='utf-8') as file:
+def save_whitelist(talkgroup_tags: List[int], prefix: str = "active.cfg") -> None:
+    filename = f"{prefix}.whitelist.tsv"
+    with open(os.path.join(CONFIG_DIR, filename), 'w', newline='', encoding='utf-8') as file:
         writer = csv.writer(file, delimiter='\t', lineterminator='\n')
         for tag in talkgroup_tags:
             writer.writerow([tag])

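A minimal usage sketch of the new prefix parameter, assuming CONFIG_DIR resolves to /configs and that TalkgroupTag accepts the tagDec/talkgroup fields used above (values are illustrative):

from models.models import TalkgroupTag

# Illustrative talkgroup entries
tags = [
    TalkgroupTag(tagDec=101, talkgroup="County Fire Dispatch"),
    TalkgroupTag(tagDec=102, talkgroup="County EMS"),
]

save_talkgroup_tags(tags)                      # writes /configs/active.cfg.tags.tsv
save_talkgroup_tags(tags, prefix="my_system")  # writes /configs/my_system.tags.tsv
save_whitelist([101, 102], prefix="my_system") # writes /configs/my_system.whitelist.tsv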

@@ -20,6 +20,9 @@ app.include_router(create_op25_router(), prefix="/op25")
 # Configuration
 NODE_ID = os.getenv("NODE_ID", "standalone-node")
 MQTT_BROKER = os.getenv("MQTT_BROKER", None)
+HTTP_SERVER_PROTOCOL = os.getenv("HTTP_SERVER_PROTOCOL", "http")
+HTTP_SERVER_ADDRESS = os.getenv("HTTP_SERVER_ADDRESS", "127.0.0.1")
+HTTP_SERVER_PORT = os.getenv("HTTP_SERVER_PORT", 8000)
 NODE_LAT = os.getenv("NODE_LAT")
 NODE_LONG = os.getenv("NODE_LONG")
@@ -112,6 +115,37 @@ def handle_c2_command(topic, payload):
     except Exception as e:
         LOGGER.error(f"Error processing C2 command: {e}")

+def get_current_stream_url():
+    """
+    Dynamically resolves the audio stream URL from the active OP25 configuration.
+    Falls back to env var or default if config is missing/invalid.
+    """
+    default_url = os.getenv("STREAM_URL", "http://127.0.0.1:8000/stream_0")
+    config_path = "/configs/active.cfg.json"
+
+    if not os.path.exists(config_path):
+        return default_url
+
+    try:
+        with open(config_path, "r") as f:
+            config = json.load(f)
+
+        streams = config.get("metadata", {}).get("streams", [])
+        if not streams:
+            return default_url
+
+        stream = streams[0]
+        address = stream.get("icecastServerAddress", "127.0.0.1:8000")
+        mount = stream.get("icecastMountpoint", "stream_0")
+        if not mount.startswith("/"):
+            mount = f"/{mount}"
+
+        return f"http://{address}{mount}"
+    except Exception as e:
+        LOGGER.warning(f"Failed to resolve stream URL from config: {e}")
+        return default_url
+
 async def mqtt_lifecycle_manager():
     """
     Manages the application-level logic: Check-in, Heartbeats, and Shutdown.
@@ -153,7 +187,7 @@ async def mqtt_lifecycle_manager():
             payload = {
                 "node_id": NODE_ID,
                 "status": "online",
-                "timestamp": datetime.now().isoformat(),
+                "timestamp": datetime.utcnow().isoformat(),
                 "is_listening": op25_status.get("is_running", False),
                 "active_system": op25_status.get("active_system"),
                 # Only scan library if needed, otherwise it's heavy I/O
@@ -183,10 +217,193 @@ async def mqtt_lifecycle_manager():
     lwt_payload = json.dumps({"status": "offline", "reason": "unexpected_disconnect"})
     client.will_set(f"nodes/{NODE_ID}/status", lwt_payload, qos=1, retain=True)

+    async def metadata_watcher():
+        """
+        Polls OP25 HTTP terminal for metadata and publishes events to MQTT.
+        Corrected to use the POST-based command API found in the HAR capture.
+        """
+        last_tgid = 0
+        last_metadata = {}
+        potential_end_time = None
+        DEBOUNCE_SECONDS = 2.5
+        OP25_DATA_URL = "http://127.0.0.1:8081/"
+        # This is the specific payload the OP25 web interface uses [cite: 45562, 45563]
+        COMMAND_PAYLOAD = [{"command": "update", "arg1": 0, "arg2": 0}]
+
+        # Audio Recording State
+        recorder_proc = None
+        current_call_id = None
+
+        async def stop_recording():
+            nonlocal recorder_proc
+            if recorder_proc:
+                if recorder_proc.returncode is None:
+                    recorder_proc.terminate()
+                    try:
+                        await asyncio.wait_for(recorder_proc.wait(), timeout=2.0)
+                    except asyncio.TimeoutError:
+                        recorder_proc.kill()
+                recorder_proc = None
+
+        def upload_audio(call_id):
+            if not MQTT_BROKER: return None
+            local_path = f"/calls/{call_id}.mp3"
+            if not os.path.exists(local_path): return None
+            try:
+                with open(local_path, "rb") as f:
+                    files = {"file": (f"{call_id}.mp3", f, "audio/mpeg")}
+                    response = requests.post(f"{HTTP_SERVER_PROTOCOL}://{HTTP_SERVER_ADDRESS}:{HTTP_SERVER_PORT}/upload", files=files, data={"node_id": NODE_ID, "call_id": call_id}, timeout=30)
+                    response.raise_for_status()
+                    return response.json().get("url")
+            except Exception as e:
+                LOGGER.error(f"Upload failed: {e}")
+                return None
+            finally:
+                if os.path.exists(local_path):
+                    os.remove(local_path)
+        while True:
+            if not MQTT_CONNECTED:
+                await asyncio.sleep(1)
+                continue
+
+            try:
+                # Run blocking POST request in executor
+                loop = asyncio.get_running_loop()
+                response = await loop.run_in_executor(
+                    None,
+                    lambda: requests.post(OP25_DATA_URL, json=COMMAND_PAYLOAD, timeout=0.5)
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    # LOGGER.debug(f"Response from OP25 API: {data}")
+
+                    current_tgid = 0
+                    current_meta = {}
+
+                    # The response is an array of update objects
+                    for item in data:
+                        if item.get("json_type") == "channel_update":
+                            # The terminal provides channel info keyed by channel index (e.g., "0")
+                            # We look for the first channel that has an active TGID
+                            for key in item:
+                                if key.isdigit():
+                                    ch = item[key]
+                                    t = ch.get("tgid")
+                                    # OP25 returns null or 0 when no talkgroup is active
+                                    if t and int(t) > 0:
+                                        current_tgid = int(t)
+                                        current_meta = {
+                                            "tgid": str(t),
+                                            "rid": str(ch.get("srcaddr", "")).strip(),
+                                            "alpha_tag": str(ch.get("tag", "")).strip(),
+                                            "frequency": str(ch.get("freq", 0)),
+                                            "sysname": str(ch.get("system", "")).strip()
+                                        }
+                                        break
+                        if current_tgid: break
+
+                    now = datetime.utcnow()
+
+                    # Logic for handling call start/end events
+                    if current_tgid != 0:
+                        potential_end_time = None
+                        if current_tgid != last_tgid:
+                            if last_tgid != 0:
+                                # --- END PREVIOUS CALL ---
+                                await stop_recording()
+                                audio_url = None
+                                if current_call_id:
+                                    audio_url = await loop.run_in_executor(None, upload_audio, current_call_id)
+
+                                LOGGER.debug(f"Switching TGID: {last_tgid} -> {current_tgid}")
+                                payload = {
+                                    "node_id": NODE_ID,
+                                    "timestamp": now.isoformat(),
+                                    "event": "call_end",
+                                    "metadata": last_metadata,
+                                    "audio_url": audio_url,
+                                    "call_id": current_call_id
+                                }
+                                client.publish(f"nodes/{NODE_ID}/metadata", json.dumps(payload), qos=0)
+
+                            # --- START NEW CALL ---
+                            LOGGER.debug(f"Call Start: TGID {current_tgid} ({current_meta.get('alpha_tag')})")
+
+                            # Generate ID
+                            start_ts = int(now.timestamp())
+                            sysname = current_meta.get('sysname', 'unknown')
+                            tgid = current_meta.get('tgid', '0')
+                            current_call_id = f"{NODE_ID}_{sysname}_{tgid}_{start_ts}"
+
+                            # Start Recording (FFmpeg)
+                            try:
+                                stream_url = get_current_stream_url()
+                                recorder_proc = await asyncio.create_subprocess_exec(
+                                    "ffmpeg", "-i", stream_url, "-y", "-t", "300",
+                                    f"/calls/{current_call_id}.mp3",
+                                    stdout=asyncio.subprocess.DEVNULL,
+                                    stderr=asyncio.subprocess.DEVNULL
+                                )
+                            except Exception as e:
+                                LOGGER.error(f"Failed to start recorder: {e}")
+
+                            payload = {
+                                "node_id": NODE_ID,
+                                "timestamp": now.isoformat(),
+                                "event": "call_start",
+                                "metadata": current_meta,
+                                "call_id": current_call_id
+                            }
+                            client.publish(f"nodes/{NODE_ID}/metadata", json.dumps(payload), qos=0)
+
+                            last_tgid = current_tgid
+                            last_metadata = current_meta
+
+                    elif last_tgid != 0:
+                        if potential_end_time is None:
+                            LOGGER.debug(f"Signal lost for TGID {last_tgid}. Starting debounce.")
+                            potential_end_time = now
+                        elif (now - potential_end_time).total_seconds() > DEBOUNCE_SECONDS:
+                            # --- END CALL (Debounce Expired) ---
+                            await stop_recording()
+                            audio_url = None
+                            if current_call_id:
+                                audio_url = await loop.run_in_executor(None, upload_audio, current_call_id)
+
+                            LOGGER.debug(f"Call End (Debounce expired): TGID {last_tgid}")
+                            payload = {
+                                "node_id": NODE_ID,
+                                "timestamp": now.isoformat(),
+                                "event": "call_end",
+                                "metadata": last_metadata,
+                                "audio_url": audio_url,
+                                "call_id": current_call_id
+                            }
+                            client.publish(f"nodes/{NODE_ID}/metadata", json.dumps(payload), qos=0)
+
+                            last_tgid = 0
+                            last_metadata = {}
+                            potential_end_time = None
+                            current_call_id = None
+                else:
+                    LOGGER.debug(f"OP25 API returned status: {response.status_code}")
+
+            except Exception as e:
+                LOGGER.warning(f"Metadata watcher error: {e}")
+
+            await asyncio.sleep(0.25)
     try:
         client.connect(MQTT_BROKER, 1883, 60)
         client.loop_start() # Run network loop in background thread

+        # Start the metadata watcher task
+        watcher_task = asyncio.create_task(metadata_watcher())
+
         # --- Main Heartbeat Loop ---
         while True:
             if MQTT_CONNECTED:
@@ -194,10 +411,20 @@ async def mqtt_lifecycle_manager():
                 # Pulse every 30 seconds
                 # Only wait 30 sec if the HB sent. This way we don't stall a check-in
                 await asyncio.sleep(30)
+            else:
+                await asyncio.sleep(5)

     except asyncio.CancelledError:
         LOGGER.info("Stopping MQTT Loop...")
     finally:
+        # Cancel watcher
+        if 'watcher_task' in locals():
+            watcher_task.cancel()
+            try:
+                await watcher_task
+            except asyncio.CancelledError:
+                pass
+
         # Graceful Shutdown: Explicitly tell C2 we are leaving
         if MQTT_CONNECTED:
             shutdown_payload = json.dumps({"status": "offline", "reason": "clean_shutdown"})

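For reference, a call_end event published to nodes/<NODE_ID>/metadata by the watcher above would look roughly like the JSON below. All values are invented for illustration; audio_url is whatever the C2 /upload endpoint returns, and may be null if the upload failed.

{
  "node_id": "node-01",
  "timestamp": "2026-01-04T00:32:10.123456",
  "event": "call_end",
  "metadata": {
    "tgid": "101",
    "rid": "7001234",
    "alpha_tag": "County Fire Dispatch",
    "frequency": "853462500",
    "sysname": "CountyP25"
  },
  "audio_url": "http://c2.example:8000/calls/node-01_CountyP25_101_1767486730.mp3",
  "call_id": "node-01_CountyP25_101_1767486730"
}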

@@ -61,6 +61,77 @@ async def start_op25_logic():
             return False
     return False

+def build_op25_config(generator: ConfigGenerator) -> dict:
+    if generator.type == DecodeMode.P25:
+        channels = [ChannelConfig(
+            name=generator.systemName,
+            trunking_sysname=generator.systemName,
+            enable_analog="off",
+            demod_type="cqpsk",
+            cqpsk_tracking=True,
+            filter_type="rc",
+            meta_stream_name="stream_0"
+        )]
+        devices = [DeviceConfig()]
+        trunking = TrunkingConfig(
+            module="tk_p25.py",
+            chans=[TrunkingChannelConfig(
+                sysname=generator.systemName,
+                control_channel_list=','.join(generator.channels),
+                tagsFile="/configs/active.cfg.tags.tsv",
+                whitelist="/configs/active.cfg.whitelist.tsv"
+            )]
+        )
+        metadata = MetadataConfig(
+            streams=[
+                MetadataStreamConfig(
+                    stream_name="stream_0",
+                    icecastServerAddress = f"{generator.icecastConfig.icecast_host}:{generator.icecastConfig.icecast_port}",
+                    icecastMountpoint = generator.icecastConfig.icecast_mountpoint,
+                    icecastPass = generator.icecastConfig.icecast_password
+                )
+            ]
+        )
+        terminal = TerminalConfig()
+
+        return {
+            "channels": [channel.dict() for channel in channels],
+            "devices": [device.dict() for device in devices],
+            "trunking": trunking.dict(),
+            "metadata": metadata.dict(),
+            "terminal": terminal.dict()
+        }
+    elif generator.type == DecodeMode.ANALOG:
+        analog_config = generator.config
+        channels = [ChannelConfig(
+            channelName=analog_config.systemName,
+            enableAnalog="on",
+            demodType="fsk4",
+            frequency=analog_config.frequency,
+            filterType="widepulse",
+            nbfmSquelch=analog_config.nbfmSquelch
+        )]
+        devices = [DeviceConfig(gain="LNA:32")]
+
+        return {
+            "channels": [channel.dict() for channel in channels],
+            "devices": [device.dict() for device in devices]
+        }
+    else:
+        raise HTTPException(status_code=400, detail="Invalid decode mode")
+
+def save_library_sidecars(system_name: str, generator: ConfigGenerator):
+    if generator.type == DecodeMode.P25:
+        prefix = system_name
+        if prefix.endswith(".json"):
+            prefix = prefix[:-5]
+        save_talkgroup_tags(generator.tags, prefix)
+        save_whitelist(generator.whitelist, prefix)
+
 def create_op25_router():
     router = APIRouter()
@@ -93,46 +164,31 @@ def create_op25_router():
         active.cfg.json, and optionally restarts the radio.
         """
         try:
-            if generator.type == DecodeMode.P25:
-                # 1. Handle sidecar files (Tags/Whitelists)
-                if generator.config.talkgroupTags:
-                    save_talkgroup_tags(generator.config.talkgroupTags)
-                if generator.config.whitelist:
-                    save_whitelist(generator.config.whitelist)
-
-                # 2. Build the main OP25 dictionary structure
-                config_dict = {
-                    "channels": [c.dict() for c in generator.config.channels],
-                    "devices": [d.dict() for d in generator.config.devices],
-                    "trunking": generator.config.trunking.dict(),
-                    "metadata": generator.config.metadata.dict(),
-                    "terminal": generator.config.terminal.dict()
-                }
-            elif generator.type == DecodeMode.ANALOG:
-                # Simple Analog NBFM Setup for quick testing
-                channels = [ChannelConfig(
-                    channelName=generator.config.systemName,
-                    enableAnalog="on",
-                    frequency=generator.config.frequency,
-                    demodType="fsk4",
-                    filterType="widepulse"
-                )]
-                config_dict = {
-                    "channels": [c.dict() for c in channels],
-                    "devices": [{"gain": "LNA:32"}] # Default gain for analog test
-                }
-            else:
-                raise HTTPException(status_code=400, detail="Invalid decode mode")
-
-            # 3. Clean 'None' values to prevent OP25 parsing errors and save
+            # 1. Build the configuration dictionary
+            config_dict = build_op25_config(generator)
             final_json = del_none_in_dict(config_dict)
+
+            # 2. Handle Storage and Activation
             if save_to_library_name:
+                # Save to library
                 save_config_to_library(save_to_library_name, final_json)
+                save_library_sidecars(save_to_library_name, generator)

-            with open('/configs/active.cfg.json', 'w') as f:
-                json.dump(final_json, f, indent=2)
+                # Activate from library (Copies json + sidecars)
+                if not activate_config_from_library(save_to_library_name):
+                    raise HTTPException(status_code=500, detail="Failed to activate saved configuration")
+            else:
+                # Save directly to active
+                with open('/configs/active.cfg.json', 'w') as f:
+                    json.dump(final_json, f, indent=2)
+                if generator.type == DecodeMode.P25:
+                    save_talkgroup_tags(generator.tags)
+                    save_whitelist(generator.whitelist)
+
+            # 3. Generate Liquidsoap Script (Always required for active P25 session)
+            if generator.type == DecodeMode.P25:
+                generate_liquid_script(generator.icecastConfig)

             LOGGER.info("Saved new configuration to active.cfg.json")
@@ -162,13 +218,19 @@ def create_op25_router():
         raise HTTPException(status_code=404, detail=f"Config '{system_name}' not found in library volume")

     @router.post("/save_to_library")
-    async def save_to_library(system_name: str, config: dict):
+    async def save_to_library(system_name: str, config: ConfigGenerator):
         """
         Directly saves a JSON configuration to the library.
         """
-        if save_config_to_library(system_name, config):
-            return {"status": f"Config saved as {system_name}"}
-        raise HTTPException(status_code=500, detail="Failed to save configuration")
+        try:
+            config_dict = build_op25_config(config)
+            final_json = del_none_in_dict(config_dict)
+            if save_config_to_library(system_name, final_json):
+                save_library_sidecars(system_name, config)
+                return {"status": f"Config saved as {system_name}"}
+            raise HTTPException(status_code=500, detail="Failed to save configuration")
+        except Exception as e:
+            raise HTTPException(status_code=500, detail=str(e))

     @router.get("/library")
     async def get_library():

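As a rough usage sketch of the reworked endpoint: /op25/save_to_library now expects a ConfigGenerator body instead of a raw dict. The field names below are inferred from the attributes used in build_op25_config above; the exact Pydantic schema, the enum serialization, and the API port (assumed 8001 from the compose file) may differ, so treat this as illustrative only.

import requests

# Hypothetical ConfigGenerator payload; field names mirror the attributes used above,
# but the real schema in models.models may nest or name things differently.
body = {
    "type": "P25",
    "systemName": "CountyP25",
    "channels": ["853.4625", "853.9625"],  # control channel list, joined with ',' server-side
    "icecastConfig": {
        "icecast_host": "127.0.0.1",
        "icecast_port": 8000,
        "icecast_mountpoint": "/stream_0",
        "icecast_password": "hackme",
    },
    "tags": [{"tagDec": 101, "talkgroup": "County Fire Dispatch"}],
    "whitelist": [101],
}

resp = requests.post(
    "http://127.0.0.1:8001/op25/save_to_library",  # assumed host/port
    params={"system_name": "CountyP25"},
    json=body,
    timeout=10,
)
print(resp.json())  # e.g. {"status": "Config saved as CountyP25"}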

@@ -7,10 +7,11 @@ services:
     restart: unless-stopped
     ports:
       - 8001:8001
+      - 8081:8081
     devices:
       - "/dev/bus/usb:/dev/bus/usb"
     volumes:
-      - ./config:/app/config
+      - ./configs:/configs
       - ./op25_logs:/tmp/op25
     env_file:
       - .env
@@ -20,6 +21,10 @@ services:
       - NODE_LONG=${NODE_LONG}
       - MQTT_BROKER=${MQTT_BROKER}
       - ICECAST_SERVER=${ICECAST_SERVER}
+      - AUDIO_BUCKET=${AUDIO_BUCKET}
+      - HTTP_SERVER_PROTOCOL=${HTTP_SERVER_PROTOCOL}
+      - HTTP_SERVER_ADDRESS=${HTTP_SERVER_ADDRESS}
+      - HTTP_SERVER_PORT=${HTTP_SERVER_PORT}
     networks:
       - radio-shared-net