Init OP25 crash detection

Logan Cusano
2024-09-14 15:49:15 -04:00
parent 0f5ee3b3fb
commit 821e4f6a64


@@ -3,11 +3,59 @@ const log = new DebugBuilder("client", "op25Handler");
import { P25ConfigGenerator, NBFMConfigGenerator } from './modules/op25ConfigGenerators.mjs';
import { getAllPresets } from '../modules/radioPresetHandler.mjs';
import { startService, stopService } from '../modules/serviceHandler.mjs';
import axios from 'axios'; // Import axios for HTTP requests
import dotenv from 'dotenv';
dotenv.config();
let currentSystem = undefined;
let crashDetectionInterval; // Variable to store the crash detection interval ID
/**
 * Checks the health of the OP25 web portal by making an HTTP HEAD request.
 * If the portal does not respond or there is an issue, retries a specified number of times.
 * If all retry attempts fail, it restarts the OP25 service.
 *
 * @async
 * @function checkServiceHealth
 * @returns {Promise<void>} Resolves if the web portal is healthy or after the restart process is triggered.
 * @throws Will log errors related to the health check or service restart.
 */
const checkServiceHealth = async () => {
    try {
        log.INFO("Checking OP25 web portal health...");
        // Perform an HTTP HEAD request to the web portal with a 5-second timeout
        await axios.head('http://localhost:8081', { timeout: 5000 });
        log.INFO("Web portal is healthy.");
    } catch (error) {
        if (error.code === 'ECONNABORTED') {
            log.ERROR("Request timed out. The web portal took too long to respond.");
        } else if (error.response) {
            log.ERROR(`Web portal responded with status ${error.response.status}: ${error.response.statusText}`);
        } else if (error.request) {
            log.ERROR("No response received from web portal.");
        } else {
            log.ERROR(`Unexpected error occurred: ${error.message}`);
        }
        // Retry mechanism
        const retryAttempts = 3;
        for (let i = 1; i <= retryAttempts; i++) {
            log.INFO(`Retrying to check web portal health... Attempt ${i}/${retryAttempts}`);
            try {
                await axios.head('http://localhost:8081', { timeout: 5000 });
                log.INFO("Web portal is healthy on retry.");
                return;
            } catch (retryError) {
                log.ERROR(`Retry ${i} failed: ${retryError.message}`);
                if (i === retryAttempts) {
                    log.ERROR("All retry attempts failed. Restarting the service...");
                    await restartOp25();
                }
            }
        }
    }
};
/**
 * Creates configuration based on the preset and restarts the OP25 service.
@@ -41,9 +89,7 @@ const createConfigAndRestartService = async (systemName, preset) => {
    const op25ConfigPath = `${op25FilePath}${op25FilePath.endsWith('/') ? 'active.cfg.json' : '/active.cfg.json'}`;
    await generator.exportToFile(op25ConfigPath);
    // Restart the service
    await restartOp25();
};
/**
@@ -65,8 +111,23 @@ export const openOP25 = async (systemName) => {
    }
    await createConfigAndRestartService(systemName, preset);
    // Start OP25 crash detection
    if (!crashDetectionInterval) {
        crashDetectionInterval = setInterval(checkServiceHealth, 30000); // Check every 30 seconds
        log.INFO("Started crash detection.");
    }
};
/**
 * Restarts the OP25 service without changing the config.
 * @returns {Promise<void>}
 */
export const restartOp25 = async () => {
    // Restart the service
    await stopService('op25-multi_rx');
    await startService('op25-multi_rx');
};
/**
 * Closes the OP25 service.
@@ -75,6 +136,13 @@ export const openOP25 = async (systemName) => {
export const closeOP25 = async () => {
    currentSystem = undefined;
    await stopService('op25-multi_rx');
    // Stop crash detection
    if (crashDetectionInterval) {
        clearInterval(crashDetectionInterval);
        crashDetectionInterval = null;
        log.INFO("Stopped crash detection.");
    }
};
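For reference, a minimal usage sketch of the crash-detection lifecycle exported above (not part of this commit; the import path and system name below are placeholders):

import { openOP25, closeOP25 } from './op25Handler.mjs';

// Writes the active config for the preset, (re)starts op25-multi_rx,
// and begins polling the web portal every 30 seconds.
await openOP25('ExampleP25System');

// Later, on shutdown: stops op25-multi_rx and clears the polling interval.
await closeOP25();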
/**