feat: dynamic classifier classes, per-node UI, XSS fix, RSSI fix

Complements #326 (per-node state pipeline) with additional features:

- Dynamic adaptive classifier: discover activity classes from training
  data filenames instead of hardcoded array. Users add classes via
  filename convention (train_<class>_<desc>.jsonl), no code changes.
- Per-node UI cards: SensingTab shows individual node status with
  color-coded markers, RSSI, variance, and classification per node.
- Colored node markers in 3D gaussian splat view (8-color palette).
- Per-node RSSI history tracking in sensing service.
- XSS fix: UI uses createElement/textContent instead of innerHTML.
- RSSI sign fix: ensure dBm values are always negative.
- GET /api/v1/nodes endpoint for per-node health monitoring.
- node_features field in WebSocket SensingUpdate messages.
- Firmware watchdog fix: yield after every frame to prevent IDLE1 starvation.

Addresses #237, #276, #282

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Taylor Dawson 2026-03-27 21:21:15 -07:00
parent 3c02f6cfb0
commit d88994816f
6 changed files with 309 additions and 90 deletions

View file

@ -41,14 +41,12 @@ static const char *TAG = "edge_proc";
* ====================================================================== */
static edge_ring_buf_t s_ring;
static uint32_t s_ring_drops; /* Frames dropped due to full ring buffer. */
static inline bool ring_push(const uint8_t *iq, uint16_t len,
int8_t rssi, uint8_t channel)
{
uint32_t next = (s_ring.head + 1) % EDGE_RING_SLOTS;
if (next == s_ring.tail) {
s_ring_drops++;
return false; /* Full — drop frame. */
}
@ -790,13 +788,12 @@ static void process_frame(const edge_ring_slot_t *slot)
if ((s_frame_count % 200) == 0) {
ESP_LOGI(TAG, "Vitals: br=%.1f hr=%.1f motion=%.4f pres=%s "
"fall=%s persons=%u frames=%lu drops=%lu",
"fall=%s persons=%u frames=%lu",
s_breathing_bpm, s_heartrate_bpm, s_motion_energy,
s_presence_detected ? "YES" : "no",
s_fall_detected ? "YES" : "no",
(unsigned)s_latest_pkt.n_persons,
(unsigned long)s_frame_count,
(unsigned long)s_ring_drops);
(unsigned long)s_frame_count);
}
}
@ -834,32 +831,18 @@ static void edge_task(void *arg)
edge_ring_slot_t slot;
/* Maximum frames to process before a longer yield. On busy LANs
* (corporate networks, many APs), the ring buffer fills continuously.
* Without a batch limit the task processes frames back-to-back with
* only 1-tick yields, which on high frame rates can still starve
* IDLE1 enough to trip the 5-second task watchdog. See #266, #321. */
const uint8_t BATCH_LIMIT = 4;
while (1) {
uint8_t processed = 0;
while (processed < BATCH_LIMIT && ring_pop(&slot)) {
if (ring_pop(&slot)) {
process_frame(&slot);
processed++;
/* 1-tick yield between frames within a batch. */
/* Yield after every frame to feed the Core 1 watchdog.
* process_frame() is CPU-intensive (biquad filters, Welford stats,
* BPM estimation, multi-person vitals) and can take several ms.
* Without this yield, edge_dsp at priority 5 starves IDLE1 at
* priority 0, triggering the task watchdog. See issue #266. */
vTaskDelay(1);
}
if (processed > 0) {
/* Post-batch yield: 2 ticks (~20 ms at 100 Hz) so IDLE1 can
* run and feed the Core 1 watchdog even under sustained load.
* This is intentionally longer than the 1-tick inter-frame yield. */
vTaskDelay(2);
} else {
/* No frames available — sleep one full tick.
* NOTE: pdMS_TO_TICKS(5) == 0 at 100 Hz, which would busy-spin. */
vTaskDelay(1);
/* No frames available — yield briefly. */
vTaskDelay(pdMS_TO_TICKS(1));
}
}
}

View file

@ -10,6 +10,10 @@
//!
//! The trained model is serialised as JSON and hot-loaded at runtime so that
//! the classification thresholds adapt to the specific room and ESP32 placement.
//!
//! Classes are discovered dynamically from training data filenames instead of
//! being hardcoded, so new activity classes can be added just by recording data
//! with the appropriate filename convention.
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@ -20,9 +24,8 @@ use std::path::{Path, PathBuf};
/// Extended feature vector: 7 server features + 8 subcarrier-derived features = 15.
const N_FEATURES: usize = 15;
/// Activity classes we recognise.
pub const CLASSES: &[&str] = &["absent", "present_still", "present_moving", "active"];
const N_CLASSES: usize = 4;
/// Default class names for backward compatibility with old saved models.
const DEFAULT_CLASSES: &[&str] = &["absent", "present_still", "present_moving", "active"];
/// Extract extended feature vector from a JSONL frame (features + raw amplitudes).
pub fn features_from_frame(frame: &serde_json::Value) -> [f64; N_FEATURES] {
@ -124,8 +127,9 @@ pub struct ClassStats {
pub struct AdaptiveModel {
/// Per-class feature statistics (centroid + spread).
pub class_stats: Vec<ClassStats>,
/// Logistic regression weights: [N_CLASSES x (N_FEATURES + 1)] (last = bias).
pub weights: Vec<[f64; N_FEATURES + 1]>,
/// Logistic regression weights: [n_classes x (N_FEATURES + 1)] (last = bias).
/// Dynamic: the outer Vec length equals the number of discovered classes.
pub weights: Vec<Vec<f64>>,
/// Global feature normalisation: mean and stddev across all training data.
pub global_mean: [f64; N_FEATURES],
pub global_std: [f64; N_FEATURES],
@ -133,27 +137,38 @@ pub struct AdaptiveModel {
pub trained_frames: usize,
pub training_accuracy: f64,
pub version: u32,
/// Dynamically discovered class names (in index order).
#[serde(default = "default_class_names")]
pub class_names: Vec<String>,
}
/// Backward-compatible fallback for models saved without class_names.
///
/// Returns the four legacy activity labels in index order, matching
/// `DEFAULT_CLASSES`, so deserialising an old model JSON yields a usable
/// `class_names` vector.
fn default_class_names() -> Vec<String> {
    DEFAULT_CLASSES.iter().map(|&name| name.to_owned()).collect()
}
impl Default for AdaptiveModel {
    /// An untrained model over the default four classes.
    ///
    /// Weights are all zero (softmax yields a uniform distribution), the
    /// global normalisation is the identity (mean 0, std 1), and
    /// `class_names` mirrors `DEFAULT_CLASSES` for backward compatibility
    /// with models saved before dynamic class discovery.
    fn default() -> Self {
        let n_classes = DEFAULT_CLASSES.len();
        Self {
            class_stats: Vec::new(),
            // One (N_FEATURES + 1)-length row per class; the last slot is the bias.
            // NOTE: the diff residue had a stale fixed-size-array initializer for
            // `weights` here (duplicate field) — only the dynamic Vec form is kept.
            weights: vec![vec![0.0; N_FEATURES + 1]; n_classes],
            global_mean: [0.0; N_FEATURES],
            global_std: [1.0; N_FEATURES],
            trained_frames: 0,
            training_accuracy: 0.0,
            version: 1,
            class_names: default_class_names(),
        }
    }
}
impl AdaptiveModel {
/// Classify a raw feature vector. Returns (class_label, confidence).
pub fn classify(&self, raw_features: &[f64; N_FEATURES]) -> (&'static str, f64) {
if self.weights.is_empty() || self.class_stats.is_empty() {
return ("present_still", 0.5);
pub fn classify(&self, raw_features: &[f64; N_FEATURES]) -> (String, f64) {
let n_classes = self.weights.len();
if n_classes == 0 || self.class_stats.is_empty() {
return ("present_still".to_string(), 0.5);
}
// Normalise features.
@ -163,8 +178,8 @@ impl AdaptiveModel {
}
// Compute logits: w·x + b for each class.
let mut logits = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES.min(self.weights.len()) {
let mut logits: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
let w = &self.weights[c];
let mut z = w[N_FEATURES]; // bias
for i in 0..N_FEATURES {
@ -176,8 +191,8 @@ impl AdaptiveModel {
// Softmax.
let max_logit = logits.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
let exp_sum: f64 = logits.iter().map(|z| (z - max_logit).exp()).sum();
let mut probs = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES {
let mut probs: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
probs[c] = ((logits[c] - max_logit).exp()) / exp_sum;
}
@ -185,7 +200,11 @@ impl AdaptiveModel {
let (best_c, best_p) = probs.iter().enumerate()
.max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
.unwrap();
let label = if best_c < CLASSES.len() { CLASSES[best_c] } else { "present_still" };
let label = if best_c < self.class_names.len() {
self.class_names[best_c].clone()
} else {
"present_still".to_string()
};
(label, *best_p)
}
@ -228,48 +247,88 @@ fn load_recording(path: &Path, class_idx: usize) -> Vec<Sample> {
}).collect()
}
/// Map a recording filename to a class name.
///
/// Known legacy keywords (`empty`, `still`, `walking`, `active`, …) map to the
/// four default class names so old recordings keep working. Any other file
/// following the `train_<class>_<description>.jsonl` convention yields
/// `<class>` — the first `_`-separated segment after the `train_` prefix.
///
/// Returns `None` only when no non-empty class label can be extracted.
fn classify_recording_name(name: &str) -> Option<String> {
    let lower = name.to_lowercase();
    // Legacy keyword patterns first, for backward compat with old recordings.
    if lower.contains("empty") || lower.contains("absent") {
        return Some("absent".into());
    }
    if lower.contains("still") || lower.contains("sitting") || lower.contains("standing") {
        return Some("present_still".into());
    }
    if lower.contains("walking") || lower.contains("moving") {
        return Some("present_moving".into());
    }
    if lower.contains("active") || lower.contains("exercise") || lower.contains("running") {
        return Some("active".into());
    }
    // Fallback: extract the class from train_<class>_*.jsonl.
    // strip_prefix/strip_suffix remove at most one occurrence each, unlike
    // trim_start_matches/trim_end_matches which strip repeatedly and could
    // mangle a filename like "train_train_x.jsonl".
    let stem = lower.strip_prefix("train_").unwrap_or(&lower);
    let stem = stem.strip_suffix(".jsonl").unwrap_or(stem);
    let class_name = stem.split('_').next().unwrap_or(stem);
    if class_name.is_empty() {
        None
    } else {
        Some(class_name.to_string())
    }
}
/// Train a model from labeled JSONL recordings in a directory.
///
/// Recordings are matched to classes by filename pattern:
/// - `*empty*` / `*absent*` → absent (0)
/// - `*still*` / `*sitting*` → present_still (1)
/// - `*walking*` / `*moving*` → present_moving (2)
/// - `*active*` / `*exercise*`→ active (3)
/// Recordings are matched to classes by filename pattern. Classes are discovered
/// dynamically from the training data filenames:
/// - `*empty*` / `*absent*` → absent
/// - `*still*` / `*sitting*` → present_still
/// - `*walking*` / `*moving*` → present_moving
/// - `*active*` / `*exercise*`→ active
/// - Any other `train_<class>_*.jsonl` → <class>
pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, String> {
// Scan for train_* files.
let mut samples: Vec<Sample> = Vec::new();
let entries = std::fs::read_dir(recordings_dir)
.map_err(|e| format!("Cannot read {}: {}", recordings_dir.display(), e))?;
// First pass: scan filenames to discover all unique class names.
let entries: Vec<_> = std::fs::read_dir(recordings_dir)
.map_err(|e| format!("Cannot read {}: {}", recordings_dir.display(), e))?
.flatten()
.collect();
for entry in entries.flatten() {
let mut class_map: HashMap<String, usize> = HashMap::new();
let mut class_names: Vec<String> = Vec::new();
// Collect (entry, class_name) pairs for files that match.
let mut file_classes: Vec<(PathBuf, String, String)> = Vec::new(); // (path, fname, class_name)
for entry in &entries {
let fname = entry.file_name().to_string_lossy().to_string();
if !fname.starts_with("train_") || !fname.ends_with(".jsonl") {
continue;
}
if let Some(class_idx) = classify_recording_name(&fname) {
let loaded = load_recording(&entry.path(), class_idx);
eprintln!(" Loaded {}: {} frames → class '{}'",
fname, loaded.len(), CLASSES[class_idx]);
samples.extend(loaded);
if let Some(class_name) = classify_recording_name(&fname) {
if !class_map.contains_key(&class_name) {
let idx = class_names.len();
class_map.insert(class_name.clone(), idx);
class_names.push(class_name.clone());
}
file_classes.push((entry.path(), fname, class_name));
}
}
let n_classes = class_names.len();
if n_classes == 0 {
return Err("No training samples found. Record data with train_* prefix.".into());
}
// Second pass: load recordings with the discovered class indices.
let mut samples: Vec<Sample> = Vec::new();
for (path, fname, class_name) in &file_classes {
let class_idx = class_map[class_name];
let loaded = load_recording(path, class_idx);
eprintln!(" Loaded {}: {} frames → class '{}'",
fname, loaded.len(), class_name);
samples.extend(loaded);
}
if samples.is_empty() {
return Err("No training samples found. Record data with train_* prefix.".into());
}
let n = samples.len();
eprintln!("Total training samples: {n}");
eprintln!("Total training samples: {n} across {n_classes} classes: {:?}", class_names);
// ── Compute global normalisation stats ──
let mut global_mean = [0.0f64; N_FEATURES];
@ -289,9 +348,9 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
}
// ── Compute per-class statistics ──
let mut class_sums = vec![[0.0f64; N_FEATURES]; N_CLASSES];
let mut class_sq = vec![[0.0f64; N_FEATURES]; N_CLASSES];
let mut class_counts = vec![0usize; N_CLASSES];
let mut class_sums = vec![[0.0f64; N_FEATURES]; n_classes];
let mut class_sq = vec![[0.0f64; N_FEATURES]; n_classes];
let mut class_counts = vec![0usize; n_classes];
for s in &samples {
let c = s.class_idx;
class_counts[c] += 1;
@ -302,7 +361,7 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
}
let mut class_stats = Vec::new();
for c in 0..N_CLASSES {
for c in 0..n_classes {
let cnt = class_counts[c].max(1) as f64;
let mut mean = [0.0; N_FEATURES];
let mut stddev = [0.0; N_FEATURES];
@ -311,7 +370,7 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
stddev[i] = ((class_sq[c][i] / cnt) - mean[i] * mean[i]).max(0.0).sqrt();
}
class_stats.push(ClassStats {
label: CLASSES[c].to_string(),
label: class_names[c].clone(),
count: class_counts[c],
mean,
stddev,
@ -328,7 +387,7 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
}).collect();
// ── Train logistic regression via mini-batch SGD ──
let mut weights = vec![[0.0f64; N_FEATURES + 1]; N_CLASSES];
let mut weights: Vec<Vec<f64>> = vec![vec![0.0f64; N_FEATURES + 1]; n_classes];
let lr = 0.1;
let epochs = 200;
let batch_size = 32;
@ -348,19 +407,19 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
}
let mut epoch_loss = 0.0f64;
let mut batch_count = 0;
let mut _batch_count = 0;
for batch_start in (0..norm_samples.len()).step_by(batch_size) {
let batch_end = (batch_start + batch_size).min(norm_samples.len());
let batch = &norm_samples[batch_start..batch_end];
// Accumulate gradients.
let mut grad = vec![[0.0f64; N_FEATURES + 1]; N_CLASSES];
let mut grad: Vec<Vec<f64>> = vec![vec![0.0f64; N_FEATURES + 1]; n_classes];
for (x, target) in batch {
// Forward: softmax.
let mut logits = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES {
let mut logits: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
logits[c] = weights[c][N_FEATURES]; // bias
for i in 0..N_FEATURES {
logits[c] += weights[c][i] * x[i];
@ -368,8 +427,8 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
}
let max_l = logits.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
let exp_sum: f64 = logits.iter().map(|z| (z - max_l).exp()).sum();
let mut probs = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES {
let mut probs: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
probs[c] = ((logits[c] - max_l).exp()) / exp_sum;
}
@ -377,7 +436,7 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
epoch_loss += -(probs[*target].max(1e-15)).ln();
// Gradient: prob - one_hot(target).
for c in 0..N_CLASSES {
for c in 0..n_classes {
let delta = probs[c] - if c == *target { 1.0 } else { 0.0 };
for i in 0..N_FEATURES {
grad[c][i] += delta * x[i];
@ -389,12 +448,12 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
// Update weights.
let bs = batch.len() as f64;
let current_lr = lr * (1.0 - epoch as f64 / epochs as f64); // linear decay
for c in 0..N_CLASSES {
for c in 0..n_classes {
for i in 0..=N_FEATURES {
weights[c][i] -= current_lr * grad[c][i] / bs;
}
}
batch_count += 1;
_batch_count += 1;
}
if epoch % 50 == 0 || epoch == epochs - 1 {
@ -406,8 +465,8 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
// ── Evaluate accuracy ──
let mut correct = 0;
for (x, target) in &norm_samples {
let mut logits = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES {
let mut logits: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
logits[c] = weights[c][N_FEATURES];
for i in 0..N_FEATURES {
logits[c] += weights[c][i] * x[i];
@ -422,12 +481,12 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
eprintln!("Training accuracy: {correct}/{n} = {accuracy:.1}%");
// ── Per-class accuracy ──
let mut class_correct = vec![0usize; N_CLASSES];
let mut class_total = vec![0usize; N_CLASSES];
let mut class_correct = vec![0usize; n_classes];
let mut class_total = vec![0usize; n_classes];
for (x, target) in &norm_samples {
class_total[*target] += 1;
let mut logits = [0.0f64; N_CLASSES];
for c in 0..N_CLASSES {
let mut logits: Vec<f64> = vec![0.0; n_classes];
for c in 0..n_classes {
logits[c] = weights[c][N_FEATURES];
for i in 0..N_FEATURES {
logits[c] += weights[c][i] * x[i];
@ -438,9 +497,9 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
.unwrap().0;
if pred == *target { class_correct[*target] += 1; }
}
for c in 0..N_CLASSES {
for c in 0..n_classes {
let tot = class_total[c].max(1);
eprintln!(" {}: {}/{} ({:.0}%)", CLASSES[c], class_correct[c], tot,
eprintln!(" {}: {}/{} ({:.0}%)", class_names[c], class_correct[c], tot,
class_correct[c] as f64 / tot as f64 * 100.0);
}
@ -452,6 +511,7 @@ pub fn train_from_recordings(recordings_dir: &Path) -> Result<AdaptiveModel, Str
trained_frames: n,
training_accuracy: accuracy,
version: 1,
class_names,
})
}

View file

@ -212,6 +212,9 @@ struct SensingUpdate {
/// Estimated person count from CSI feature heuristics (1-3 for single ESP32).
#[serde(skip_serializing_if = "Option::is_none")]
estimated_persons: Option<usize>,
/// Per-node feature breakdown for multi-node deployments.
#[serde(skip_serializing_if = "Option::is_none")]
node_features: Option<Vec<PerNodeFeatureInfo>>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -328,6 +331,18 @@ impl NodeState {
}
}
/// Per-node feature info for WebSocket broadcasts (multi-node support).
///
/// One entry per ESP32 node is carried in `SensingUpdate.node_features`
/// (serialized only when present) so the UI can render per-node status cards.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PerNodeFeatureInfo {
    /// Node identifier. The UI also derives the marker colour from it
    /// (node_id modulo the 8-colour palette length).
    node_id: u8,
    /// Aggregate CSI feature summary for this node.
    features: FeatureInfo,
    /// Classifier output (motion level / confidence) for this node.
    classification: ClassificationInfo,
    /// Most recent RSSI sample, in dBm.
    rssi_dbm: f64,
    /// Age of the node's most recent frame, in milliseconds (presumably
    /// relative to broadcast time — confirm against the producer).
    last_seen_ms: u64,
    /// Recent frame rate in Hz. NOTE(review): averaging window is defined by
    /// the producer, not visible here.
    frame_rate_hz: f64,
    /// True when the node has not reported recently; the UI greys it out
    /// and labels it STALE.
    stale: bool,
}
/// Shared application state
struct AppStateInner {
latest_update: Option<SensingUpdate>,
@ -570,7 +585,9 @@ fn parse_esp32_frame(buf: &[u8]) -> Option<Esp32Frame> {
let n_subcarriers = buf[6];
let freq_mhz = u16::from_le_bytes([buf[8], buf[9]]);
let sequence = u32::from_le_bytes([buf[10], buf[11], buf[12], buf[13]]);
let rssi = buf[14] as i8;
let rssi_raw = buf[14] as i8;
// Fix RSSI sign: ensure it's always negative (dBm convention).
let rssi = if rssi_raw > 0 { rssi_raw.saturating_neg() } else { rssi_raw };
let noise_floor = buf[15] as i8;
let iq_start = 20;
@ -1455,6 +1472,7 @@ async fn windows_wifi_task(state: SharedState, tick_ms: u64) {
model_status: None,
persons: None,
estimated_persons: if est_persons > 0 { Some(est_persons) } else { None },
node_features: None,
};
// Populate persons from the sensing update.
@ -1588,6 +1606,7 @@ async fn windows_wifi_fallback_tick(state: &SharedState, seq: u32) {
model_status: None,
persons: None,
estimated_persons: if est_persons > 0 { Some(est_persons) } else { None },
node_features: None,
};
let persons = derive_pose_from_sensing(&update);
@ -2907,6 +2926,34 @@ async fn sona_activate(
}
}
/// GET /api/v1/nodes — per-node health and feature info.
///
/// Responds with `{ "nodes": [...], "total": n }`, one entry per known node:
/// staleness (no frame for more than 5 s), last-seen age in milliseconds,
/// the most recent RSSI sample (or -90.0 when no history exists), the
/// current motion level and the previously reported person count.
async fn nodes_endpoint(State(state): State<SharedState>) -> Json<serde_json::Value> {
    let guard = state.read().await;
    let now = std::time::Instant::now();

    let mut nodes: Vec<serde_json::Value> = Vec::with_capacity(guard.node_states.len());
    for (&node_id, node) in guard.node_states.iter() {
        // Age of the most recent frame; a large sentinel when none was ever seen.
        let last_seen_ms = match node.last_frame_time {
            Some(t) => now.duration_since(t).as_millis() as u64,
            None => 999999,
        };
        let is_stale = last_seen_ms > 5000;
        let rssi_dbm = node.rssi_history.back().copied().unwrap_or(-90.0);
        nodes.push(serde_json::json!({
            "node_id": node_id,
            "status": if is_stale { "stale" } else { "active" },
            "last_seen_ms": last_seen_ms,
            "rssi_dbm": rssi_dbm,
            "motion_level": &node.current_motion_level,
            "person_count": node.prev_person_count,
        }));
    }

    Json(serde_json::json!({
        "nodes": nodes,
        "total": nodes.len(),
    }))
}
async fn info_page() -> Html<String> {
Html(format!(
"<html><body>\
@ -3062,6 +3109,7 @@ async fn udp_receiver_task(state: SharedState, udp_port: u16) {
model_status: None,
persons: None,
estimated_persons: if total_persons > 0 { Some(total_persons) } else { None },
node_features: None,
};
let persons = derive_pose_from_sensing(&update);
@ -3240,6 +3288,7 @@ async fn udp_receiver_task(state: SharedState, udp_port: u16) {
model_status: None,
persons: None,
estimated_persons: if total_persons > 0 { Some(total_persons) } else { None },
node_features: None,
};
let persons = derive_pose_from_sensing(&update);
@ -3358,6 +3407,7 @@ async fn simulated_data_task(state: SharedState, tick_ms: u64) {
},
persons: None,
estimated_persons: if est_persons > 0 { Some(est_persons) } else { None },
node_features: None,
};
// Populate persons from the sensing update.
@ -4045,6 +4095,8 @@ async fn main() {
.route("/api/v1/metrics", get(health_metrics))
// Sensing endpoints
.route("/api/v1/sensing/latest", get(latest))
// Per-node health endpoint
.route("/api/v1/nodes", get(nodes_endpoint))
// Vital sign endpoints
.route("/api/v1/vital-signs", get(vital_signs_endpoint))
.route("/api/v1/edge-vitals", get(edge_vitals_endpoint))

View file

@ -110,12 +110,18 @@ export class SensingTab {
<div class="sensing-card-title">About This Data</div>
<p class="sensing-about-text">
Metrics are computed from WiFi Channel State Information (CSI).
With <strong>1 ESP32</strong> you get presence detection, breathing
With <strong><span id="sensingNodeCount">0</span> ESP32 node(s)</strong> you get presence detection, breathing
estimation, and gross motion. Add <strong>3-4+ ESP32 nodes</strong>
around the room for spatial resolution and limb-level tracking.
</p>
</div>
<!-- Node Status -->
<div class="sensing-card" id="sensingNodeCards">
<div class="sensing-card-title">NODE STATUS</div>
<div id="nodeStatusContainer"></div>
</div>
<!-- Extra info -->
<div class="sensing-card">
<div class="sensing-card-title">Details</div>
@ -193,6 +199,9 @@ export class SensingTab {
// Update HUD
this._updateHUD(data);
// Update per-node panels
this._updateNodePanels(data);
}
_onStateChange(state) {
@ -233,6 +242,11 @@ export class SensingTab {
const f = data.features || {};
const c = data.classification || {};
// Node count
const nodeCount = (data.nodes || []).length;
const countEl = this.container.querySelector('#sensingNodeCount');
if (countEl) countEl.textContent = String(nodeCount);
// RSSI
this._setText('sensingRssi', `${(f.mean_rssi || -80).toFixed(1)} dBm`);
this._setText('sensingSource', data.source || '');
@ -309,6 +323,57 @@ export class SensingTab {
ctx.stroke();
}
// ---- Per-node panels ---------------------------------------------------
_updateNodePanels(data) {
const container = this.container.querySelector('#nodeStatusContainer');
if (!container) return;
const nodeFeatures = data.node_features || [];
if (nodeFeatures.length === 0) {
container.textContent = '';
const msg = document.createElement('div');
msg.style.cssText = 'color:#888;font-size:12px;padding:8px;';
msg.textContent = 'No nodes detected';
container.appendChild(msg);
return;
}
const NODE_COLORS = ['#00ccff', '#ff6600', '#00ff88', '#ff00cc', '#ffcc00', '#8800ff', '#00ffcc', '#ff0044'];
container.textContent = '';
for (const nf of nodeFeatures) {
const color = NODE_COLORS[nf.node_id % NODE_COLORS.length];
const statusColor = nf.stale ? '#888' : '#0f0';
const row = document.createElement('div');
row.style.cssText = `display:flex;align-items:center;gap:8px;padding:6px 8px;margin-bottom:4px;background:rgba(255,255,255,0.03);border-radius:6px;border-left:3px solid ${color};`;
const idCol = document.createElement('div');
idCol.style.minWidth = '50px';
const nameEl = document.createElement('div');
nameEl.style.cssText = `font-size:11px;font-weight:600;color:${color};`;
nameEl.textContent = 'Node ' + nf.node_id;
const statusEl = document.createElement('div');
statusEl.style.cssText = `font-size:9px;color:${statusColor};`;
statusEl.textContent = nf.stale ? 'STALE' : 'ACTIVE';
idCol.appendChild(nameEl);
idCol.appendChild(statusEl);
const metricsCol = document.createElement('div');
metricsCol.style.cssText = 'flex:1;font-size:10px;color:#aaa;';
metricsCol.textContent = (nf.rssi_dbm || -80).toFixed(0) + ' dBm · var ' + (nf.features?.variance || 0).toFixed(1);
const classCol = document.createElement('div');
classCol.style.cssText = 'font-size:10px;font-weight:600;color:#ccc;';
const motion = (nf.classification?.motion_level || 'absent').toUpperCase();
const conf = ((nf.classification?.confidence || 0) * 100).toFixed(0);
classCol.textContent = motion + ' ' + conf + '%';
row.appendChild(idCol);
row.appendChild(metricsCol);
row.appendChild(classCol);
container.appendChild(row);
}
}
// ---- Resize ------------------------------------------------------------
_setupResize() {

View file

@ -66,6 +66,10 @@ function valueToColor(v) {
return [r, g, b];
}
// ---- Node marker color palette -------------------------------------------
// Eight distinct colours for per-node 3D markers; a node's colour is picked
// as NODE_MARKER_COLORS[node_id % NODE_MARKER_COLORS.length]. Values mirror
// the SensingTab per-node card palette so markers and cards match.
const NODE_MARKER_COLORS = [0x00ccff, 0xff6600, 0x00ff88, 0xff00cc, 0xffcc00, 0x8800ff, 0x00ffcc, 0xff0044];
// ---- GaussianSplatRenderer -----------------------------------------------
export class GaussianSplatRenderer {
@ -108,6 +112,10 @@ export class GaussianSplatRenderer {
// Node markers (ESP32 / router positions)
this._createNodeMarkers(THREE);
// Dynamic per-node markers (multi-node support)
this.nodeMarkers = new Map(); // nodeId -> THREE.Mesh
this._THREE = THREE;
// Body disruption blob
this._createBodyBlob(THREE);
@ -369,11 +377,43 @@ export class GaussianSplatRenderer {
bGeo.attributes.splatSize.needsUpdate = true;
}
// -- Update node positions ---------------------------------------------
// -- Update node positions (legacy single-node) ------------------------
if (nodes.length > 0 && nodes[0].position) {
const pos = nodes[0].position;
this.nodeMarker.position.set(pos[0], 0.5, pos[2]);
}
// -- Update dynamic per-node markers (multi-node support) --------------
if (nodes && nodes.length > 0 && this.scene) {
const THREE = this._THREE || window.THREE;
if (THREE) {
const activeIds = new Set();
for (const node of nodes) {
activeIds.add(node.node_id);
if (!this.nodeMarkers.has(node.node_id)) {
const geo = new THREE.SphereGeometry(0.25, 16, 16);
const mat = new THREE.MeshBasicMaterial({
color: NODE_MARKER_COLORS[node.node_id % NODE_MARKER_COLORS.length],
transparent: true,
opacity: 0.8,
});
const marker = new THREE.Mesh(geo, mat);
this.scene.add(marker);
this.nodeMarkers.set(node.node_id, marker);
}
const marker = this.nodeMarkers.get(node.node_id);
const pos = node.position || [0, 0, 0];
marker.position.set(pos[0], 0.5, pos[2]);
}
// Remove stale markers
for (const [id, marker] of this.nodeMarkers) {
if (!activeIds.has(id)) {
this.scene.remove(marker);
this.nodeMarkers.delete(id);
}
}
}
}
}
// ---- Render loop -------------------------------------------------------

View file

@ -84,6 +84,11 @@ class SensingService {
return [...this._rssiHistory];
}
/** Get per-node RSSI history (object keyed by node_id). */
getPerNodeRssiHistory() {
return { ...(this._perNodeRssiHistory || {}) };
}
/** Current connection state. */
get state() {
return this._state;
@ -327,6 +332,20 @@ class SensingService {
}
}
// Per-node RSSI tracking
if (!this._perNodeRssiHistory) this._perNodeRssiHistory = {};
if (data.node_features) {
for (const nf of data.node_features) {
if (!this._perNodeRssiHistory[nf.node_id]) {
this._perNodeRssiHistory[nf.node_id] = [];
}
this._perNodeRssiHistory[nf.node_id].push(nf.rssi_dbm);
if (this._perNodeRssiHistory[nf.node_id].length > this._maxHistory) {
this._perNodeRssiHistory[nf.node_id].shift();
}
}
}
// Notify all listeners
for (const cb of this._listeners) {
try {