//! Main application state and update loop.
use crate::audio::AudioManager;
use crate::ipc::MlClient;
use crate::state::{AppConfig, AppMode, SessionState};
use crate::ui::{self, VoiceModeTheme};
use crossbeam_channel::{bounded, Receiver, Sender};
use eframe::egui;
use parking_lot::RwLock;
use std::process::{Child, Command, Stdio};
use std::sync::Arc;
use std::thread;
/// Events from background ML processing
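/// Worker threads send these over a crossbeam channel; the UI thread drains
/// them once per frame in `process_ml_events`.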
enum MlEvent {
Transcribed { text: String },
Response { text: String },
Audio { samples: Vec<f32>, sample_rate: u32 },
Done,
Error(String),
/// Result of an asynchronous VAD / turn-completion check
VadResult { is_speech: bool, probability: f32 },
}
impl Drop for VoiceModeApp {
fn drop(&mut self) {
// Save config (window state is saved in on_exit)
self.config.sidebar_visible = self.sidebar_visible;
self.config.color_index = self.color_index;
if let Err(e) = self.config.save() {
tracing::error!("Failed to save config: {}", e);
}
// Kill the ML server subprocess when app closes
if let Some(mut child) = self.ml_server_process.take() {
tracing::info!("Stopping ML server...");
let _ = child.kill();
let _ = child.wait();
}
}
}
/// Main application struct.
pub struct VoiceModeApp {
/// Application configuration
config: AppConfig,
/// Current UI mode (Full, Overlay, Settings)
mode: AppMode,
/// Theme with colors and textures
theme: VoiceModeTheme,
/// Session state (conversation, emotion, etc.)
session: Arc<RwLock<SessionState>>,
/// Audio manager for capture/playback
audio: Option<AudioManager>,
/// ML server client
ml_client: Option<MlClient>,
/// Waveform data for visualization
waveform_data: Vec<f32>,
/// Current audio levels (left, right)
audio_levels: (f32, f32),
/// Is currently recording
is_recording: bool,
/// Is currently speaking
is_speaking: bool,
/// Status message
status: String,
/// Current waveform color index (0=orange, 1=blue, 2=green, 3=purple)
color_index: usize,
/// Sidebar visible
sidebar_visible: bool,
/// Channel for receiving ML events
ml_event_rx: Receiver<MlEvent>,
/// Channel for sending ML events (cloned into threads)
ml_event_tx: Sender<MlEvent>,
/// ML server subprocess
ml_server_process: Option<Child>,
/// Whether we've already tried to start/connect to ML server
ml_server_checked: bool,
/// Text input for typing messages
text_input: String,
/// Hands-free VAD mode (vs push-to-talk)
vad_mode: bool,
/// Typing mode (disables PTT to allow typing)
typing_mode: bool,
/// VAD state tracking for hands-free mode
vad_speech_detected: bool,
vad_silence_frames: u32,
vad_speech_frames: u32,
/// Frame counter for periodic turn-completion (Smart Turn) checks
vad_check_frame: u32,
/// Pending VAD check (to avoid overlapping checks)
vad_check_pending: bool,
/// Available skills (loaded from server)
available_skills: Vec<(String, String)>, // (id, name)
/// Avatar texture
avatar_texture: Option<egui::TextureHandle>,
}
impl VoiceModeApp {
/// Create a new application instance.
pub fn new(cc: &eframe::CreationContext<'_>) -> Self {
// Configure custom fonts if needed
// configure_fonts(&cc.egui_ctx);
// Set dark theme
cc.egui_ctx.set_visuals(egui::Visuals::dark());
// Create ML event channel
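// A bounded channel caps the backlog; `send` blocks briefly if the UI thread
// falls behind instead of letting worker threads queue events without limit.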
let (ml_event_tx, ml_event_rx) = bounded(32);
// Load config
let config = AppConfig::load();
let sidebar_visible = config.sidebar_visible;
let color_index = config.color_index;
Self {
config,
mode: AppMode::Full,
theme: VoiceModeTheme::default(),
session: Arc::new(RwLock::new(SessionState::default())),
audio: None,
ml_client: None,
waveform_data: vec![0.0; 256],
audio_levels: (0.0, 0.0),
is_recording: false,
is_speaking: false,
status: "Initializing...".to_string(),
color_index,
sidebar_visible,
ml_event_rx,
ml_event_tx,
ml_server_process: None,
ml_server_checked: false,
text_input: String::new(),
vad_mode: false,
typing_mode: false,
vad_speech_detected: false,
vad_silence_frames: 0,
vad_speech_frames: 0,
vad_check_frame: 0,
vad_check_pending: false,
available_skills: Vec::new(),
avatar_texture: None,
}
}
/// Load the avatar image.
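/// No-op once the texture exists; otherwise searches a few relative asset paths
/// (plus a hard-coded development path) and uploads the first image that loads.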
fn load_avatar(&mut self, ctx: &egui::Context) {
if self.avatar_texture.is_some() {
return;
}
// Try to load character_placeholder.png from assets
// Check multiple possible locations for the assets folder
let possible_paths = [
"assets/character_placeholder.png",
"../assets/character_placeholder.png",
"../../assets/character_placeholder.png",
"C:/AI/localvoicemode/v2/voicemode-ui/assets/character_placeholder.png"
];
for path_str in possible_paths {
let path = std::path::Path::new(path_str);
if path.exists() {
if let Ok(image_data) = std::fs::read(path) {
if let Ok(image) = image::load_from_memory(&image_data) {
let rgba = image.to_rgba8();
let size = [rgba.width() as usize, rgba.height() as usize];
let pixels = rgba.as_flat_samples();
let color_image = egui::ColorImage::from_rgba_unmultiplied(size, pixels.as_slice());
self.avatar_texture = Some(ctx.load_texture("avatar", color_image, egui::TextureOptions::LINEAR));
tracing::info!("Loaded avatar image from {:?}", path);
return;
}
}
}
}
}
/// Initialize audio and ML client (called after first frame).
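/// Safe to call every frame: each step is guarded by an `Option` or flag, so work
/// only happens until the audio manager, server process, and client are all up.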
fn initialize(&mut self) {
// Initialize audio manager (once)
if self.audio.is_none() {
match AudioManager::new() {
Ok(audio) => {
self.audio = Some(audio);
tracing::info!("Audio manager initialized");
}
Err(e) => {
tracing::error!("Failed to initialize audio: {}", e);
self.status = format!("Audio error: {}", e);
}
}
}
// Start ML server subprocess if not already checked
if !self.ml_server_checked {
self.ml_server_checked = true;
self.start_ml_server();
}
// Try to connect to ML server (may take a while for models to load)
if self.ml_client.is_none() {
match MlClient::connect(&self.config.ml_server_address) {
Ok(mut client) => {
// Try to get status to verify server is ready
match client.status() {
Ok(_) => {
// Load available skills
if let Ok(skills) = client.list_skills() {
self.available_skills = skills.iter()
.map(|s| (s.id.clone(), s.name.clone()))
.collect();
tracing::info!("Loaded {} skills", self.available_skills.len());
}
// Load selected skill
if !self.config.skill.is_empty() {
if let Err(e) = client.load_skill(&self.config.skill) {
tracing::warn!("Failed to load skill {}: {}", self.config.skill, e);
}
}
self.ml_client = Some(client);
self.status = "Ready".to_string();
tracing::info!("Connected to ML server");
}
Err(_) => {
// Server connected but not ready yet
self.status = "Loading models...".to_string();
}
}
}
Err(_) => {
// Server still starting
self.status = "Starting ML server...".to_string();
}
}
}
}
/// Check if ML server is already running.
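/// A plain TCP connect probe against the configured server address.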
fn is_ml_server_running(&self) -> bool {
use std::net::TcpStream;
TcpStream::connect(&self.config.ml_server_address).is_ok()
}
/// Start the ML server as a subprocess.
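/// Skips spawning when the configured port already accepts connections. Otherwise it
/// resolves a working directory (relative to the executable, with a hard-coded
/// development fallback), picks a Python interpreter, and spawns `python -m ml_server`;
/// on Windows the console window is hidden and output is redirected to log files.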
fn start_ml_server(&mut self) {
// Check if server is already running on the port
if self.is_ml_server_running() {
tracing::info!("ML server already running on {}", self.config.ml_server_address);
return;
}
// Find the ml-server directory relative to the executable
let ml_server_dir = std::env::current_exe()
.ok()
.and_then(|p| p.parent().map(|p| p.to_path_buf()))
.map(|p| p.join("..").join("..").join("ml-server"))
.unwrap_or_else(|| std::path::PathBuf::from("../ml-server"));
// Also try the v2/ml-server path
let alt_dir = std::path::PathBuf::from("C:/AI/localvoicemode/v2/ml-server");
let working_dir = if alt_dir.exists() { alt_dir } else { ml_server_dir };
tracing::info!("Starting ML server from {:?}", working_dir);
// Try to find Python
let python = if cfg!(windows) {
// Try venv first, then system python
let venv_python = std::path::PathBuf::from("C:/AI/localvoicemode/.venv/Scripts/python.exe");
if venv_python.exists() {
venv_python.to_string_lossy().to_string()
} else {
"python".to_string()
}
} else {
"python3".to_string()
};
#[cfg(windows)]
let result = {
use std::os::windows::process::CommandExt;
use std::fs::File;
const CREATE_NO_WINDOW: u32 = 0x08000000;
// Log to file for debugging
let log_path = working_dir.join("ml_server.log");
let log_file = File::create(&log_path).ok();
let stderr_file = File::create(working_dir.join("ml_server_err.log")).ok();
let mut cmd = Command::new(&python);
cmd.args(["-m", "ml_server"])
.current_dir(&working_dir)
.creation_flags(CREATE_NO_WINDOW);
if let Some(f) = log_file {
cmd.stdout(f);
} else {
cmd.stdout(Stdio::null());
}
if let Some(f) = stderr_file {
cmd.stderr(f);
} else {
cmd.stderr(Stdio::null());
}
cmd.spawn()
};
#[cfg(not(windows))]
let result = Command::new(&python)
.args(["-m", "ml_server"])
.current_dir(&working_dir)
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn();
match result {
Ok(child) => {
tracing::info!("ML server started (PID: {})", child.id());
self.ml_server_process = Some(child);
self.status = "Starting ML server...".to_string();
}
Err(e) => {
tracing::error!("Failed to start ML server: {}", e);
self.status = format!("ML server error: {}", e);
}
}
}
/// Handle keyboard shortcuts.
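/// Space = push-to-talk (unless typing or hands-free mode is active), Escape toggles
/// overlay mode, F1 opens settings, C cycles the waveform color, Tab toggles the sidebar.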
fn handle_input(&mut self, ctx: &egui::Context) {
// Space for push-to-talk
// Only allow PTT if NOT in typing mode and NOT in VAD mode
// (If in typing mode, Space is for typing. If in VAD mode, recording is automatic.)
if !self.typing_mode && !self.vad_mode {
if ctx.input(|i| i.key_pressed(egui::Key::Space)) && !self.is_recording {
self.start_recording();
}
if ctx.input(|i| i.key_released(egui::Key::Space)) && self.is_recording {
self.stop_recording();
}
}
// Escape to toggle overlay mode
if ctx.input(|i| i.key_pressed(egui::Key::Escape)) {
self.mode = match self.mode {
AppMode::Full => AppMode::Overlay,
AppMode::Overlay => AppMode::Full,
AppMode::Settings => AppMode::Full,
};
}
// F1 for settings
if ctx.input(|i| i.key_pressed(egui::Key::F1)) {
self.mode = AppMode::Settings;
}
// C to cycle colors (orange -> blue -> green -> purple)
// Skip single-key shortcuts while a text field has keyboard focus.
if !ctx.wants_keyboard_input() {
if ctx.input(|i| i.key_pressed(egui::Key::C)) {
self.color_index = (self.color_index + 1) % 4;
}
// Tab to toggle sidebar
if ctx.input(|i| i.key_pressed(egui::Key::Tab)) {
self.sidebar_visible = !self.sidebar_visible;
}
}
}
fn start_recording(&mut self) {
self.is_recording = true;
self.status = if self.vad_mode {
"Listening...".to_string()
} else {
"Recording...".to_string()
};
if let Some(audio) = &mut self.audio {
audio.start_recording();
}
}
fn stop_recording(&mut self) {
self.is_recording = false;
self.status = "Processing...".to_string();
if let Some(audio) = &mut self.audio {
let audio_data = audio.stop_recording();
let sample_rate = audio.sample_rate();
if audio_data.is_empty() {
self.status = "No audio recorded".to_string();
return;
}
self.process_audio(audio_data, sample_rate, false);
}
}
/// Stop recording and process the audio (Smart Turn has already confirmed the turn is complete)
fn stop_recording_with_vad(&mut self) {
self.is_recording = false;
self.status = "Processing...".to_string();
if let Some(audio) = &mut self.audio {
let audio_data = audio.stop_recording();
let sample_rate = audio.sample_rate();
if audio_data.is_empty() {
// No audio - restart listening
if self.vad_mode {
self.start_recording();
}
return;
}
// Smart Turn already verified this is complete speech, process directly
self.process_audio(audio_data, sample_rate, false);
}
}
/// Check VAD asynchronously using Smart Turn
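/// Runs on its own thread with a fresh `MlClient` connection and reports back via
/// `MlEvent::VadResult`; on any failure it assumes speech is still in progress so the
/// recording is never cut off by an error.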
fn check_vad_async(&mut self, audio_data: Vec<f32>, sample_rate: u32) {
let tx = self.ml_event_tx.clone();
let address = self.config.ml_server_address.clone();
thread::spawn(move || {
let mut client = match MlClient::connect(&address) {
Ok(c) => c,
Err(e) => {
tracing::warn!("VAD check failed to connect: {}", e);
let _ = tx.send(MlEvent::VadResult { is_speech: true, probability: 0.0 });
return;
}
};
match client.vad(&audio_data, sample_rate, 0.5) {
Ok(result) => {
// Forward the result; the UI thread treats `is_speech == false` as "turn complete"
let _ = tx.send(MlEvent::VadResult {
is_speech: result.is_speech,
probability: result.max_probability,
});
}
Err(e) => {
tracing::warn!("VAD check failed: {}", e);
// On error, assume still speaking to avoid cutting off
let _ = tx.send(MlEvent::VadResult { is_speech: true, probability: 0.0 });
}
}
});
}
/// Process recorded audio (with optional VAD verification)
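/// Spawns a worker thread that runs the full pipeline: optional VAD check,
/// transcription, LLM chat, then TTS, reporting each stage back as an `MlEvent`.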
fn process_audio(&mut self, audio_data: Vec<f32>, sample_rate: u32, use_vad: bool) {
// Clone what we need for the thread
let tx = self.ml_event_tx.clone();
let address = self.config.ml_server_address.clone();
let vad_mode = self.vad_mode;
// Spawn background thread for ML processing
thread::spawn(move || {
// Connect to ML server
let mut client = match MlClient::connect(&address) {
Ok(c) => c,
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("Connection failed: {}", e)));
return;
}
};
// Verify with Silero VAD if requested
if use_vad {
match client.vad(&audio_data, sample_rate, 0.7) { // Higher threshold for noisy environments
Ok(vad_result) => {
if !vad_result.is_speech {
// No speech detected (likely background noise); report Done so the UI can restart listening
tracing::info!("VAD: No speech detected (prob={:.2}), ignoring", vad_result.max_probability);
let _ = tx.send(MlEvent::Done);
return;
}
tracing::info!("VAD: Speech confirmed (prob={:.2})", vad_result.max_probability);
}
Err(e) => {
tracing::warn!("VAD check failed: {}, proceeding anyway", e);
}
}
}
// Transcribe
let transcribe_result = match client.transcribe(&audio_data, sample_rate) {
Ok(r) => r,
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("Transcription failed: {}", e)));
return;
}
};
let user_text = transcribe_result.text.clone();
if user_text.trim().is_empty() {
let _ = tx.send(MlEvent::Error("No speech detected".to_string()));
return;
}
let _ = tx.send(MlEvent::Transcribed { text: user_text.clone() });
// Chat with LLM
let chat_result = match client.chat(&user_text, transcribe_result.emotion.as_deref()) {
Ok(r) => r,
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("Chat failed: {}", e)));
return;
}
};
let _ = tx.send(MlEvent::Response { text: chat_result.response.clone() });
// TTS - speak response
match client.speak(&chat_result.response, None, None) {
Ok(audio_samples) => {
// TTS returns 24kHz audio
let _ = tx.send(MlEvent::Audio { samples: audio_samples, sample_rate: 24000 });
}
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("TTS failed: {}", e)));
}
}
let _ = tx.send(MlEvent::Done);
});
}
/// Send a typed text message
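/// Same pipeline as voice input minus transcription: chat then TTS on a worker thread.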
fn send_text_message(&mut self, text: String) {
if text.trim().is_empty() {
return;
}
self.status = "Thinking...".to_string();
self.session.write().add_user_message(&text);
let tx = self.ml_event_tx.clone();
let address = self.config.ml_server_address.clone();
// Spawn background thread for ML processing
thread::spawn(move || {
let mut client = match MlClient::connect(&address) {
Ok(c) => c,
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("Connection failed: {}", e)));
return;
}
};
// Chat with LLM
let chat_result = match client.chat(&text, None) {
Ok(r) => r,
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("Chat failed: {}", e)));
return;
}
};
let _ = tx.send(MlEvent::Response { text: chat_result.response.clone() });
// TTS - speak response
match client.speak(&chat_result.response, None, None) {
Ok(audio_samples) => {
let _ = tx.send(MlEvent::Audio { samples: audio_samples, sample_rate: 24000 });
}
Err(e) => {
let _ = tx.send(MlEvent::Error(format!("TTS failed: {}", e)));
}
}
let _ = tx.send(MlEvent::Done);
});
}
/// Process events from ML background thread
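/// Non-blocking: drains whatever is queued via `try_recv` each frame and updates
/// session state, playback, and the hands-free recording loop accordingly.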
fn process_ml_events(&mut self) {
while let Ok(event) = self.ml_event_rx.try_recv() {
match event {
MlEvent::Transcribed { text } => {
self.session.write().add_user_message(&text);
self.status = "Thinking...".to_string();
}
MlEvent::Response { text } => {
self.session.write().add_assistant_message(&text);
self.status = "Speaking...".to_string();
self.is_speaking = true;
}
MlEvent::Audio { samples, sample_rate } => {
self.is_speaking = true;
if let Some(audio) = &self.audio {
if let Err(e) = audio.play_audio(samples, sample_rate) {
tracing::error!("Failed to play audio: {}", e);
}
}
}
MlEvent::Done => {
self.status = "Ready".to_string();
self.is_speaking = false;
// In VAD mode, restart recording for next utterance
if self.vad_mode && !self.is_recording {
self.vad_speech_detected = false;
self.vad_silence_frames = 0;
self.vad_check_frame = 0;
self.vad_speech_frames = 0;
self.start_recording();
}
}
MlEvent::Error(msg) => {
self.status = format!("Error: {}", msg);
self.is_speaking = false;
// In VAD mode, restart recording so user can try again
if self.vad_mode && !self.is_recording {
self.vad_speech_detected = false;
self.vad_silence_frames = 0;
self.vad_check_frame = 0;
self.vad_speech_frames = 0;
self.start_recording();
}
}
MlEvent::VadResult { is_speech, probability } => {
self.vad_check_pending = false;
if !is_speech {
// Smart Turn says turn is complete - process the audio
tracing::info!("Smart Turn: Turn complete (prob={:.2})", probability);
self.vad_speech_detected = false;
self.vad_silence_frames = 0;
self.vad_check_frame = 0;
self.vad_speech_frames = 0;
self.stop_recording_with_vad();
} else {
// Still speaking, continue recording
tracing::debug!("Smart Turn: Still speaking (prob={:.2})", probability);
}
}
}
}
}
}
impl eframe::App for VoiceModeApp {
fn on_exit(&mut self, _gl: Option<&eframe::glow::Context>) {
// Save config when window is closed properly
self.config.sidebar_visible = self.sidebar_visible;
self.config.color_index = self.color_index;
if let Err(e) = self.config.save() {
tracing::error!("Failed to save config on exit: {}", e);
} else {
tracing::info!("Config saved on exit");
}
// Kill the ML server subprocess
if let Some(mut child) = self.ml_server_process.take() {
tracing::info!("Stopping ML server...");
let _ = child.kill();
let _ = child.wait();
}
}
fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
// Load avatar image
self.load_avatar(ctx);
// Lazy initialization
self.initialize();
// Handle input
self.handle_input(ctx);
// Process ML events from background thread
self.process_ml_events();
// Save window position/size for restoration
ctx.input(|i| {
if let Some(rect) = i.viewport().outer_rect {
self.config.window_x = Some(rect.min.x);
self.config.window_y = Some(rect.min.y);
self.config.window_width = Some(rect.width());
self.config.window_height = Some(rect.height());
}
});
// Update waveform data from audio capture
if let Some(audio) = &self.audio {
let raw_waveform = audio.get_waveform_data();
// Downsample to 256 points for visualization
if !raw_waveform.is_empty() {
let chunk_size = (raw_waveform.len() / 256).max(1);
self.waveform_data = raw_waveform
.chunks(chunk_size)
.take(256)
.map(|chunk| {
// Use the sample with largest magnitude (preserving sign)
let sample = chunk
.iter()
.max_by(|a, b| a.abs().partial_cmp(&b.abs()).unwrap())
.copied()
.unwrap_or(0.0);
// Light amplification with soft clipping
let amplified = sample * 10.0;
// Soft clip using tanh for natural compression (preserves sign)
amplified.tanh()
})
.collect();
}
// Update audio level
let level = audio.drain_levels();
self.audio_levels = (level, level);
// VAD mode: use Smart Turn for semantic turn detection.
// Smart Turn detects when the user has finished speaking (not just silence),
// so it stays robust to steady background noise such as fans.
if self.vad_mode && self.is_recording && !self.is_speaking && !self.vad_check_pending {
const VAD_CHECK_INTERVAL: u32 = 30; // Check every ~0.5s at 60fps
const MIN_AUDIO_FRAMES: u32 = 60; // Need at least 1s of audio before checking
self.vad_check_frame += 1;
// Periodically check with Smart Turn VAD
if self.vad_check_frame >= VAD_CHECK_INTERVAL && self.vad_speech_frames >= MIN_AUDIO_FRAMES {
self.vad_check_frame = 0;
// Get current audio for VAD check
if let Some(audio) = &self.audio {
let audio_data = audio.get_captured_audio();
let sample_rate = audio.sample_rate();
// Only check if we have enough audio
if audio_data.len() > (sample_rate as usize / 2) {
self.vad_check_pending = true;
self.check_vad_async(audio_data, sample_rate);
}
}
}
// Count frames of recording
self.vad_speech_frames += 1;
}
}
// Request continuous repaint for smooth animation
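// This requests a new frame immediately, so the UI repaints continuously even
// without input events, keeping the waveform animating at the cost of constant redraws.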
ctx.request_repaint();
// Render based on mode
match self.mode {
AppMode::Full => self.render_full_mode(ctx),
AppMode::Overlay => self.render_overlay_mode(ctx),
AppMode::Settings => self.render_settings_mode(ctx),
}
}
}
impl VoiceModeApp {
fn render_full_mode(&mut self, ctx: &egui::Context) {
// Top panel with status
egui::TopBottomPanel::top("top_panel").show(ctx, |ui| {
ui.horizontal(|ui| {
ui.heading("LocalVoiceMode");
ui.separator();
ui.label(&self.status);
ui.with_layout(egui::Layout::right_to_left(egui::Align::Center), |ui| {
if ui.button("⚙").clicked() {
self.mode = AppMode::Settings;
}
if ui.button("🗗").clicked() {
self.mode = AppMode::Overlay;
}
if ui.button(if self.sidebar_visible { "📂" } else { "📁" }).on_hover_text("Toggle Sidebar (Tab)").clicked() {
self.sidebar_visible = !self.sidebar_visible;
}
});
});
});
// Bottom panel with text input and shortcuts
egui::TopBottomPanel::bottom("bottom_panel").show(ctx, |ui| {
ui.horizontal(|ui| {
// Text mode checkbox
ui.checkbox(&mut self.typing_mode, "Text Mode");
// Text input
let response = ui.add(
egui::TextEdit::singleline(&mut self.text_input)
.hint_text("Type a message...")
.desired_width(200.0)
.interactive(self.typing_mode) // Only interactive when in text mode
);
if response.lost_focus() && ui.input(|i| i.key_pressed(egui::Key::Enter)) {
let text = std::mem::take(&mut self.text_input);
self.send_text_message(text);
}
if ui.button("Send").clicked() {
let text = std::mem::take(&mut self.text_input);
self.send_text_message(text);
}
ui.separator();
// VAD toggle
// Disable VAD if in typing mode
ui.add_enabled_ui(!self.typing_mode, |ui| {
if ui.checkbox(&mut self.vad_mode, "Hands-free").changed() {
if self.vad_mode {
self.start_recording();
} else {
// Don't process when turning off
self.is_recording = false;
if let Some(audio) = &mut self.audio {
audio.stop_recording();
}
}
}
});
ui.separator();
if self.typing_mode {
ui.label("Typing Mode Active");
} else {
ui.label("Space: PTT | Tab: Sidebar | C: Color");
}
});
});
// Side panel with character/settings (hideable)
if self.sidebar_visible {
egui::SidePanel::left("side_panel")
.default_width(250.0)
.show(ctx, |ui| {
ui::character_card(ui, &self.theme, &self.session.read(), self.avatar_texture.as_ref());
ui.separator();
ui::conversation_panel(ui, &self.session.read());
});
}
// Central panel with waveform
let color = self.get_waveform_color();
egui::CentralPanel::default().show(ctx, |ui| {
ui::waveform_visualizer_colored(ui, &self.waveform_data, self.is_recording, color);
});
}
fn get_waveform_color(&self) -> egui::Color32 {
match self.color_index {
0 => self.theme.burnt_orange, // Orange
1 => self.theme.lapis_lazuli, // Blue
2 => self.theme.emerald, // Green
3 => self.theme.mace_windu, // Purple
_ => self.theme.burnt_orange,
}
}
fn render_overlay_mode(&mut self, ctx: &egui::Context) {
let color = self.get_waveform_color();
egui::CentralPanel::default().show(ctx, |ui| {
ui::waveform_visualizer_colored(ui, &self.waveform_data, self.is_recording, color);
});
}
fn render_settings_mode(&mut self, ctx: &egui::Context) {
egui::CentralPanel::default().show(ctx, |ui| {
ui.heading("Settings");
ui.separator();
let mut skill_changed = false;
let mut new_skill = self.config.skill.clone();
egui::Grid::new("settings_grid")
.num_columns(2)
.spacing([40.0, 8.0])
.show(ui, |ui| {
ui.label("Skill/Personality:");
egui::ComboBox::from_id_salt("skill")
.selected_text(
self.available_skills.iter()
.find(|(id, _)| id == &self.config.skill)
.map(|(_, name)| name.as_str())
.unwrap_or(&self.config.skill)
)
.show_ui(ui, |ui| {
for (skill_id, skill_name) in &self.available_skills {
if ui.selectable_value(&mut new_skill, skill_id.clone(), skill_name).clicked() {
skill_changed = true;
}
}
});
ui.end_row();
ui.label("ML Server Address:");
ui.text_edit_singleline(&mut self.config.ml_server_address);
ui.end_row();
ui.label("TTS Model:");
egui::ComboBox::from_id_salt("tts_model")
.selected_text(&self.config.tts_model)
.show_ui(ui, |ui| {
ui.selectable_value(&mut self.config.tts_model, "pocket".to_string(), "Pocket TTS");
ui.selectable_value(&mut self.config.tts_model, "indextts2".to_string(), "IndexTTS2");
ui.selectable_value(&mut self.config.tts_model, "qwen3".to_string(), "Qwen3-TTS");
});
ui.end_row();
ui.label("Voice:");
ui.text_edit_singleline(&mut self.config.voice);
ui.end_row();
});
// Load new skill if changed
if skill_changed && new_skill != self.config.skill {
self.config.skill = new_skill.clone();
if let Some(client) = &mut self.ml_client {
if let Err(e) = client.load_skill(&new_skill) {
tracing::error!("Failed to load skill: {}", e);
self.status = format!("Skill error: {}", e);
} else {
self.status = format!("Loaded: {}", new_skill);
}
}
}
ui.separator();
if ui.button("Back").clicked() {
self.mode = AppMode::Full;
}
});
}
}