# live_demo.py
#!/usr/bin/env python3
"""
🎵 Live Demo - Actual Audio Processing
=====================================
This creates a test audio file and demonstrates real processing!
"""
import asyncio
import sys
import numpy as np
import soundfile as sf
from pathlib import Path
# Add the src directory to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))
from stem_mcp.audio_processor import AudioProcessor
def create_test_audio():
    """Create a 5-second stereo test WAV at examples/test_sample.wav.

    The file mixes three sine tones (bass, mid, high) under a simple
    attack/decay envelope so the processing demos have musical-ish input.

    Returns:
        str: Relative path of the written WAV file.
    """
    print("🎵 Creating test audio file...")

    # Create a 5-second audio file with multiple frequencies (simulating instruments)
    duration = 5.0  # seconds
    sample_rate = 44100

    # endpoint=False yields exactly sample_rate * duration samples with a
    # spacing of 1/sample_rate; the linspace default (endpoint=True) would
    # make the effective sample period slightly too long and repeat the
    # t=duration point.
    t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)

    # Create different "instruments" at different frequencies
    bass_freq = 80    # Bass frequency
    mid_freq = 440    # Mid frequency (A4)
    high_freq = 1760  # High frequency (A6)

    # Generate different waveforms (amplitudes chosen to avoid clipping when summed)
    bass = 0.3 * np.sin(2 * np.pi * bass_freq * t)
    mid = 0.4 * np.sin(2 * np.pi * mid_freq * t)
    high = 0.2 * np.sin(2 * np.pi * high_freq * t)

    # Add some envelope to make it more musical: fast attack, slow decay
    envelope = np.exp(-0.5 * t) * (1 - np.exp(-5 * t))

    # Mix them together
    mixed_audio = (bass + mid + high) * envelope

    # Make it stereo by duplicating the mono mix into both channels
    stereo_audio = np.column_stack([mixed_audio, mixed_audio])

    # Save the test file (create the output directory if needed)
    test_file = "examples/test_sample.wav"
    Path("examples").mkdir(exist_ok=True)
    sf.write(test_file, stereo_audio, sample_rate)

    print(f"✅ Created test audio: {test_file}")
    print(f" Duration: {duration} seconds")
    print(f" Sample rate: {sample_rate} Hz")
    print(f" Channels: Stereo")
    print(f" Contains: Bass ({bass_freq}Hz), Mid ({mid_freq}Hz), High ({high_freq}Hz)")

    return test_file
async def demo_with_real_audio():
    """Run the end-to-end demo: generate a test WAV, then exercise the
    AudioProcessor's analyze / loop / split operations against it.

    Each demo step catches and reports its own exceptions so a single
    failure does not abort the remaining steps — intentional best-effort
    behavior for a demo script.
    """
    print("\n" + "=" * 60)
    print("🎬 LIVE AUDIO PROCESSING DEMO")
    print("=" * 60)

    # Create test audio
    test_file = create_test_audio()

    # Initialize processor
    print("\n⚡ Initializing audio processor...")
    processor = AudioProcessor()
    print(f"✅ Processor ready (device: {processor.device})")

    # Demo 1: Analyze the test audio
    print("\n📊 DEMO 1: Analyzing test audio...")
    try:
        analysis_result = await processor.analyze_audio(test_file)
        print("✅ Analysis complete!")
        print(analysis_result)
    except Exception as e:
        print(f"❌ Analysis failed: {e}")

    # Demo 2: Create a loop from the test audio
    print("\n🔄 DEMO 2: Creating loop from test audio...")
    try:
        loop_result = await processor.create_loop(
            audio_path=test_file,
            loop_duration=2.0,
            crossfade_duration=0.1
        )
        print("✅ Loop creation complete!")
        print(loop_result)
    except Exception as e:
        print(f"❌ Loop creation failed: {e}")

    # Demo 3: Split the audio into segments
    print("\n✂️ DEMO 3: Splitting audio into segments...")
    try:
        split_result = await processor.split_stems(
            stem_path=test_file,
            segment_length=2.0,
            overlap=0.5,
            output_dir="examples/segments"
        )
        print("✅ Audio splitting complete!")
        print(split_result)
    except Exception as e:
        print(f"❌ Audio splitting failed: {e}")

    # Demo 4: Show what stem separation would do (without actually running it)
    print("\n🎤 DEMO 4: Stem separation (simulated)")
    print("Note: Actual stem separation would take longer and requires the full Demucs model")
    print("For demonstration, here's what would happen:")
    print(" 📁 test_sample/vocals.wav - Would isolate vocal-like frequencies")
    print(" 📁 test_sample/drums.wav - Would isolate percussive elements")
    print(" 📁 test_sample/bass.wav - Would isolate low-frequency content")
    print(" 📁 test_sample/other.wav - Would isolate remaining instruments")

    # Show created files with their sizes in MB
    print("\n📁 Files created during this demo:")
    examples_path = Path("examples")
    for file_path in examples_path.rglob("*"):
        if file_path.is_file():
            size_mb = file_path.stat().st_size / (1024 * 1024)
            print(f" 📄 {file_path} ({size_mb:.2f} MB)")

    print("\n🎉 Live demo complete!")
    print("You can now:")
    print("1. Listen to the created files in examples/")
    print("2. Use these files with your MCP client")
    print("3. Test the full stem separation on your own music files")
if __name__ == "__main__":
    # Entry point: run the async demo to completion on a fresh event loop.
    asyncio.run(demo_with_real_audio())