demo_cli.cpp (7.63 kB)
/**
 * @file demo_cli.cpp
 * @brief CLI demo previewing AI Forge Studio functionality
 *
 * A trial build that runs on the command line to showcase the
 * program's capabilities without requiring a graphical interface.
 */

#include <iostream>
#include <iomanip>
#include <thread>
#include <chrono>
#include <string>
#include <tuple>   // required for vector<tuple<...>> in showModelManager()
#include <vector>

using namespace std;

// ANSI escape codes for colored terminal output
#define RESET   "\033[0m"
#define RED     "\033[31m"
#define GREEN   "\033[32m"
#define YELLOW  "\033[33m"
#define BLUE    "\033[34m"
#define MAGENTA "\033[35m"
#define CYAN    "\033[36m"
#define BOLD    "\033[1m"

void printHeader() {
    cout << CYAN << BOLD;
    cout << "\n╔════════════════════════════════════════════════════╗\n";
    cout << "║             AI FORGE STUDIO - CLI DEMO             ║\n";
    cout << "║               RTX 50-Series Edition                ║\n";
    cout << "╚════════════════════════════════════════════════════╝\n";
    cout << RESET << "\n";
}

void printSection(const string& title) {
    cout << "\n" << YELLOW << BOLD << "═══ " << title << " ═══" << RESET << "\n\n";
}

// Print an animated progress bar for a named task
void simulateProgress(const string& task, int steps = 10) {
    cout << CYAN << task << RESET << " ";
    for (int i = 0; i <= steps; i++) {
        cout << "█" << flush;
        this_thread::sleep_for(chrono::milliseconds(100));
    }
    cout << GREEN << " ✓" << RESET << "\n";
}

void showGPUStatus() {
    printSection("GPU Status");

    cout << BOLD << "Device:" << RESET << " NVIDIA GeForce RTX 5090\n";
    cout << BOLD << "Driver:" << RESET << " 550.54.14\n";
    cout << BOLD << "CUDA:"   << RESET << " 12.3\n\n";

    // GPU utilization
    cout << "GPU Usage:   [" << CYAN;
    for (int i = 0; i < 67; i++) cout << "▓";
    for (int i = 67; i < 100; i++) cout << "░";
    cout << RESET << "] 67%\n";

    // Temperature
    cout << "Temperature: [" << GREEN;
    for (int i = 0; i < 65; i++) cout << "▓";
    for (int i = 65; i < 100; i++) cout << "░";
    cout << RESET << "] 65°C\n";

    // VRAM
    cout << "VRAM:        [" << MAGENTA;
    for (int i = 0; i < 68; i++) cout << "▓";
    for (int i = 68; i < 100; i++) cout << "░";
    cout << RESET << "] 16.2 / 24 GB\n";

    // Power draw
    cout << "Power:       [" << YELLOW;
    for (int i = 0; i < 70; i++) cout << "▓";
    for (int i = 70; i < 100; i++) cout << "░";
    cout << RESET << "] 420W\n";

    cout << "\n" << GREEN << "✓ GPU Running Optimally" << RESET << "\n";
}

void showModelManager() {
    printSection("AI Model Manager");

    vector<tuple<string, string, string, bool>> models = {
        {"Stable Diffusion XL", "6.9 GB", "8.2 GB VRAM", true},
        {"RealESRGAN 4x",       "64 MB",  "512 MB VRAM", true},
        {"LLaMA 3.1 8B",        "4.1 GB", "5.5 GB VRAM", false}
    };

    cout << left << setw(25) << "Model Name"
         << setw(12) << "Size"
         << setw(15) << "VRAM Usage"
         << "Status\n";
    // '─' is a multi-byte UTF-8 character, so string(70, '─') would not
    // work as intended; print the divider character by character instead
    for (int i = 0; i < 70; i++) cout << "─";
    cout << "\n";

    for (const auto& [name, size, vram, loaded] : models) {
        cout << left << setw(25) << name
             << setw(12) << size
             << setw(15) << vram;
        if (loaded) {
            cout << GREEN << "● Loaded" << RESET;
        } else {
            cout << RED << "○ Offline" << RESET;
        }
        cout << "\n";
    }

    cout << "\n" << CYAN << "Total VRAM Usage: 8.7 GB" << RESET << "\n";
}

void simulateImageGeneration() {
    printSection("Image Generation");

    cout << BOLD << "Prompt:" << RESET << " A futuristic AI laboratory with glowing displays\n";
    cout << BOLD << "Model:"  << RESET << " Stable Diffusion XL\n";
    cout << BOLD << "Steps:"  << RESET << " 50\n";
    cout << BOLD << "Size:"   << RESET << " 1024x1024\n\n";

    simulateProgress("Loading model", 8);
    simulateProgress("Encoding prompt", 5);

    cout << "\n" << CYAN << "Generating image..." << RESET << "\n";

    // Simulate diffusion steps with an in-place progress bar
    for (int step = 1; step <= 50; step++) {
        cout << "\rStep " << setw(2) << step << "/50 [";
        int progress = (step * 40) / 50;
        for (int i = 0; i < 40; i++) {
            if (i < progress) cout << CYAN << "█" << RESET;
            else cout << "░";
        }
        cout << "] " << (step * 100 / 50) << "%" << flush;
        this_thread::sleep_for(chrono::milliseconds(50));
    }
    cout << "\n\n";

    simulateProgress("Decoding latents", 5);
    simulateProgress("Post-processing", 3);

    cout << "\n" << GREEN << BOLD << "✓ Image generated successfully!" << RESET << "\n";
    cout << "  Time: 12.34 seconds\n";
    cout << "  VRAM: 8.2 GB peak\n";
    cout << "  Output: ./output/generated_image_001.png\n";
}

void showPerformanceStats() {
    printSection("Performance Statistics");

    cout << BOLD << "System Metrics:" << RESET << "\n\n";

    // CPU
    cout << "CPU Usage:  45% [";
    for (int i = 0; i < 45; i++) cout << BLUE << "▓" << RESET;
    for (int i = 45; i < 100; i++) cout << "░";
    cout << "]\n";

    // RAM
    cout << "RAM Usage:  24.5 / 64 GB [";
    for (int i = 0; i < 38; i++) cout << MAGENTA << "▓" << RESET;
    for (int i = 38; i < 100; i++) cout << "░";
    cout << "]\n";

    // FPS
    cout << "Render FPS: " << GREEN << BOLD << "60" << RESET << " fps\n\n";

    // Feature flags
    cout << BOLD << "Active Features:" << RESET << "\n";
    cout << "  " << GREEN << "✓" << RESET << " TensorRT Optimization\n";
    cout << "  " << GREEN << "✓" << RESET << " DLSS 3.5\n";
    cout << "  " << GREEN << "✓" << RESET << " Ray Tracing\n";
    cout << "  " << GREEN << "✓" << RESET << " FP16 Precision\n";
}

// Interactive menu stub; not wired up in the scripted demo sequence below
void showMenu() {
    cout << "\n" << BOLD << "Available Actions:" << RESET << "\n";
    cout << "  1. View GPU Status\n";
    cout << "  2. Model Manager\n";
    cout << "  3. Generate Image\n";
    cout << "  4. Performance Stats\n";
    cout << "  5. Exit\n\n";
}

int main() {
    printHeader();

    cout << CYAN << "Initializing AI Forge Studio..." << RESET << "\n";
    simulateProgress("Loading hardware monitor", 5);
    simulateProgress("Initializing AI engine", 7);
    simulateProgress("Setting up render pipeline", 6);
    cout << "\n" << GREEN << BOLD << "✓ System Ready!" << RESET << "\n";

    this_thread::sleep_for(chrono::milliseconds(500));

    // Demo sequence
    showGPUStatus();
    this_thread::sleep_for(chrono::seconds(2));

    showModelManager();
    this_thread::sleep_for(chrono::seconds(2));

    simulateImageGeneration();
    this_thread::sleep_for(chrono::seconds(2));

    showPerformanceStats();

    cout << "\n" << CYAN << BOLD;
    cout << "╔════════════════════════════════════════════════════╗\n";
    cout << "║  Demo Complete! This showcases the capabilities    ║\n";
    cout << "║  of AI Forge Studio's core modules.                ║\n";
    cout << "║                                                    ║\n";
    cout << "║  For the full GUI experience, build and run on a   ║\n";
    cout << "║  system with an NVIDIA RTX GPU and Qt6 installed.  ║\n";
    cout << "╚════════════════════════════════════════════════════╝\n";
    cout << RESET << "\n";

    return 0;
}
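The demo uses only the C++ standard library; the structured bindings in showModelManager() require C++17. A minimal way to build and run it (assuming g++ on Linux; -pthread covers the std::this_thread calls, and the ANSI color codes assume a terminal that interprets escape sequences, which older Windows consoles may not):

g++ -std=c++17 -pthread demo_cli.cpp -o demo_cli
./demo_cli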

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/QAZ83/remote-mcp-server'
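The same endpoint can also be called from code. Below is a minimal sketch in C++ using libcurl (an assumption on our part; the directory API is plain HTTPS, so any HTTP client works) that fetches and prints the raw JSON for this server:

// fetch_server_info.cpp — minimal sketch, assuming libcurl is installed
// Build with: g++ fetch_server_info.cpp -o fetch_server_info -lcurl
#include <curl/curl.h>
#include <iostream>
#include <string>

// Append each chunk libcurl receives to a std::string buffer
static size_t writeChunk(char* data, size_t size, size_t nmemb, void* userp) {
    auto* out = static_cast<std::string*>(userp);
    out->append(data, size * nmemb);
    return size * nmemb;
}

int main() {
    CURL* curl = curl_easy_init();
    if (!curl) return 1;

    std::string body;
    curl_easy_setopt(curl, CURLOPT_URL,
        "https://glama.ai/api/mcp/v1/servers/QAZ83/remote-mcp-server");
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeChunk);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    CURLcode res = curl_easy_perform(curl);
    if (res != CURLE_OK) {
        std::cerr << "Request failed: " << curl_easy_strerror(res) << "\n";
    } else {
        std::cout << body << "\n";  // raw JSON response
    }
    curl_easy_cleanup(curl);
    return res == CURLE_OK ? 0 : 1;
}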

If you have feedback or need assistance with the MCP directory API, please join our Discord server.