// mixvideo-v2/cargos/tvai/examples/convenience_and_optimizatio...
//
// 232 lines
// 8.7 KiB
// Rust

//! Convenience interfaces and optimization examples
use tvai::*;
use tvai::config::{global_settings, global_presets};
use tvai::utils::{GpuManager, PerformanceMonitor, optimize_for_system};
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("Topaz Video AI Library - Convenience and Optimization Examples");

    // Each section below is an independent demo; any failure aborts the run
    // via `?` so later sections don't print misleading output.
    demonstrate_global_settings().await?;
    demonstrate_preset_management().await?;
    demonstrate_gpu_optimization().await?;
    demonstrate_performance_monitoring().await?;
    demonstrate_error_handling().await?;

    println!("All convenience and optimization examples completed successfully!");
    Ok(())
}
/// Walk through the global settings manager: read the current values,
/// update a couple of them, then attempt to build a config from them.
async fn demonstrate_global_settings() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("\n=== Global Settings Management Demo ===");

    let manager = global_settings();

    // Show the settings as they currently stand.
    let settings = manager.get_settings();
    println!("Current Settings:");
    println!(" Default GPU usage: {}", settings.default_use_gpu);
    println!(" Max concurrent jobs: {}", settings.max_concurrent_jobs);
    println!(" Auto-detect Topaz: {}", settings.auto_detect_topaz);
    println!(" Verbose logging: {}", settings.verbose_logging);

    // Mutate two values through the manager's setter API.
    manager.set_default_use_gpu(true)?;
    manager.set_max_concurrent_jobs(2)?;
    println!("Updated settings successfully");

    // Config creation may legitimately fail (e.g. Topaz not installed);
    // report the outcome either way instead of propagating the error.
    if let Err(e) = manager.create_config() {
        println!("Config creation failed (expected): {}", e.user_friendly_message());
    } else {
        println!("Successfully created config from global settings");
    }
    Ok(())
}
/// Enumerate the built-in video and image presets, then inspect one of them.
async fn demonstrate_preset_management() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("\n=== Preset Management Demo ===");

    // The global preset registry is behind a mutex; a poisoned lock here
    // would indicate a bug elsewhere, so unwrap is acceptable in an example.
    let registry = global_presets();
    let manager = registry.lock().unwrap();

    // Video presets.
    println!("Available Video Presets:");
    for name in manager.list_video_presets() {
        if let Some(p) = manager.get_video_preset(&name) {
            println!(" {}: {}", p.name, p.description);
        }
    }

    // Image presets.
    println!("\nAvailable Image Presets:");
    for name in manager.list_image_presets() {
        if let Some(p) = manager.get_image_preset(&name) {
            println!(" {}: {}", p.name, p.description);
        }
    }

    // Pull one well-known preset out and show its upscale parameters.
    if let Some(p) = manager.get_video_preset("general_2x") {
        println!("\nUsing preset: {}", p.name);
        if let Some(ref upscale) = p.upscale {
            println!(" Upscale model: {}", upscale.model.as_str());
            println!(" Scale factor: {}x", upscale.scale_factor);
        }
    }
    Ok(())
}
/// Detect GPUs, print their capabilities and recommended settings,
/// then run the library's GPU benchmark.
async fn demonstrate_gpu_optimization() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("\n=== GPU Optimization Demo ===");

    // Probe the system once; everything below reads from this snapshot.
    let info = GpuManager::detect_detailed_gpu_info();
    println!("GPU Detection Results:");
    println!(" Available: {}", info.available);
    println!(" CUDA: {}", info.cuda_available);
    println!(" OpenCL: {}", info.opencl_available);
    println!(" Vulkan: {}", info.vulkan_available);

    // One entry per detected device; memory size is optional.
    println!("\nDetected GPU Devices:");
    for (i, dev) in info.devices.iter().enumerate() {
        println!(" Device {}: {}", i, dev.name);
        println!(" Vendor: {}", dev.vendor);
        if let Some(mem) = dev.memory_mb {
            println!(" Memory: {} MB", mem);
        }
        println!(" AI Support: {}", dev.supports_ai);
    }

    // Settings the detector suggests based on what it found.
    let rec = &info.recommended_settings;
    println!("\nRecommended Settings:");
    println!(" Use GPU: {}", rec.use_gpu);
    if let Some(idx) = rec.preferred_device {
        println!(" Preferred device: {}", idx);
    }
    if let Some(limit) = rec.memory_limit_mb {
        println!(" Memory limit: {} MB", limit);
    }
    println!(" Concurrent streams: {}", rec.concurrent_streams);

    // Quick yes/no suitability check.
    println!("\nGPU suitable for AI workloads: {}", GpuManager::is_gpu_suitable_for_ai());

    // The benchmark is async and may fail (e.g. no usable GPU).
    println!("\nRunning GPU benchmark...");
    match GpuManager::benchmark_gpu_performance().await {
        Err(e) => println!("Benchmark failed: {}", e),
        Ok(result) => {
            println!("Benchmark Results:");
            println!(" Processing time: {:?}", result.processing_time);
            println!(" Memory bandwidth: {:.1} GB/s", result.memory_bandwidth_gbps);
            println!(" Compute score: {}", result.compute_score);
            println!(" Recommended for AI: {}", result.recommended_for_ai);
        }
    }
    Ok(())
}
/// Build system-tuned performance settings, simulate one monitored
/// operation, and print the resulting metrics and summary.
async fn demonstrate_performance_monitoring() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("\n=== Performance Monitoring Demo ===");

    // Let the library pick settings appropriate for this machine.
    let perf_settings = optimize_for_system();
    println!("Optimized Performance Settings:");
    println!(" Max concurrent ops: {}", perf_settings.max_concurrent_ops);
    println!(" Processing mode: {:?}", perf_settings.processing_mode);
    println!(" Chunk size: {} MB", perf_settings.chunk_size_mb);
    println!(" Monitoring enabled: {}", perf_settings.enable_monitoring);

    let mut monitor = PerformanceMonitor::new(perf_settings);

    // Hold a concurrency slot only for the duration of the fake operation;
    // the inner block scopes the permit so it is released before recording.
    println!("\nSimulating processing operation...");
    let op_metrics = {
        let _permit = monitor.acquire_slot().await?;
        let op = monitor.start_operation("video_upscale", 100.0);
        // Stand-in for real work (async sleep, non-blocking).
        tokio::time::sleep(std::time::Duration::from_millis(500)).await;
        op.finish(200.0)
    };

    // Record after the permit is dropped, then report what happened.
    monitor.record_metrics(op_metrics.clone());
    println!("Operation completed:");
    println!(" Input size: {:.1} MB", op_metrics.input_size_mb);
    println!(" Output size: {:.1} MB", op_metrics.output_size_mb);
    println!(" Processing time: {:?}", op_metrics.processing_time);
    println!(" Throughput: {:.2} MB/s", op_metrics.throughput_mbps);

    // Aggregate view across all recorded operations.
    let summary = monitor.get_summary();
    println!("\nPerformance Summary:");
    println!(" Total operations: {}", summary.total_operations);
    println!(" Average throughput: {:.2} MB/s", summary.average_throughput_mbps);
    if !summary.recommendations.is_empty() {
        println!(" Recommendations:");
        for rec in summary.recommendations {
            println!("{}", rec);
        }
    }
    Ok(())
}
/// Show each `TvaiError` variant's category, recoverability flag, and
/// user-friendly message.
async fn demonstrate_error_handling() -> std::result::Result<(), Box<dyn std::error::Error>> {
    println!("\n=== Enhanced Error Handling Demo ===");

    // One sample of every variant the library distinguishes.
    let samples = vec![
        TvaiError::TopazNotFound("/invalid/path".to_string()),
        TvaiError::FfmpegError("Invalid codec".to_string()),
        TvaiError::InvalidParameter("Scale factor out of range".to_string()),
        TvaiError::GpuError("CUDA out of memory".to_string()),
        TvaiError::UnsupportedFormat("WEBM".to_string()),
        TvaiError::InsufficientResources("Not enough memory".to_string()),
        TvaiError::PermissionDenied("Cannot write to output directory".to_string()),
    ];

    for err in samples {
        println!("\nError Type: {}", err.category());
        println!("Recoverable: {}", err.is_recoverable());
        println!("User-friendly message:");
        println!("{}", err.user_friendly_message());
        // Visual separator between entries.
        println!("{}", "-".repeat(50));
    }
    Ok(())
}
/// Build a boxed progress callback that prints `operation_name` with an
/// integer percentage.
///
/// The closure expects a progress fraction (presumably in `[0.0, 1.0]` —
/// TODO confirm against `ProgressCallback`'s contract). The `as u32` cast
/// saturates on out-of-range or NaN inputs, so the conversion cannot panic.
///
/// NOTE(review): this helper is not referenced by any of the demos in this
/// file; without `#[allow(dead_code)]` the example binary emits a
/// `dead_code` warning for it.
#[allow(dead_code)]
fn create_progress_callback(operation_name: &str) -> ProgressCallback {
    // Own the name so the returned closure is self-contained ('static).
    let name = operation_name.to_string();
    Box::new(move |progress| {
        let percentage = (progress * 100.0) as u32;
        println!("{}: {}%", name, percentage);
    })
}