#
⚡ Performance Tuning Guide
This guide covers performance optimization techniques and best practices for Sonora v1.2.7.
#
Performance Architecture
Sonora v1.2.7 features a high-performance architecture designed for enterprise workloads:
- Lock-free async queues for concurrent operations
- Zero-copy payload routing to minimize memory overhead
- Adaptive backpressure to prevent system overload
- CPU-aware load balancing across Lavalink nodes
#
Memory Optimization
#
Garbage Collection Tuning
import gc
import psutil

# Resident-memory ceiling (in MB) before we proactively collect garbage.
GC_THRESHOLD_MB = 500

# Sample this process's resident set size, converted to megabytes.
process = psutil.Process()
memory_usage = process.memory_info().rss / 1024 / 1024

# Reclaim memory during idle periods once the threshold is crossed.
if memory_usage > GC_THRESHOLD_MB:
    gc.collect()
#
Connection Pooling
# Configure connection pooling
# Build a client whose Lavalink connections are pooled and kept alive.
primary_node = {
    "host": "127.0.0.1",
    "port": 2333,
    "password": "pass",
    "pool_size": 10,       # Number of pooled connections per node
    "max_keepalive": 300,  # Seconds a pooled connection stays alive
}

client = SonoraClient(
    lavalink_nodes=[primary_node],
    connection_pooling=True,
)
#
CPU Optimization
#
Async Task Management
import asyncio
from sonora import performance_monitor

# Read the current CPU load from the built-in monitor.
cpu_percent = performance_monitor.get_system_stats()['cpu_percent']

# Lower the concurrency ceiling when the host is already busy.
if cpu_percent < 70:
    max_concurrent = 10
else:
    max_concurrent = 5
semaphore = asyncio.Semaphore(max_concurrent)


async def process_with_backpressure(task):
    """Await *task* once a semaphore slot frees up, bounding concurrency."""
    async with semaphore:
        return await task
#
Load Balancing
# CPU-aware node selection
async def select_optimal_node(client, guild_id):
    """Pick the connected node with the best combined CPU/latency score.

    Both metrics are inverted so that lower CPU usage and lower latency
    yield a higher score. Returns ``None`` when no node is connected.
    """
    best_node = None
    best_score = float("-inf")
    for node in client.nodes.values():
        if not node.connected:
            continue
        score = 1.0 / (node.cpu_usage + 1) + 1.0 / (node.latency + 1)
        if score > best_score:
            best_node, best_score = node, score
    return best_node
#
Network Optimization
#
Connection Tuning
# Optimize Lavalink connections
# Connection settings tuned for a local Lavalink node.
node_config = dict(
    host="127.0.0.1",
    port=2333,
    password="pass",
    timeout=30,                 # Seconds before a connect attempt is abandoned
    reconnect_delay=5,          # Initial backoff before the first reconnect
    max_reconnect_attempts=10,  # Give up after this many reconnects
    heartbeat_interval=30,      # Seconds between heartbeat frames
    buffer_size=8192,           # Socket buffer size in bytes
)
#
Payload Compression
# Enable payload compression for large data
# Compress payloads so large track/queue data costs less bandwidth.
client = SonoraClient(
    lavalink_nodes=[node_config],
    compression=True,
    compression_level=6,  # gzip level: 1 (fastest) .. 9 (smallest)
)
#
Queue Optimization
#
Smart Batching
import asyncio  # required for asyncio.sleep below; missing in the original snippet

from sonora import SmartQueue


class OptimizedQueue(SmartQueue):
    """SmartQueue subclass that enqueues tracks in small, paced batches.

    Splitting a large track list into batches keeps each add operation
    cheap, and the short pause between batches avoids overwhelming the
    queue's consumers.
    """

    def __init__(self, guild_id):
        super().__init__(guild_id)
        self.batch_size = 10      # Tracks added per add_multiple() call
        self.batch_timeout = 0.1  # Pause between batches, in seconds (100 ms)

    async def add_batch(self, tracks):
        """Add *tracks* to the queue in batches of ``self.batch_size``.

        A short ``asyncio.sleep`` separates consecutive batches; no delay
        follows the final batch.
        """
        for i in range(0, len(tracks), self.batch_size):
            await self.add_multiple(tracks[i:i + self.batch_size])
            # Pause only between batches, never after the last one.
            if i + self.batch_size < len(tracks):
                await asyncio.sleep(self.batch_timeout)
#
Memory-Efficient Queues
# Configure queue memory limits
import asyncio  # required for asyncio.sleep below; missing in the original snippet

queue = SmartQueue(guild_id=123)
queue.max_history_size = 100    # Oldest played tracks are dropped past this
queue.max_upcoming_size = 500   # Upcoming tracks are truncated past this


async def cleanup_queue():
    """Periodically trim the queue's history and upcoming lists.

    Runs forever; schedule it as a background task. Every 5 minutes it
    drops the oldest history entries and truncates the upcoming list to
    its configured maximum.
    """
    while True:
        await asyncio.sleep(300)  # Every 5 minutes
        # Keep only the newest max_history_size entries, in place. A single
        # slice delete is O(n); the original pop(0)-in-a-loop was O(n**2)
        # because each pop shifts every remaining element.
        if len(queue.history) > queue.max_history_size:
            del queue.history[:-queue.max_history_size]
        # Truncate the upcoming list, keeping the high-priority head.
        if len(queue.upcoming) > queue.max_upcoming_size:
            queue.upcoming = queue.upcoming[:queue.max_upcoming_size]
#
Autoplay Optimization
#
Intelligent Caching
import hashlib  # required by _generate_cache_key; missing in the original snippet
import time     # required for cache timestamps; missing in the original snippet

from sonora import AutoplayEngine


class CachedAutoplayEngine(AutoplayEngine):
    """AutoplayEngine with a bounded, time-limited recommendation cache.

    Recommendations are keyed by a digest of the listening context and
    reused for up to one hour, sparing repeated external lookups.
    """

    def __init__(self, guild_id, cache_size=1000):
        super().__init__(guild_id)
        # Maps cache key -> {'track': <track>, 'timestamp': epoch seconds}.
        self.recommendation_cache = {}
        self.cache_size = cache_size  # Max entries before oldest is evicted

    async def fetch_next_track(self, context):
        """Return the next recommended track, serving from cache when fresh.

        A cached recommendation is reused for up to one hour; otherwise a
        new one is fetched, cached, and the single oldest entry evicted if
        the cache has outgrown ``cache_size``.
        """
        cache_key = self._generate_cache_key(context)
        if cache_key in self.recommendation_cache:
            cached_result = self.recommendation_cache[cache_key]
            if time.time() - cached_result['timestamp'] < 3600:  # 1 hour TTL
                return cached_result['track']
        # Cache miss or stale entry: fetch a fresh recommendation.
        track = await super().fetch_next_track(context)
        if track:
            self.recommendation_cache[cache_key] = {
                'track': track,
                'timestamp': time.time()
            }
            # Only insertion grows the cache, so eviction is needed here only.
            if len(self.recommendation_cache) > self.cache_size:
                oldest_key = min(
                    self.recommendation_cache,
                    key=lambda k: self.recommendation_cache[k]['timestamp']
                )
                del self.recommendation_cache[oldest_key]
        return track

    def _generate_cache_key(self, context):
        """Digest the current title plus the last five history titles.

        MD5 is used purely as a fast, non-cryptographic fingerprint here.
        """
        history_titles = [t.title for t in context.get('history', [])[-5:]]
        current_title = context.get('current', {}).get('title', '')
        return hashlib.md5(f"{current_title}:{','.join(history_titles)}".encode()).hexdigest()
#
Rate Limiting
# Configure autoplay rate limits
autoplay = AutoplayEngine(guild_id)
# Cache recommendations for an hour and cap outbound API calls.
autoplay.cache_ttl = 3600
autoplay.max_requests_per_minute = 30
#
Profiling and Monitoring
#
Built-in Profiler
from sonora import performance_profiler

# Begin collecting profile data.
performance_profiler.start_profiling()

# ... run your bot operations here ...

# Stop collecting and inspect the headline numbers.
results = performance_profiler.stop_profiling()
print(f"Total execution time: {results['execution_time']:.2f}s")
print(f"Peak memory usage: {results['memory_peak_mb']:.1f} MB")

# Show the first ten non-blank entries from the profiler's report.
report_lines = results['profile_stats'].split('\n')
for line in report_lines[:10]:
    if line.strip():
        print(line)
#
Performance Metrics
from sonora import performance_monitor

# Record custom metrics: a timing sample, a counter bump, and a gauge.
performance_monitor.record_timing("track_load", 0.234)
performance_monitor.increment_counter("tracks_played")
performance_monitor.set_gauge("active_guilds", 150)

# Pull both application-level and system-level statistics.
stats = performance_monitor.get_stats()
system_stats = performance_monitor.get_system_stats()

print(f"CPU Usage: {system_stats['cpu_percent']:.1f}%")
print(f"Memory: {system_stats['memory_mb']:.1f} MB")
active = stats.get('counters', {}).get('active_connections', 0)
print(f"Active connections: {active}")
#
Scaling Strategies
#
Horizontal Scaling
# Multi-node configuration
# Multi-node configuration: one Lavalink node per region.
nodes = [
    {"host": f"node{i}.example.com", "port": 2333, "region": region}
    for i, region in enumerate(("us-east", "us-west", "eu-central"), start=1)
]
# Pool the nodes and route new players to the lowest-latency node.
client = SonoraClient(
    lavalink_nodes=nodes,
    node_pooling=True,
    load_balancing="latency",  # Alternatives: "cpu", "random"
)
#
Vertical Scaling
# Optimize for high concurrency
# Optimize for high concurrency on a single, larger host.
client = SonoraClient(
    lavalink_nodes=[node_config],
    max_concurrent_requests=100,
    connection_pool_size=20,
    enable_compression=True,
)

# Tune the queue for raw throughput.
queue = SmartQueue(guild_id)
queue.max_concurrent_operations = 50
queue.enable_adaptive_reorder = False  # Reordering costs CPU; skip it here
#
Benchmarking
#
Performance Benchmarks
# Run comprehensive benchmarks
sonoractl benchmark
# Profile specific operations
sonoractl profile
# Test with different loads
# - 100 concurrent tracks
# - 1000 queue size
# - Network latency simulation
#
Custom Benchmarks
import time
import asyncio
from sonora import performance_profiler
async def benchmark_track_loading(client, num_tracks=100):
    """Benchmark concurrent track loading.

    Fires ``num_tracks`` ytsearch loads at once and reports the wall
    time, success count, failure rate, average per-track time, and the
    profiler's captured data.
    """
    performance_profiler.start_profiling()
    # perf_counter() is monotonic and high-resolution, so the measured
    # interval is immune to system clock adjustments — time.time() is not
    # suitable for benchmarking.
    start_time = time.perf_counter()

    # Load tracks concurrently; exceptions are captured per task rather
    # than aborting the whole gather.
    tasks = [
        client.load_track(f"ytsearch:test track {i}")
        for i in range(num_tracks)
    ]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    elapsed = time.perf_counter() - start_time
    profile_results = performance_profiler.stop_profiling()
    successful_loads = sum(1 for r in results if not isinstance(r, Exception))
    return {
        "total_time": elapsed,
        "successful_loads": successful_loads,
        "failure_rate": (num_tracks - successful_loads) / num_tracks,
        "avg_time_per_track": elapsed / num_tracks,
        "profile_data": profile_results
    }
#
Troubleshooting Performance Issues
#
Common Performance Problems
High CPU Usage:
- Check for infinite loops in plugins
- Monitor asyncio task count
- Profile with `performance_profiler`
High Memory Usage:
- Check for memory leaks in plugins
- Monitor queue sizes
- Use `tracemalloc` for detailed analysis
Slow Track Loading:
- Check network latency to Lavalink
- Monitor Lavalink node performance
- Enable caching and connection pooling
Queue Performance Issues:
- Check queue size limits
- Monitor concurrent operations
- Enable adaptive reordering only when needed
#
Performance Monitoring Dashboard
# Create performance dashboard
async def performance_dashboard():
    """Print a summary of key metrics once a minute, forever."""
    while True:
        app_stats = performance_monitor.get_stats()
        sys_stats = performance_monitor.get_system_stats()
        print("=== Performance Dashboard ===")
        print(f"CPU: {sys_stats['cpu_percent']:.1f}%")
        print(f"Memory: {sys_stats['memory_mb']:.1f} MB")
        print(f"Uptime: {app_stats['uptime']:.1f}s")
        counters = app_stats.get('counters', {})
        print(f"Active connections: {counters.get('active_connections', 0)}")
        print(f"Tracks played: {counters.get('tracks_played', 0)}")
        await asyncio.sleep(60)  # Update every minute
#
Best Practices
#
1. Connection Management
- Use connection pooling
- Configure appropriate timeouts
- Enable compression for large payloads
#
2. Memory Management
- Set reasonable queue size limits
- Implement automatic cleanup
- Monitor memory usage patterns
#
3. CPU Optimization
- Use async operations throughout
- Implement backpressure
- Profile regularly to identify bottlenecks
#
4. Network Efficiency
- Enable compression
- Use appropriate buffer sizes
- Implement request batching
#
5. Monitoring
- Set up comprehensive monitoring
- Use performance profiling regularly
- Monitor system resources continuously
#
Enterprise Deployment
#
Production Configuration
# Production-optimized configuration
import os  # required for os.getenv below; missing in the original snippet

client = SonoraClient(
    lavalink_nodes=[
        {
            "host": "production-node-1",
            "port": 2333,
            # Never hard-code credentials; read them from the environment.
            "password": os.getenv("LAVALINK_PASSWORD"),
            "timeout": 30,
            "reconnect_delay": 5,
            "max_reconnect_attempts": 10,
            "heartbeat_interval": 30,
            "buffer_size": 16384,
        }
    ],
    connection_pooling=True,
    compression=True,
    max_concurrent_requests=200,
    enable_metrics=True,
    security_enabled=True
)
This comprehensive performance tuning guide will help you optimize Sonora v1.2.7 for maximum efficiency and scalability in production environments.