# Performance Tuning

Optimize Sonora for maximum performance and efficiency.
System Optimization
#
CPU Optimization
# Enable performance mode
client = SonoraClient(..., performance_mode="overdrive")
# Configure thread pools
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
# Use CPU affinity
import os
os.sched_setaffinity(0, {0, 1, 2, 3}) # Use cores 0-3
#
Memory Optimization
# Object pooling
from sonora.utils import ObjectPool
track_pool = ObjectPool(Track, max_size=1000)
# Memory monitoring
import psutil
process = psutil.Process()
def check_memory():
    """Trigger a garbage-collection pass when process RSS exceeds 512 MB.

    Reads the module-level psutil ``process`` handle; intended to be called
    periodically from a monitoring task.
    """
    import gc  # bug fix: gc was used but never imported in this snippet

    mem = process.memory_info()
    if mem.rss > 512 * 1024 * 1024:  # 512MB
        gc.collect()
#
I/O Optimization
# Connection pooling
connector = aiohttp.TCPConnector(
limit=100,
limit_per_host=10,
ttl_dns_cache=300,
use_dns_cache=True
)
# File I/O optimization
import aiofiles
async def read_large_file(path):
    """Asynchronously read and return the full contents of *path* as bytes."""
    async with aiofiles.open(path, 'rb') as handle:
        contents = await handle.read()
    return contents
#
Lavalink Optimization
#
Node Configuration
# application.yml
server:
port: 2333
lavalink:
server:
password: "password"
sources:
youtube: true
bandcamp: true
bufferDurationMs: 400
frameBufferDurationMs: 1000
opusEncodingQuality: 9
resamplingQuality: HIGH
trackStuckThresholdMs: 10000
audio:
opus:
quality: 10
#
Load Balancing
# Intelligent load balancer
class SmartLoadBalancer:
    """Routes new tracks to the Lavalink node with the most spare CPU capacity."""

    def __init__(self, nodes):
        self.nodes = nodes
        self.metrics = {}

    def select_node(self, track):
        """Return the node maximizing cores / (systemLoad + 1).

        *track* is accepted for API compatibility but not consulted; ties go
        to the earliest node in ``self.nodes``.
        """
        def spare_capacity(node):
            cpu_stats = node.stats.get('cpu', {})
            cores = cpu_stats.get('cores', 1)
            system_load = cpu_stats.get('systemLoad', 0)
            return cores / (system_load + 1)  # higher is better

        return max(self.nodes, key=spare_capacity)
#
Application Tuning
#
Async Optimization
# Task management
async def run_with_timeout(coro, timeout=5.0):
    """Await *coro*, abandoning it after *timeout* seconds.

    Returns the coroutine's result, or None when the deadline passes
    (a warning is logged in that case).
    """
    try:
        result = await asyncio.wait_for(coro, timeout=timeout)
    except asyncio.TimeoutError:
        logger.warning("Operation timed out")
        return None
    return result
# Concurrent operations
async def batch_process(items, batch_size=10, processor=None):
    """Process *items* concurrently with at most *batch_size* in flight.

    Args:
        items: iterable of work items.
        batch_size: maximum number of concurrent tasks (semaphore width).
        processor: optional async callable applied to each item; defaults to
            the module-level ``process_single_item`` for backward
            compatibility with the original hard-wired helper.

    Returns:
        Results in the same order as *items*.
    """
    if processor is None:
        processor = process_single_item
    semaphore = asyncio.Semaphore(batch_size)

    async def _bounded(item):
        # Semaphore caps concurrency; gather preserves input order.
        async with semaphore:
            return await processor(item)

    return await asyncio.gather(*(_bounded(item) for item in items))
#
Caching Strategies
# Multi-level caching
from cachetools import TTLCache, LRUCache
# Fast L1 cache
l1_cache = LRUCache(maxsize=1000)
# L2 cache with TTL
l2_cache = TTLCache(maxsize=10000, ttl=300)
async def get_with_cache(key):
    """Resolve *key* through the L1 cache, then L2 (promoting hits), then fetch.

    L2 hits are copied into L1; full misses call ``fetch_data`` and populate
    both tiers before returning.
    """
    try:
        return l1_cache[key]  # fastest path: L1 hit
    except KeyError:
        pass
    try:
        cached = l2_cache[key]
    except KeyError:
        pass
    else:
        l1_cache[key] = cached  # promote L2 hit into L1
        return cached
    # Miss on both tiers: fetch once, warm both caches.
    cached = await fetch_data(key)
    l1_cache[key] = cached
    l2_cache[key] = cached
    return cached
#
Database Optimization
# Connection pooling
db_pool = await asyncpg.create_pool(
min_size=5,
max_size=20,
command_timeout=60
)
# Query optimization
async def get_tracks_batch(track_ids):
    """Fetch all rows matching *track_ids* in one ANY() query.

    A single round trip replaces N per-id queries.
    """
    return await db_pool.fetch(
        "SELECT * FROM tracks WHERE id = ANY($1)",
        track_ids,
    )
# Index optimization
# CREATE INDEX CONCURRENTLY idx_tracks_artist ON tracks (artist);
# CREATE INDEX CONCURRENTLY idx_tracks_title ON tracks (title);
#
Monitoring & Profiling
#
Performance Metrics
import time
from contextlib import asynccontextmanager
@asynccontextmanager
async def measure_time(operation_name):
    """Async context manager recording the wall-clock duration of its body.

    The elapsed time is reported to ``metrics.record_latency`` under
    *operation_name*, even when the body raises.
    """
    started = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - started
        metrics.record_latency(operation_name, elapsed)
# Usage
async with measure_time("track_load"):
track = await player.play(query)
#
Profiling Tools
import cProfile
import pstats
def profile_function(func):
    """Decorator profiling each call to *func* with cProfile.

    After every call (even one that raises) the top 20 entries by cumulative
    time are printed via pstats.

    NOTE: wrapping an ``async def`` this way only profiles coroutine
    creation, not the awaited work — the await happens outside ``runcall``.
    """
    import functools  # local import keeps this documentation snippet self-contained

    @functools.wraps(func)  # bug fix: preserve __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        profiler = cProfile.Profile()
        try:
            return profiler.runcall(func, *args, **kwargs)
        finally:
            stats = pstats.Stats(profiler)
            stats.sort_stats('cumulative')
            stats.print_stats(20)  # Top 20 functions
    return wrapper
# Profile async functions
@profile_function
async def profiled_operation():
    # NOTE(review): profiler.runcall on an async def only times coroutine
    # creation, not the awaited playback — presumably illustrative; verify.
    # `player` is assumed to be a connected Sonora player from elsewhere.
    await player.play("query")
#
Resource Monitoring
class ResourceMonitor:
    """Samples host resource usage once a minute and alerts on thresholds.

    Relies on a module-level async ``send_alert`` helper (defined elsewhere)
    and psutil for system metrics.
    """

    def __init__(self):
        # Most recent sample, keyed by resource name ('cpu', 'memory', ...).
        self.metrics = {}

    async def monitor(self):
        """Run forever: sample metrics, alert past thresholds, sleep 60s."""
        while True:
            self.metrics.update({
                'cpu': psutil.cpu_percent(),
                'memory': psutil.virtual_memory().percent,
                'disk': psutil.disk_usage('/').percent,
                'network': psutil.net_io_counters()
            })
            # Alert on thresholds (percentages: CPU > 90, memory > 85)
            if self.metrics['cpu'] > 90:
                await send_alert("High CPU usage")
            if self.metrics['memory'] > 85:
                await send_alert("High memory usage")
            await asyncio.sleep(60)  # Check every minute
#
Scaling Strategies
#
Horizontal Scaling
# Multi-node setup
nodes = [
{"host": "node1.example.com", "port": 2333},
{"host": "node2.example.com", "port": 2333},
{"host": "node3.example.com", "port": 2333}
]
client = SonoraClient(
lavalink_nodes=nodes,
node_pooling=True
)
#
Vertical Scaling
# Optimize for high load
config = {
"max_players": 1000,
"buffer_size": 1024 * 64, # 64KB
"thread_pool_size": 16,
"connection_pool_size": 100
}
#
Load Shedding
class LoadShedder:
    """Rejects incoming work when CPU or memory utilization crosses a threshold."""

    def __init__(self, threshold=0.8):
        # Utilization threshold as a fraction in [0, 1].
        self.threshold = threshold

    async def should_shed(self):
        """Return True when CPU or memory utilization exceeds the threshold."""
        cpu_fraction = psutil.cpu_percent() / 100
        memory_fraction = psutil.virtual_memory().percent / 100
        return max(cpu_fraction, memory_fraction) > self.threshold

    async def shed_load(self, request):
        """Serve *request*, or return an overload payload when shedding."""
        if not await self.should_shed():
            return await process_request(request)
        # Return cached response or error
        return {"error": "Service overloaded", "retry_after": 30}
#
Benchmarking
#
Performance Benchmarks
# Run built-in benchmarks
sonoractl benchmark --all
# Custom benchmark
python -c "
import asyncio
import time
async def benchmark():
start = time.time()
tasks = [player.play(f'test{i}') for i in range(100)]
await asyncio.gather(*tasks)
duration = time.time() - start
print(f'100 tracks in {duration:.2f}s')
asyncio.run(benchmark())
"
#
Comparative Analysis
# Compare configurations
configs = [
{"buffer_size": 1024},
{"buffer_size": 2048},
{"buffer_size": 4096}
]
for config in configs:
# Apply config
player.buffer_size = config["buffer_size"]
# Benchmark
duration = await benchmark_operation()
print(f"Config {config}: {duration:.3f}s")
#
Best Practices
- Profile First: Use profiling to identify bottlenecks
- Cache Aggressively: Cache expensive operations
- Optimize I/O: Use async I/O and connection pooling
- Monitor Continuously: Track performance metrics
- Scale Gradually: Add resources as needed
- Test at Scale: Benchmark under realistic load
- Automate Optimization: Use automated tuning tools
- Document Changes: Track performance improvements
This tuning guide helps maximize Sonora's performance for demanding applications.