Performance testing ensures your Django application can handle expected loads, responds quickly under various conditions, and scales effectively. This chapter covers load testing, stress testing, profiling, and optimization strategies to maintain optimal application performance.
import time
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.test import TestCase, Client
from django.test.utils import override_settings
from django.contrib.auth.models import User
class LoadTestingBase(TestCase):
    """Base class for load testing.

    Provides helpers that fire concurrent HTTP requests through Django's
    test client, record per-request timing/status data, and summarize the
    results into aggregate statistics.
    """

    def setUp(self):
        """Create 10 users, one category, and 100 published posts as fixtures."""
        # Create test users
        self.users = []
        for i in range(10):
            user = User.objects.create_user(
                username=f'user{i}',
                email=f'user{i}@example.com',
                password='testpass123'
            )
            self.users.append(user)

        # Create test data (project-local app, imported lazily)
        from blog.models import BlogPost, Category
        self.category = Category.objects.create(name='Tech', slug='tech')

        # Create multiple posts for realistic load testing
        self.posts = []
        for i in range(100):
            post = BlogPost.objects.create(
                title=f'Load Test Post {i}',
                content=f'Content for load test post {i}. ' * 50,  # Longer content
                author=self.users[i % len(self.users)],
                category=self.category,
                status='published'
            )
            self.posts.append(post)

    def make_request(self, url, method='GET', data=None, user=None):
        """Make a single HTTP request and measure performance.

        Args:
            url: Path to request.
            method: 'GET' or 'POST'; anything else raises ValueError, which
                is caught below and reported as a failed request.
            data: Optional POST payload (ignored for GET).
            user: Optional user to authenticate as via force_login.

        Returns:
            Dict with status_code, response_time (seconds), success flag,
            content_length, and error (None on success).
        """
        client = Client()
        if user:
            client.force_login(user)

        start_time = time.time()
        try:
            if method == 'GET':
                response = client.get(url)
            elif method == 'POST':
                response = client.post(url, data or {})
            else:
                raise ValueError(f"Unsupported method: {method}")
            end_time = time.time()
            return {
                'status_code': response.status_code,
                'response_time': end_time - start_time,
                'success': 200 <= response.status_code < 400,
                'content_length': len(response.content),
                'error': None
            }
        except Exception as e:
            end_time = time.time()
            return {
                'status_code': 500,
                'response_time': end_time - start_time,
                'success': False,
                'content_length': 0,
                'error': str(e)
            }

    def run_load_test(self, url, num_requests=100, concurrent_users=10,
                      method='GET', data=None, authenticated=False):
        """Run a load test with the given parameters and return the analysis.

        BUGFIX: users are now assigned in true round-robin order by request
        index. The original keyed on threading.current_thread().ident — an
        arbitrary thread id that is reused by pooled threads — so the same
        few users could be picked for most requests.
        """
        results = []

        def worker(request_index):
            """Issue one request, optionally authenticated."""
            user = None
            if authenticated and self.users:
                # Round-robin assignment by request index
                user = self.users[request_index % len(self.users)]
            return self.make_request(url, method, data, user)

        # Execute concurrent requests
        with ThreadPoolExecutor(max_workers=concurrent_users) as executor:
            # Submit all requests, tagged with their index for user rotation
            futures = [executor.submit(worker, i) for i in range(num_requests)]
            # Collect results
            for future in as_completed(futures):
                try:
                    result = future.result(timeout=30)  # 30 second timeout
                    results.append(result)
                except Exception as e:
                    results.append({
                        'status_code': 500,
                        'response_time': 30.0,
                        'success': False,
                        'content_length': 0,
                        'error': f"Request timeout or error: {e}"
                    })
        return self.analyze_results(results)

    def analyze_results(self, results):
        """Summarize per-request results into aggregate statistics.

        BUGFIX: 'requests_per_second' was previously computed as
        len(results) / sum(times) * len(results), i.e. n^2 / total_time —
        inflated by a factor of n. It is now requests / total busy time,
        a conservative (serialized) throughput estimate that ignores
        request overlap.
        """
        if not results:
            return {'error': 'No results to analyze'}

        # Extract metrics
        response_times = [r['response_time'] for r in results]
        successful_requests = [r for r in results if r['success']]
        failed_requests = [r for r in results if not r['success']]
        total_time = sum(response_times)

        analysis = {
            'total_requests': len(results),
            'successful_requests': len(successful_requests),
            'failed_requests': len(failed_requests),
            'success_rate': len(successful_requests) / len(results) * 100,
            # Response time statistics
            'avg_response_time': statistics.mean(response_times),
            'min_response_time': min(response_times),
            'max_response_time': max(response_times),
            'median_response_time': statistics.median(response_times),
            # Percentiles
            'p95_response_time': self.percentile(response_times, 95),
            'p99_response_time': self.percentile(response_times, 99),
            # Throughput (guard against zero total time)
            'requests_per_second': len(results) / total_time if total_time > 0 else 0.0,
            # Error analysis
            'errors': [r['error'] for r in failed_requests if r['error']],
            'status_codes': self.count_status_codes(results)
        }
        return analysis

    def percentile(self, data, percentile):
        """Return the nearest-rank percentile of data (percentile in 0-100).

        BUGFIX: the nearest-rank index uses ceil(); the previous int()
        truncation selected the value one rank too high (e.g. the 96th of
        100 samples for p95).
        """
        import math
        sorted_data = sorted(data)
        rank = math.ceil(len(sorted_data) * percentile / 100)
        index = max(rank - 1, 0)
        return sorted_data[min(index, len(sorted_data) - 1)]

    def count_status_codes(self, results):
        """Count occurrences of each HTTP status code in results."""
        status_counts = {}
        for result in results:
            code = result['status_code']
            status_counts[code] = status_counts.get(code, 0) + 1
        return status_counts
class BasicLoadTests(LoadTestingBase):
    """Basic load testing scenarios."""

    def test_homepage_load(self):
        """Exercise the homepage with 50 requests across 5 workers."""
        report = self.run_load_test(url='/', num_requests=50, concurrent_users=5)

        # Performance requirements for the landing page.
        self.assertGreaterEqual(report['success_rate'], 95.0)
        self.assertLess(report['avg_response_time'], 2.0)      # < 2 seconds
        self.assertLess(report['p95_response_time'], 5.0)      # 95% < 5 seconds
        self.assertGreater(report['requests_per_second'], 10)  # > 10 RPS

        print("Homepage Load Test Results:")
        print(f"Success Rate: {report['success_rate']:.1f}%")
        print(f"Average Response Time: {report['avg_response_time']:.3f}s")
        print(f"95th Percentile: {report['p95_response_time']:.3f}s")
        print(f"Requests/Second: {report['requests_per_second']:.1f}")

    def test_blog_list_load(self):
        """Exercise the blog list with 100 requests across 10 workers."""
        report = self.run_load_test(url='/blog/', num_requests=100, concurrent_users=10)

        # The list view hits the database harder, so limits are looser.
        self.assertGreaterEqual(report['success_rate'], 90.0)
        self.assertLess(report['avg_response_time'], 3.0)
        self.assertLess(report['p95_response_time'], 8.0)

        print("Blog List Load Test Results:")
        print(f"Success Rate: {report['success_rate']:.1f}%")
        print(f"Average Response Time: {report['avg_response_time']:.3f}s")
        print(f"Failed Requests: {report['failed_requests']}")

    def test_post_detail_load(self):
        """Exercise ten different post detail pages and average the metrics."""
        reports = [
            self.run_load_test(
                url=f'/blog/{post.slug}/',
                num_requests=20,
                concurrent_users=5,
            )
            for post in self.posts[:10]
        ]

        # Aggregate the per-URL summaries.
        avg_success_rate = sum(r['success_rate'] for r in reports) / len(reports)
        avg_response_time = sum(r['avg_response_time'] for r in reports) / len(reports)

        self.assertGreaterEqual(avg_success_rate, 95.0)
        self.assertLess(avg_response_time, 2.0)

        print("Post Detail Load Test Results (10 different posts):")
        print(f"Average Success Rate: {avg_success_rate:.1f}%")
        print(f"Average Response Time: {avg_response_time:.3f}s")

    def test_authenticated_user_load(self):
        """Exercise the dashboard with logged-in users."""
        report = self.run_load_test(
            url='/dashboard/',
            num_requests=50,
            concurrent_users=5,
            authenticated=True,
        )

        # Session handling adds overhead, so limits are looser here too.
        self.assertGreaterEqual(report['success_rate'], 90.0)
        self.assertLess(report['avg_response_time'], 3.0)

        print("Authenticated User Load Test Results:")
        print(f"Success Rate: {report['success_rate']:.1f}%")
        print(f"Average Response Time: {report['avg_response_time']:.3f}s")
class StressTests(LoadTestingBase):
    """Stress testing to find application limits."""

    def test_increasing_load_stress(self):
        """Ramp up load step by step until the success rate collapses."""
        # (requests, concurrent users) pairs, smallest load first.
        load_levels = [
            (10, 2),
            (50, 5),
            (100, 10),
            (200, 20),
            (500, 50),
        ]

        results_by_load = []
        for num_requests, concurrent_users in load_levels:
            print(f"Testing load: {num_requests} requests, {concurrent_users} concurrent users")
            report = self.run_load_test(
                url='/blog/',
                num_requests=num_requests,
                concurrent_users=concurrent_users,
            )
            report['load_level'] = (num_requests, concurrent_users)
            results_by_load.append(report)
            print(f"Success Rate: {report['success_rate']:.1f}%, "
                  f"Avg Response Time: {report['avg_response_time']:.3f}s")

            # Abort the ramp once the application is clearly failing.
            if report['success_rate'] < 80.0:
                print(f"Breaking point reached at {num_requests} requests, {concurrent_users} users")
                break

            # Let the server settle before the next step.
            time.sleep(2)

        self.analyze_performance_degradation(results_by_load)

    def analyze_performance_degradation(self, results_by_load):
        """Print a metrics table per load level and flag steep slowdowns."""
        print("\nPerformance Degradation Analysis:")
        print("Load Level\t\tSuccess Rate\tAvg Response Time\tP95 Response Time")
        print("-" * 80)

        for report in results_by_load:
            requests, users = report['load_level']
            print(f"{requests:3d} req, {users:2d} users\t"
                  f"{report['success_rate']:6.1f}%\t\t"
                  f"{report['avg_response_time']:8.3f}s\t\t"
                  f"{report['p95_response_time']:8.3f}s")

        # Compare the lightest and heaviest runs for acceptable degradation.
        if len(results_by_load) >= 2:
            response_time_increase = (
                results_by_load[-1]['avg_response_time']
                / results_by_load[0]['avg_response_time']
            )
            # Response time shouldn't grow more than 3x under reasonable load.
            if response_time_increase > 3.0:
                print(f"WARNING: Response time increased {response_time_increase:.1f}x under load")

    def test_memory_stress(self):
        """Watch process memory before and after a heavy load run."""
        import psutil
        import os

        process = psutil.Process(os.getpid())
        initial_memory = process.memory_info().rss / 1024 / 1024  # MB

        # Run memory-intensive operations.
        report = self.run_load_test(url='/blog/', num_requests=200, concurrent_users=20)

        final_memory = process.memory_info().rss / 1024 / 1024  # MB
        memory_increase = final_memory - initial_memory

        print("Memory Stress Test Results:")
        print(f"Initial Memory: {initial_memory:.1f} MB")
        print(f"Final Memory: {final_memory:.1f} MB")
        print(f"Memory Increase: {memory_increase:.1f} MB")
        print(f"Success Rate: {report['success_rate']:.1f}%")

        # Growth beyond 200 MB suggests a leak or unbounded caching.
        self.assertLess(memory_increase, 200, f"Memory increased by {memory_increase:.1f} MB")
        self.assertGreaterEqual(report['success_rate'], 85.0)

    def test_database_connection_stress(self):
        """Run more concurrent users than available database connections."""
        from django.db import connections
        from django.test.utils import override_settings

        # Clone the default DB settings but cap the connection count.
        limited_databases = {
            'default': {
                **connections['default'].settings_dict,
                'OPTIONS': {
                    'MAX_CONNS': 10,  # Limit connections
                },
            }
        }

        with override_settings(DATABASES=limited_databases):
            report = self.run_load_test(
                url='/blog/',
                num_requests=100,
                concurrent_users=15,  # More users than DB connections
            )

            print("Database Connection Stress Test:")
            print(f"Success Rate: {report['success_rate']:.1f}%")
            print(f"Failed Requests: {report['failed_requests']}")

            # Should handle connection limits gracefully.
            self.assertGreaterEqual(report['success_rate'], 80.0)
class DatabasePerformanceTests(TestCase):
    """Test database query performance.

    BUGFIX: the original test methods referenced ``BlogPost`` directly, but
    the model was only imported inside ``setUp`` (a function-local name), so
    every test method raised NameError. Each method now imports the model
    itself.
    """

    def setUp(self):
        """Create 100 users, 20 categories, and 1000 published posts."""
        from blog.models import BlogPost, Category

        # Create many users
        self.users = []
        for i in range(100):
            user = User.objects.create_user(
                username=f'perfuser{i}',
                email=f'perfuser{i}@example.com',
                password='testpass123'
            )
            self.users.append(user)

        # Create many categories
        self.categories = []
        for i in range(20):
            category = Category.objects.create(
                name=f'Category {i}',
                slug=f'category-{i}'
            )
            self.categories.append(category)

        # Create many posts
        self.posts = []
        for i in range(1000):
            post = BlogPost.objects.create(
                title=f'Performance Test Post {i}',
                content=f'Content for performance test post {i}. ' * 100,
                author=self.users[i % len(self.users)],
                category=self.categories[i % len(self.categories)],
                status='published'
            )
            self.posts.append(post)

    def test_query_performance_without_optimization(self):
        """Measure the N+1 pattern: one query per related-object access.

        Returns (query_count, execution_time) so the comparison test can
        reuse the measurements.
        """
        from blog.models import BlogPost  # BUGFIX: was never in scope here
        from django.db import connection

        # NOTE(review): DEBUG=True enables connection.queries logging for new
        # cursors; if counts read 0, set connection.force_debug_cursor instead.
        with override_settings(DEBUG=True):
            connection.queries_log.clear()
            start_time = time.time()

            # Unoptimized query - causes N+1 problem
            posts = BlogPost.objects.all()[:50]
            for post in posts:
                # Each related-object access issues an additional query
                author_name = post.author.username
                category_name = post.category.name

            end_time = time.time()
            query_count = len(connection.queries)
            execution_time = end_time - start_time

        print("Unoptimized Query Performance:")
        print(f"Queries: {query_count}")
        print(f"Execution Time: {execution_time:.3f}s")

        # 50 posts x 2 related lookups should far exceed 50 queries
        self.assertGreater(query_count, 50)
        return query_count, execution_time

    def test_query_performance_with_optimization(self):
        """Measure the same access pattern with select_related (no N+1).

        Returns (query_count, execution_time) for the comparison test.
        """
        from blog.models import BlogPost  # BUGFIX: was never in scope here
        from django.db import connection

        with override_settings(DEBUG=True):
            connection.queries_log.clear()
            start_time = time.time()

            # Optimized query - select_related joins author and category
            posts = BlogPost.objects.select_related(
                'author', 'category'
            ).all()[:50]
            for post in posts:
                # No additional queries needed
                author_name = post.author.username
                category_name = post.category.name

            end_time = time.time()
            query_count = len(connection.queries)
            execution_time = end_time - start_time

        print("Optimized Query Performance:")
        print(f"Queries: {query_count}")
        print(f"Execution Time: {execution_time:.3f}s")

        # A single joined query (plus bookkeeping) is expected
        self.assertLessEqual(query_count, 3)
        return query_count, execution_time

    def test_query_optimization_comparison(self):
        """Compare optimized vs unoptimized query performance."""
        unopt_queries, unopt_time = self.test_query_performance_without_optimization()
        opt_queries, opt_time = self.test_query_performance_with_optimization()

        # Calculate improvements
        query_reduction = (unopt_queries - opt_queries) / unopt_queries * 100
        time_improvement = (unopt_time - opt_time) / unopt_time * 100

        print(f"\nOptimization Results:")
        print(f"Query Reduction: {query_reduction:.1f}%")
        print(f"Time Improvement: {time_improvement:.1f}%")

        # Optimization should provide significant improvement
        self.assertGreater(query_reduction, 80)   # At least 80% fewer queries
        self.assertGreater(time_improvement, 50)  # At least 50% faster

    def test_bulk_operations_performance(self):
        """Compare per-row create() against a single bulk_create()."""
        from blog.models import BlogPost  # BUGFIX: was never in scope here
        from django.db import connection

        # Test individual creates (slow: one INSERT per row)
        with override_settings(DEBUG=True):
            connection.queries_log.clear()
            start_time = time.time()
            individual_posts = []
            for i in range(100):
                post = BlogPost.objects.create(
                    title=f'Individual Post {i}',
                    content=f'Content {i}',
                    author=self.users[0],
                    category=self.categories[0]
                )
                individual_posts.append(post)
            individual_time = time.time() - start_time
            individual_queries = len(connection.queries)

        # Clean up
        BlogPost.objects.filter(title__startswith='Individual Post').delete()

        # Test bulk create (fast: a handful of batched INSERTs)
        with override_settings(DEBUG=True):
            connection.queries_log.clear()
            start_time = time.time()
            bulk_posts = [
                BlogPost(
                    title=f'Bulk Post {i}',
                    content=f'Content {i}',
                    author=self.users[0],
                    category=self.categories[0]
                )
                for i in range(100)
            ]
            BlogPost.objects.bulk_create(bulk_posts)
            bulk_time = time.time() - start_time
            bulk_queries = len(connection.queries)

        print("Bulk Operations Performance Comparison:")
        print(f"Individual Creates: {individual_queries} queries, {individual_time:.3f}s")
        print(f"Bulk Create: {bulk_queries} queries, {bulk_time:.3f}s")

        # Bulk operations should be much more efficient
        self.assertLess(bulk_queries, individual_queries / 10)  # At least 10x fewer queries
        self.assertLess(bulk_time, individual_time / 5)         # At least 5x faster

    def test_pagination_performance(self):
        """Compare pagination query counts across queryset strategies."""
        from blog.models import BlogPost  # BUGFIX: was never in scope here
        from django.core.paginator import Paginator
        from django.db import connection

        # Test different pagination approaches
        approaches = [
            ("Basic Pagination", lambda: BlogPost.objects.all()),
            ("Optimized Pagination", lambda: BlogPost.objects.select_related('author', 'category')),
            ("Filtered Pagination", lambda: BlogPost.objects.filter(status='published')),
        ]

        results = {}
        for name, queryset_func in approaches:
            with override_settings(DEBUG=True):
                connection.queries_log.clear()
                start_time = time.time()

                paginator = Paginator(queryset_func(), 20)  # 20 items per page
                # Each page() call runs its own LIMIT/OFFSET query
                page1 = paginator.page(1)
                page5 = paginator.page(5)
                page10 = paginator.page(10)

                # Iterate one page to force evaluation
                for post in page1:
                    title = post.title

                end_time = time.time()
                results[name] = {
                    'queries': len(connection.queries),
                    'time': end_time - start_time
                }

        print("Pagination Performance Results:")
        for name, result in results.items():
            print(f"{name}: {result['queries']} queries, {result['time']:.3f}s")

        # select_related must not add queries relative to the basic approach
        self.assertLessEqual(
            results["Optimized Pagination"]["queries"],
            results["Basic Pagination"]["queries"],
        )
class CachePerformanceTests(TestCase):
    """Test caching performance and effectiveness."""

    def setUp(self):
        """Seed the cache tests with a user, a category, and 50 posts."""
        from django.core.cache import cache
        cache.clear()  # Start with clean cache

        self.user = User.objects.create_user('cacheuser', 'cache@example.com', 'pass')

        from blog.models import BlogPost, Category
        self.category = Category.objects.create(name='Cache Test', slug='cache-test')

        # Create posts for cache testing
        for i in range(50):
            BlogPost.objects.create(
                title=f'Cache Test Post {i}',
                content=f'Content for cache test post {i}. ' * 20,
                author=self.user,
                category=self.category,
                status='published'
            )

    def test_view_caching_performance(self):
        """Compare cold- vs warm-cache response times for the blog list view."""
        from django.core.cache import cache

        # Cold: no cached page
        cache.clear()
        start_time = time.time()
        response1 = self.client.get('/blog/')
        cold_time = time.time() - start_time

        # Warm: second hit may be served from the view cache (if configured)
        start_time = time.time()
        response2 = self.client.get('/blog/')
        warm_time = time.time() - start_time

        print("View Caching Performance:")
        print(f"Cold Cache: {cold_time:.3f}s")
        print(f"Warm Cache: {warm_time:.3f}s")

        # Both should succeed
        self.assertEqual(response1.status_code, 200)
        self.assertEqual(response2.status_code, 200)

        # Informational only: view caching may not be enabled in this project
        if warm_time < cold_time:
            improvement = (cold_time - warm_time) / cold_time * 100
            print(f"Cache Improvement: {improvement:.1f}%")

    def test_database_query_caching(self):
        """Verify a cache-aside service avoids database queries on a hit."""
        from blog.models import BlogPost  # BUGFIX: was never in scope here
        from django.core.cache import cache
        from django.db import connection

        class CachedBlogService:
            """Cache-aside wrapper around an expensive posts query."""

            def get_popular_posts(self, limit=10):
                cache_key = f'popular_posts_{limit}'

                # Try cache first
                cached_posts = cache.get(cache_key)
                if cached_posts is not None:
                    return cached_posts

                # Expensive query simulation
                posts = list(BlogPost.objects.select_related(
                    'author', 'category'
                ).filter(
                    status='published'
                ).order_by('-created_at')[:limit])

                # Cache for 5 minutes
                cache.set(cache_key, posts, 300)
                return posts

        service = CachedBlogService()

        with override_settings(DEBUG=True):
            # First call (cache miss)
            connection.queries_log.clear()
            start_time = time.time()
            posts1 = service.get_popular_posts(10)
            miss_time = time.time() - start_time
            miss_queries = len(connection.queries)

            # Second call (cache hit)
            connection.queries_log.clear()
            start_time = time.time()
            posts2 = service.get_popular_posts(10)
            hit_time = time.time() - start_time
            hit_queries = len(connection.queries)

        print("Database Query Caching Performance:")
        print(f"Cache Miss: {miss_queries} queries, {miss_time:.3f}s")
        print(f"Cache Hit: {hit_queries} queries, {hit_time:.3f}s")

        # Results should be identical
        self.assertEqual(len(posts1), len(posts2))

        # A hit must bypass the database entirely and be much faster
        self.assertEqual(hit_queries, 0)          # No database queries on cache hit
        self.assertLess(hit_time, miss_time / 10)  # At least 10x faster

    def test_cache_invalidation_performance(self):
        """Time single-key vs batched cache deletions."""
        from django.core.cache import cache

        # Cache some data
        cache.set('test_key_1', 'value_1', 300)
        cache.set('test_key_2', 'value_2', 300)
        cache.set('test_key_3', 'value_3', 300)

        # Test individual key deletion
        start_time = time.time()
        cache.delete('test_key_1')
        individual_delete_time = time.time() - start_time

        # Test batched deletion
        start_time = time.time()
        cache.delete_many(['test_key_2', 'test_key_3'])
        batch_delete_time = time.time() - start_time

        print("Cache Invalidation Performance:")
        print(f"Individual Delete: {individual_delete_time:.6f}s")
        print(f"Batch Delete: {batch_delete_time:.6f}s")

        # Verify deletions
        self.assertIsNone(cache.get('test_key_1'))
        self.assertIsNone(cache.get('test_key_2'))
        self.assertIsNone(cache.get('test_key_3'))

    def test_cache_memory_usage(self):
        """Time bulk writes/reads of 100 x 10KB cache entries."""
        from django.core.cache import cache

        # Clear cache and measure baseline
        cache.clear()

        large_data = 'x' * 10000  # 10KB string

        # Cache many large objects
        start_time = time.time()
        for i in range(100):
            cache.set(f'large_key_{i}', large_data, 300)
        cache_write_time = time.time() - start_time

        # Read from cache
        start_time = time.time()
        retrieved_data = [cache.get(f'large_key_{i}') for i in range(100)]
        cache_read_time = time.time() - start_time

        print("Cache Memory Usage Performance:")
        print(f"Write 100 x 10KB objects: {cache_write_time:.3f}s")
        print(f"Read 100 x 10KB objects: {cache_read_time:.3f}s")

        # Verify all data was cached and retrieved
        self.assertEqual(len(retrieved_data), 100)
        self.assertTrue(all(data == large_data for data in retrieved_data))

        # Cache operations should be fast
        self.assertLess(cache_write_time, 1.0)
        self.assertLess(cache_read_time, 0.5)
class FrontendPerformanceTests(TestCase):
    """Test frontend performance aspects."""

    def test_static_file_serving_performance(self):
        """Time serving of representative CSS/JS/image static files."""
        # Test CSS file serving
        css_start = time.time()
        css_response = self.client.get('/static/css/style.css')
        css_time = time.time() - css_start

        # Test JavaScript file serving
        js_start = time.time()
        js_response = self.client.get('/static/js/app.js')
        js_time = time.time() - js_start

        # Test image file serving
        img_start = time.time()
        img_response = self.client.get('/static/images/logo.png')
        img_time = time.time() - img_start

        print("Static File Serving Performance:")
        print(f"CSS: {css_time:.3f}s ({len(css_response.content)} bytes)")
        print(f"JS: {js_time:.3f}s ({len(js_response.content)} bytes)")
        print(f"Image: {img_time:.3f}s ({len(img_response.content)} bytes)")

        # Only assert when the files actually exist in this deployment
        if css_response.status_code == 200:
            self.assertLess(css_time, 0.1)
        if js_response.status_code == 200:
            self.assertLess(js_time, 0.1)
        if img_response.status_code == 200:
            self.assertLess(img_time, 0.2)

    def test_page_size_optimization(self):
        """Check page payload sizes and report gzip usage."""
        # Test main pages
        pages = [
            ('/', 'Homepage'),
            ('/blog/', 'Blog List'),
            ('/about/', 'About Page'),
        ]

        for url, name in pages:
            response = self.client.get(url)
            if response.status_code == 200:
                content_length = len(response.content)
                print(f"{name} Size: {content_length:,} bytes")

                # Pages should be reasonably sized
                self.assertLess(content_length, 500000)  # < 500KB

                # Report compression when the middleware sets the header
                if 'gzip' in response.get('Content-Encoding', ''):
                    print(f"{name} is gzipped")

    def test_template_rendering_performance(self):
        """Benchmark simple and loop-heavy template rendering."""
        from datetime import datetime
        from django.template import Template, Context

        # Test simple template
        simple_template = Template("""
        <html>
        <head><title>{{ title }}</title></head>
        <body>
            <h1>{{ heading }}</h1>
            <p>{{ content }}</p>
        </body>
        </html>
        """)
        simple_context = Context({
            'title': 'Test Page',
            'heading': 'Welcome',
            'content': 'This is test content.'
        })

        start_time = time.time()
        for _ in range(1000):
            rendered = simple_template.render(simple_context)
        simple_time = time.time() - start_time

        # Test complex template with loops
        complex_template = Template("""
        <html>
        <head><title>{{ title }}</title></head>
        <body>
            <h1>{{ heading }}</h1>
            {% for item in items %}
            <div class="item">
                <h3>{{ item.title }}</h3>
                <p>{{ item.content|truncatewords:20 }}</p>
                <small>By {{ item.author }} on {{ item.date|date:"F j, Y" }}</small>
            </div>
            {% endfor %}
        </body>
        </html>
        """)
        complex_context = Context({
            'title': 'Complex Page',
            'heading': 'Items List',
            'items': [
                {
                    'title': f'Item {i}',
                    'content': f'Content for item {i}. ' * 20,
                    # BUGFIX: the |date filter expects a date/datetime; the
                    # original passed time.time() (a float epoch), which the
                    # filter silently renders as an empty string.
                    'author': f'Author {i}',
                    'date': datetime.now()
                }
                for i in range(50)
            ]
        })

        start_time = time.time()
        for _ in range(100):
            rendered = complex_template.render(complex_context)
        complex_time = time.time() - start_time

        print("Template Rendering Performance:")
        print(f"Simple Template (1000x): {simple_time:.3f}s")
        print(f"Complex Template (100x): {complex_time:.3f}s")

        # Template rendering should be fast
        self.assertLess(simple_time, 1.0)
        self.assertLess(complex_time, 2.0)
class PerformanceMonitoringTests(TestCase):
    """Test performance monitoring and alerting.

    BUGFIX: 'requests_per_second' is a higher-is-better metric, but both
    detect_regressions() and the alerting check treated it like a response
    time (higher == worse). Both now consult a shared HIGHER_IS_BETTER set.
    """

    # Metrics where a LARGER value is good; everything else: smaller is good.
    HIGHER_IS_BETTER = {'success_rate', 'requests_per_second'}

    def setUp(self):
        """Define alerting thresholds for the monitored metrics."""
        self.performance_thresholds = {
            'response_time_p95': 2.0,   # 95th percentile < 2 seconds
            'response_time_avg': 1.0,   # Average < 1 second
            'success_rate': 95.0,       # > 95% success rate
            'requests_per_second': 50,  # > 50 RPS
            'memory_usage_mb': 500,     # < 500MB memory usage
        }

    def test_performance_regression_detection(self):
        """Detect regressions between a baseline and a degraded run."""
        # Baseline performance measurement
        baseline_results = self.measure_baseline_performance()

        # Simulate performance regression
        regression_results = self.simulate_performance_regression()

        # Detect regressions
        regressions = self.detect_regressions(baseline_results, regression_results)

        if regressions:
            print("Performance Regressions Detected:")
            for metric, regression in regressions.items():
                print(f"  {metric}: {regression['change']:.1f}% worse")
        else:
            print("No performance regressions detected")

    def measure_baseline_performance(self):
        """Return the standard load test's headline metrics."""
        results = self.run_standard_load_test()
        return {
            'avg_response_time': results['avg_response_time'],
            'p95_response_time': results['p95_response_time'],
            'success_rate': results['success_rate'],
            'requests_per_second': results['requests_per_second']
        }

    def simulate_performance_regression(self):
        """Re-measure with an artificial 100ms delay injected per request."""
        original_get = self.client.get

        def slow_get(*args, **kwargs):
            time.sleep(0.1)  # Add 100ms delay
            return original_get(*args, **kwargs)

        self.client.get = slow_get
        try:
            results = self.run_standard_load_test()
            return {
                'avg_response_time': results['avg_response_time'],
                'p95_response_time': results['p95_response_time'],
                'success_rate': results['success_rate'],
                'requests_per_second': results['requests_per_second']
            }
        finally:
            # Always restore the original method, even on failure
            self.client.get = original_get

    def run_standard_load_test(self):
        """Run 20 sequential homepage requests and summarize the timings."""
        results = []
        for _ in range(20):
            start_time = time.time()
            response = self.client.get('/')
            end_time = time.time()
            results.append({
                'response_time': end_time - start_time,
                'success': response.status_code == 200
            })

        response_times = [r['response_time'] for r in results]
        successful = [r for r in results if r['success']]
        return {
            'avg_response_time': sum(response_times) / len(response_times),
            # Index clamped so a short sample cannot walk off the list
            'p95_response_time': sorted(response_times)[
                min(int(len(response_times) * 0.95), len(response_times) - 1)
            ],
            'success_rate': len(successful) / len(results) * 100,
            'requests_per_second': len(results) / sum(response_times)
        }

    def detect_regressions(self, baseline, current, threshold=10.0):
        """Return metrics whose change vs baseline exceeds threshold percent.

        'Worse' means lower for HIGHER_IS_BETTER metrics and higher for
        everything else (response times).
        """
        regressions = {}
        for metric in baseline:
            if metric in current:
                baseline_value = baseline[metric]
                current_value = current[metric]

                if metric in self.HIGHER_IS_BETTER:
                    # BUGFIX: requests_per_second was previously treated as
                    # higher-is-worse, flagging speedups as regressions.
                    change = (baseline_value - current_value) / baseline_value * 100
                else:
                    # For response times, higher is worse
                    change = (current_value - baseline_value) / baseline_value * 100

                # Check if regression exceeds threshold
                if change > threshold:
                    regressions[metric] = {
                        'baseline': baseline_value,
                        'current': current_value,
                        'change': change
                    }
        return regressions

    def test_performance_alerting(self):
        """Alert on any metric that violates its configured threshold."""
        # Measure current performance
        current_metrics = self.measure_baseline_performance()

        # Check against thresholds
        alerts = []
        for metric, threshold in self.performance_thresholds.items():
            if metric in current_metrics:
                value = current_metrics[metric]
                if metric in self.HIGHER_IS_BETTER:
                    # BUGFIX: the original alerted when throughput EXCEEDED
                    # its threshold; alert when it falls BELOW instead.
                    if value < threshold:
                        if metric == 'success_rate':
                            alerts.append(f"{metric}: {value:.1f}% (threshold: {threshold}%)")
                        else:
                            alerts.append(f"{metric}: {value:.3f} (threshold: {threshold})")
                else:
                    if value > threshold:
                        alerts.append(f"{metric}: {value:.3f} (threshold: {threshold})")

        if alerts:
            print("Performance Alerts:")
            for alert in alerts:
                print(f"  ALERT: {alert}")
        else:
            print("All performance metrics within thresholds")

        # In a real system, these alerts would be forwarded to monitoring
        # services such as Datadog, New Relic, or a custom alerting pipeline.
Performance testing is an ongoing process that should be integrated into your development workflow. Consider implementing automated load tests in your CI pipeline, tracking performance baselines over time, and alerting on regressions before they reach production.
Key performance testing concepts covered in this chapter include load and stress testing, database query optimization, cache effectiveness measurement, frontend asset performance, and regression detection with alerting.
With comprehensive testing knowledge now complete, you have the tools to ensure your Django application is reliable, secure, and performant across all aspects of development and deployment.
Advanced Testing Topics
Advanced testing techniques help you handle complex scenarios, improve test reliability, and ensure comprehensive coverage of your Django application. This chapter covers sophisticated testing patterns, mocking strategies, async testing, and integration with external services.
Static Assets and Frontend Integration
Modern web applications require sophisticated frontend capabilities, from basic CSS and JavaScript to complex single-page applications (SPAs) built with frameworks like React or Vue. Django provides robust support for managing static assets and integrating with modern frontend toolchains, enabling you to build full-stack applications that deliver exceptional user experiences.