# Rate Limiting
Understanding API rate limits and best practices.
## Overview
The CraftServerManager API currently does not enforce rate limiting by default. However, this may change in future versions to ensure fair usage and server stability.
## Recommended Limits
For optimal performance and fair usage, we recommend the following limits (see the configuration sketch after the table):
| Endpoint Type | Requests per Minute | Requests per Hour |
|---|---|---|
| Health Check | Unlimited | Unlimited |
| Authentication | 10 | 100 |
| Read Operations | 60 | 1000 |
| Write Operations | 30 | 500 |
| Admin Operations | 30 | 500 |
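As a sketch of how these recommendations could be applied on the client, the table can be captured in a small configuration object and fed into a limiter such as the `RateLimiter` class shown under Best Practices below. The endpoint-type keys are illustrative and not part of the API:

```typescript
// Illustrative only: the keys simply mirror the endpoint types in the table above.
const RECOMMENDED_LIMITS = {
  auth:  { perMinute: 10, perHour: 100 },
  read:  { perMinute: 60, perHour: 1000 },
  write: { perMinute: 30, perHour: 500 },
  admin: { perMinute: 30, perHour: 500 },
} as const;

type EndpointType = keyof typeof RECOMMENDED_LIMITS;

// Look up the per-minute budget for a given endpoint type.
function perMinuteLimit(type: EndpointType): number {
  return RECOMMENDED_LIMITS[type].perMinute;
}
```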
## Best Practices

### 1. Implement Client-Side Rate Limiting
Even without server-side enforcement, implement client-side rate limiting:
```typescript
class RateLimiter {
  private requests: number[] = [];

  constructor(
    private maxRequests: number,
    private windowMs: number
  ) {}

  async checkLimit(): Promise<boolean> {
    const now = Date.now();

    // Remove old requests outside window
    this.requests = this.requests.filter(
      time => now - time < this.windowMs
    );

    // Check if limit reached
    if (this.requests.length >= this.maxRequests) {
      return false;
    }

    this.requests.push(now);
    return true;
  }

  async waitForSlot(): Promise<void> {
    while (!(await this.checkLimit())) {
      await new Promise(resolve => setTimeout(resolve, 100));
    }
  }
}

// Usage
const limiter = new RateLimiter(60, 60000); // 60 req/min

async function makeRequest(url: string, token: string) {
  await limiter.waitForSlot();
  return fetch(url, {
    headers: { 'Authorization': `Bearer ${token}` }
  });
}
```

### 2. Use Caching
Cache responses to reduce API calls:
```typescript
class CachedAPI {
  private cache = new Map<string, {data: any, expires: number}>();

  async get(url: string, token: string, cacheDuration = 60000) {
    const cached = this.cache.get(url);
    if (cached && Date.now() < cached.expires) {
      return cached.data;
    }

    const response = await fetch(url, {
      headers: { 'Authorization': `Bearer ${token}` }
    });
    const data = await response.json();

    this.cache.set(url, {
      data,
      expires: Date.now() + cacheDuration
    });

    return data;
  }
}
```

### 3. Batch Requests
Instead of multiple individual requests, batch when possible:
```typescript
// Bad: Multiple individual requests
for (const username of usernames) {
  await getPlayer(username);
}

// Good: Single batched request (if supported)
const players = await getPlayers(usernames);
```

### 4. Use WebSocket (Future)
For real-time updates, consider WebSocket connections instead of polling:
```typescript
// Instead of polling every second
setInterval(() => getServerStats(), 1000);

// Use WebSocket (when available)
const ws = new WebSocket('ws://localhost:8080/ws');
ws.onmessage = (event) => {
  const stats = JSON.parse(event.data);
  updateUI(stats);
};
```

### 5. Implement Exponential Backoff
When requests fail or are rate limited, retry with exponential backoff:
```typescript
async function fetchWithBackoff(
  url: string,
  options: RequestInit,
  maxRetries = 3
) {
  for (let i = 0; i < maxRetries; i++) {
    try {
      const response = await fetch(url, options);

      if (response.status === 429) {
        const retryAfter = response.headers.get('Retry-After');
        const delay = retryAfter
          ? parseInt(retryAfter) * 1000
          : Math.pow(2, i) * 1000;
        await new Promise(resolve => setTimeout(resolve, delay));
        continue;
      }

      return response;
    } catch (error) {
      if (i === maxRetries - 1) throw error;
      await new Promise(resolve =>
        setTimeout(resolve, Math.pow(2, i) * 1000)
      );
    }
  }
  throw new Error('Max retries exceeded');
}
```

## Rate Limit Headers (Future)
When rate limiting is implemented, responses will include:
```
X-RateLimit-Limit: 60
X-RateLimit-Remaining: 45
X-RateLimit-Reset: 1705766400
```

Headers:

- `X-RateLimit-Limit` - Maximum requests allowed in window
- `X-RateLimit-Remaining` - Remaining requests in current window
- `X-RateLimit-Reset` - Unix timestamp when limit resets
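These headers are not sent yet, but a client can already read them defensively so nothing breaks when they appear. A minimal sketch (the helper name is ours, not part of any SDK):

```typescript
// Reads the documented rate-limit headers from a fetch Response.
// Returns nulls until the server actually starts sending them.
function readRateLimitInfo(response: Response) {
  const limit = response.headers.get('X-RateLimit-Limit');
  const remaining = response.headers.get('X-RateLimit-Remaining');
  const reset = response.headers.get('X-RateLimit-Reset');

  return {
    limit: limit !== null ? parseInt(limit, 10) : null,
    remaining: remaining !== null ? parseInt(remaining, 10) : null,
    // X-RateLimit-Reset is a Unix timestamp in seconds
    resetAt: reset !== null ? new Date(parseInt(reset, 10) * 1000) : null,
  };
}
```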
## Handling Rate Limit Errors
When rate limiting is enforced, you'll receive:
```json
{
  "success": false,
  "error": "Too many requests",
  "code": "RATE_LIMIT_EXCEEDED",
  "details": {
    "retryAfter": 30,
    "limit": 60,
    "window": 60
  }
}
```

Response status: HTTP 429 (Too Many Requests)
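A minimal sketch of handling this error shape, assuming the response body above; the `apiFetch` wrapper name and the single-retry policy are illustrative choices, not part of the API:

```typescript
// Retries once after `details.retryAfter` seconds when the request was
// rate limited; otherwise returns the response unchanged.
async function apiFetch(url: string, options: RequestInit = {}): Promise<Response> {
  const response = await fetch(url, options);

  if (response.status === 429) {
    const body = await response.json();
    if (body.code === 'RATE_LIMIT_EXCEEDED') {
      const waitMs = (body.details?.retryAfter ?? 1) * 1000;
      await new Promise(resolve => setTimeout(resolve, waitMs));
      return fetch(url, options);
    }
  }

  return response;
}
```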
## Implementation Examples

### Python with Rate Limiting
```python
import time
from collections import deque

import requests

class RateLimiter:
    def __init__(self, max_requests, window_seconds):
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        self.requests = deque()

    def wait_if_needed(self):
        now = time.time()

        # Remove old requests
        while self.requests and now - self.requests[0] > self.window_seconds:
            self.requests.popleft()

        # Check if we need to wait
        if len(self.requests) >= self.max_requests:
            wait_time = self.window_seconds - (now - self.requests[0])
            if wait_time > 0:
                time.sleep(wait_time)
            self.requests.popleft()

        self.requests.append(time.time())

# Usage
limiter = RateLimiter(max_requests=60, window_seconds=60)

def api_call(url, token):
    limiter.wait_if_needed()
    return requests.get(url, headers={'Authorization': f'Bearer {token}'})
```

### React Hook for Rate Limiting
```typescript
import { useRef } from 'react';

function useRateLimiter(maxRequests: number, windowMs: number) {
  const requests = useRef<number[]>([]);

  const checkLimit = (): boolean => {
    const now = Date.now();
    requests.current = requests.current.filter(
      time => now - time < windowMs
    );

    if (requests.current.length >= maxRequests) {
      return false;
    }

    requests.current.push(now);
    return true;
  };

  const waitForSlot = async (): Promise<void> => {
    while (!checkLimit()) {
      await new Promise(resolve => setTimeout(resolve, 100));
    }
  };

  return { checkLimit, waitForSlot };
}

// Usage in component
function MyComponent() {
  const { waitForSlot } = useRateLimiter(60, 60000);

  const fetchData = async () => {
    await waitForSlot();
    const response = await fetch('/api/players');
    // ...
  };
}
```

## Monitoring Usage
Track your API usage:
```typescript
class APIMonitor {
  private stats = {
    totalRequests: 0,
    successfulRequests: 0,
    failedRequests: 0,
    rateLimited: 0
  };

  recordRequest(success: boolean, rateLimited = false) {
    this.stats.totalRequests++;
    if (success) this.stats.successfulRequests++;
    else this.stats.failedRequests++;
    if (rateLimited) this.stats.rateLimited++;
  }

  getStats() {
    return {
      ...this.stats,
      successRate: (this.stats.successfulRequests / this.stats.totalRequests) * 100
    };
  }
}
```

## Optimization Tips
### 1. Reduce Polling Frequency
```typescript
// Instead of every second
setInterval(fetchData, 1000);

// Poll less frequently
setInterval(fetchData, 5000); // Every 5 seconds
```

### 2. Use Conditional Requests
```typescript
let etag: string | null = null;

async function fetchIfModified(url: string, token: string) {
  // Record<string, string> so the conditional header can be added below
  const headers: Record<string, string> = {
    'Authorization': `Bearer ${token}`
  };

  if (etag) {
    headers['If-None-Match'] = etag;
  }

  const response = await fetch(url, { headers });

  if (response.status === 304) {
    // Not modified, use cached data
    return null;
  }

  etag = response.headers.get('ETag');
  return await response.json();
}
```

### 3. Debounce User Input
```typescript
function debounce<T extends (...args: any[]) => any>(
  func: T,
  wait: number
): (...args: Parameters<T>) => void {
  let timeout: NodeJS.Timeout;

  return (...args: Parameters<T>) => {
    clearTimeout(timeout);
    timeout = setTimeout(() => func(...args), wait);
  };
}

// Usage
const searchPlayers = debounce(async (query: string) => {
  const results = await api.searchPlayers(query);
  updateUI(results);
}, 300); // Wait 300ms after user stops typing
```

## Future Plans
Rate limiting may be added in future versions with:
- Per-endpoint limits
- User-based quotas
- Admin exemptions
- Configurable limits
- Rate limit bypass for trusted IPs
## Related
- Error Handling - Handle rate limit errors
- Getting Started - API setup
- Best Practices - Usage guidelines