/**
 * AgentKits — Rate Limiter Module
 *
 * Fixed-window rate limiter for API calls. Counts requests (and optionally
 * tokens) per window and resets the counters when the window rolls over.
 * Helps avoid HTTP 429 (Too Many Requests) errors by throttling per provider.
 *
 * Usage:
 *   import { createRateLimiter, withRateLimit } from 'agentkits/ratelimit';
 *   const limiter = createRateLimiter({ maxRequests: 60, windowMs: 60000 });
 *   const limited = withRateLimit(createChat({ provider: 'deepseek' }), limiter);
 */
// ── Types ──────────────────────────────────────────────────────────
export interface RateLimiterConfig {
  /** Max requests per window (default: 60) */
  maxRequests?: number;
  /** Window duration in ms (default: 60000 = 1 minute) */
  windowMs?: number;
  /** Max tokens per window (optional, provider-specific; despite the name, enforced per windowMs) */
  maxTokensPerMinute?: number;
  /** Queue overflow strategy: 'wait' or 'drop' (default: 'wait') */
  strategy?: 'wait' | 'drop';
}
export interface RateLimiter {
  /** Acquire a slot. Resolves when ready, or throws if strategy is 'drop' and the window is full. */
  acquire(): Promise<void>;
  /** Report token usage (for TPM limiting) */
  reportTokens(count: number): void;
  /** Current state */
  readonly state: RateLimiterState;
  /** Reset all counters and release any queued callers */
  reset(): void;
}
export interface RateLimiterState {
  requestsInWindow: number;
  tokensInWindow: number;
  windowStart: number;
  queueLength: number;
}
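
// Illustrative sketch (not part of the module): the intended call pattern
// against the RateLimiter interface. `callModel` and `prompt` are
// hypothetical stand-ins for any provider request.
//
//   const limiter = createRateLimiter({ maxRequests: 60, windowMs: 60000 });
//   await limiter.acquire();                      // resolves once a slot is free
//   const res = await callModel(prompt);          // hypothetical provider call
//   limiter.reportTokens(res.usage.totalTokens);  // feeds the TPM accounting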
// ── Factory ────────────────────────────────────────────────────────
export function createRateLimiter(config: RateLimiterConfig = {}): RateLimiter {
  const maxRequests = config.maxRequests ?? 60;
  const windowMs = config.windowMs ?? 60000;
  const maxTPM = config.maxTokensPerMinute ?? Infinity;
  const strategy = config.strategy ?? 'wait';

  let requestsInWindow = 0;
  let tokensInWindow = 0;
  let windowStart = Date.now();
  const queue: Array<() => void> = [];
  function resetWindowIfNeeded() {
    const now = Date.now();
    if (now - windowStart >= windowMs) {
      requestsInWindow = 0;
      tokensInWindow = 0;
      windowStart = now;
    }
  }
  function tryDrain() {
    resetWindowIfNeeded();
    while (queue.length > 0 && requestsInWindow < maxRequests && tokensInWindow < maxTPM) {
      requestsInWindow++;
      const resolve = queue.shift()!;
      resolve();
    }
    // If callers remain queued (the refreshed window filled up again), schedule
    // another drain at the next window boundary. Without this, callers beyond
    // the first post-reset batch would wait forever: the per-caller timers set
    // in acquire() only cover the window in which they were enqueued.
    if (queue.length > 0) {
      setTimeout(tryDrain, windowMs - (Date.now() - windowStart) + 10);
    }
  }
  return {
    async acquire() {
      resetWindowIfNeeded();
      if (requestsInWindow < maxRequests && tokensInWindow < maxTPM) {
        requestsInWindow++;
        return;
      }
      if (strategy === 'drop') {
        throw new Error('Rate limit exceeded (drop strategy)');
      }
      // Wait strategy: park the caller and drain at the window boundary.
      return new Promise<void>((resolve) => {
        queue.push(resolve);
        // Schedule a drain just past the window end. tryDrain is idempotent,
        // so overlapping timers from multiple queued callers are harmless.
        const waitMs = windowMs - (Date.now() - windowStart) + 10;
        setTimeout(() => tryDrain(), waitMs);
      });
    },

    reportTokens(count: number) {
      resetWindowIfNeeded();
      tokensInWindow += count;
    },

    get state(): RateLimiterState {
      resetWindowIfNeeded();
      return {
        requestsInWindow,
        tokensInWindow,
        windowStart,
        queueLength: queue.length,
      };
    },

    reset() {
      requestsInWindow = 0;
      tokensInWindow = 0;
      windowStart = Date.now();
      // Release any queued callers immediately; they do not count against
      // the fresh window.
      while (queue.length > 0) {
        const resolve = queue.shift()!;
        resolve();
      }
    },
  };
}
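
// Illustrative sketch: the 'drop' strategy fails fast instead of queueing,
// which suits best-effort background work. `prefetchSuggestions` is a
// hypothetical helper; only createRateLimiter comes from this module.
//
//   const burst = createRateLimiter({ maxRequests: 5, windowMs: 1000, strategy: 'drop' });
//   try {
//     await burst.acquire();
//     await prefetchSuggestions();  // hypothetical background call
//   } catch {
//     // slot denied: the 1s window already holds 5 requests; skip this round
//   }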
// ── Chat Wrapper ───────────────────────────────────────────────────
/**
 * Wrap any ChatClient with rate limiting. Calls to `complete` and `chat`
 * first acquire a slot, then report actual token usage back to the limiter.
 */
export function withRateLimit<
  T extends { complete: (...args: any[]) => any; chat: (...args: any[]) => any },
>(client: T, limiter: RateLimiter): T {
  return new Proxy(client, {
    get(target, prop, receiver) {
      if (prop === 'complete' || prop === 'chat') {
        return async (...args: any[]) => {
          await limiter.acquire();
          const result = await (target as any)[prop](...args);
          // Feed real usage back so TPM accounting tracks actual consumption.
          if (result?.usage?.totalTokens) {
            limiter.reportTokens(result.usage.totalTokens);
          }
          return result;
        };
      }
      return Reflect.get(target, prop, receiver);
    },
  });
}
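
// Illustrative sketch: wrapping a client, mirroring the module-header usage.
// `createChat` is assumed to exist elsewhere in AgentKits; any object with
// `complete`/`chat` methods works.
//
//   const limiter = createRateLimiter(PROVIDER_LIMITS.deepseek);
//   const chat = withRateLimit(createChat({ provider: 'deepseek' }), limiter);
//   const reply = await chat.complete('Summarize this diff');
//   // Other properties pass through the Proxy to the underlying client untouched.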
// ── Pre-configured Provider Limits ────────────────────────────────
export const PROVIDER_LIMITS: Record<string, RateLimiterConfig> = {
  openai: { maxRequests: 500, windowMs: 60000, maxTokensPerMinute: 200000 },
  gemini: { maxRequests: 1500, windowMs: 60000, maxTokensPerMinute: 1000000 },
  deepseek: { maxRequests: 60, windowMs: 60000 },
  dashscope: { maxRequests: 120, windowMs: 60000 },
  zhipu: { maxRequests: 100, windowMs: 60000 },
  moonshot: { maxRequests: 60, windowMs: 60000 },
  minimax: { maxRequests: 60, windowMs: 60000 },
  ollama: { maxRequests: 1000, windowMs: 60000 }, // local, generous
};
/** Create a rate limiter pre-configured for a specific provider */
export function createProviderLimiter(provider: string): RateLimiter {
  const config = PROVIDER_LIMITS[provider] ?? { maxRequests: 60, windowMs: 60000 };
  return createRateLimiter(config);
}
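
// Illustrative sketch: per-provider limiters. Unknown provider names fall
// back to a conservative 60 requests per 60s window.
//
//   const gemini = createProviderLimiter('gemini');   // 1500 req/min, 1M TPM
//   const custom = createProviderLimiter('my-proxy'); // hypothetical name → fallback config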