forked from hhftechnology/bandwidthlimiter
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbandwidthlimiter.go
More file actions
616 lines (517 loc) · 15.5 KB
/
bandwidthlimiter.go
File metadata and controls
616 lines (517 loc) · 15.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
// Package bandwidthlimiter implements a Traefik middleware plugin for bandwidth limiting
package bandwidthlimiter
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
// Config holds the plugin configuration. All limits are expressed in bytes
// per second and enforced with a token-bucket algorithm (see TokenBucket).
type Config struct {
	// Default bandwidth limit in bytes per second (for both uploads and downloads).
	// Must be > 0; validated in New.
	DefaultLimit int64 `json:"defaultLimit"`
	// Backend-specific limits: map[backend-address]limit, in bytes per second.
	BackendLimits map[string]int64 `json:"backendLimits,omitempty"`
	// Client IP-specific limits: map[client-ip]limit, in bytes per second.
	// A client limit takes precedence over a backend limit (see getLimit).
	ClientLimits map[string]int64 `json:"clientLimits,omitempty"`
	// Burst size - how many bytes can be transferred in a single burst.
	// When unset, New defaults it to 10x DefaultLimit.
	BurstSize int64 `json:"burstSize,omitempty"`
	// Maximum age of unused buckets before cleanup (in seconds).
	// Default: 3600 (1 hour)
	BucketMaxAge int64 `json:"bucketMaxAge,omitempty"`
	// Cleanup interval in seconds.
	// Default: 300 (5 minutes)
	CleanupInterval int64 `json:"cleanupInterval,omitempty"`
	// File path for persistent bucket storage.
	// If empty, no file storage is used.
	PersistenceFile string `json:"persistenceFile,omitempty"`
	// How often to save buckets to file (in seconds).
	// Default: 60 (1 minute)
	SaveInterval int64 `json:"saveInterval,omitempty"`
}
// CreateConfig creates the default plugin configuration: 1 MB/s rate,
// 10 MB burst, hourly bucket expiry, 5-minute cleanup and 1-minute save
// intervals, with empty override maps ready for use.
func CreateConfig() *Config {
	cfg := &Config{
		BackendLimits: make(map[string]int64),
		ClientLimits:  make(map[string]int64),
	}
	cfg.DefaultLimit = 1 << 20 // 1 MB/s default
	cfg.BurstSize = 10 << 20   // 10 MB burst default
	cfg.BucketMaxAge = 3600    // 1 hour
	cfg.CleanupInterval = 300  // 5 minutes
	cfg.SaveInterval = 60      // 1 minute
	return cfg
}
// BandwidthLimiter implements the middleware: an http.Handler that throttles
// request bodies (uploads) and responses (downloads) with per-key token
// buckets, plus background goroutines for cleanup and optional persistence.
type BandwidthLimiter struct {
	next          http.Handler   // downstream handler in the middleware chain
	name          string         // middleware instance name
	config        *Config        // normalized configuration (see New)
	buckets       sync.Map       // map[string]*bucketWrapper for download limiting
	uploadBuckets sync.Map       // map[string]*bucketWrapper for upload limiting
	cleanupTicker *time.Ticker   // drives cleanupRoutine; set in New
	saveTicker    *time.Ticker   // drives saveRoutine; nil when persistence is disabled
	shutdownChan  chan struct{}  // closed by Shutdown to stop background goroutines
	wg            sync.WaitGroup // tracks the cleanup/save goroutines for Shutdown
}
// bucketWrapper wraps a TokenBucket with metadata for cleanup and persistence.
// NOTE(review): lastUsed is written from request goroutines (ServeHTTP) and
// read by the cleanup/save goroutines without synchronization — a data race
// under the Go memory model; consider an atomic or mutex. TODO confirm.
type bucketWrapper struct {
	bucket   *TokenBucket
	lastUsed time.Time // compared against BucketMaxAge by doCleanup
	key      string    // For easier identification
}
// TokenBucket implements the token bucket algorithm for rate limiting.
// Tokens represent bytes: the bucket refills at `limit` tokens per second,
// capped at `burstSize`. All fields are guarded by mutex.
type TokenBucket struct {
	tokens     int64      // currently available tokens (bytes)
	limit      int64      // refill rate in bytes per second
	burstSize  int64      // maximum tokens the bucket can hold
	lastRefill time.Time  // instant tokens were last credited (see Consume)
	mutex      sync.Mutex // guards every field above
}
// bucketState represents the serializable state of a bucket, used by
// saveBuckets/loadBuckets to persist limiter state across restarts as JSON.
type bucketState struct {
	Key        string    `json:"key"`  // bucket key ("clientIP:backend")
	Type       string    `json:"type"` // "download" or "upload"
	Tokens     int64     `json:"tokens"`
	Limit      int64     `json:"limit"`
	BurstSize  int64     `json:"burstSize"`
	LastRefill time.Time `json:"lastRefill"`
	LastUsed   time.Time `json:"lastUsed"` // restored so cleanup ages buckets correctly
}
// NewTokenBucket creates a bucket that refills at limit bytes per second and
// holds at most burstSize tokens. The bucket starts full so an initial burst
// is permitted immediately.
func NewTokenBucket(limit, burstSize int64) *TokenBucket {
	tb := new(TokenBucket)
	tb.limit = limit
	tb.burstSize = burstSize
	tb.tokens = burstSize // start full
	tb.lastRefill = time.Now()
	return tb
}
// Consume tries to take the requested number of tokens from the bucket,
// crediting tokens accrued since the last refill first. It returns true when
// the request was satisfied, false when the bucket holds too few tokens.
func (tb *TokenBucket) Consume(tokens int64) bool {
	tb.mutex.Lock()
	defer tb.mutex.Unlock()

	// Credit elapsed-time earnings, capped at the burst size.
	now := time.Now()
	earned := int64(now.Sub(tb.lastRefill).Seconds() * float64(tb.limit))
	if refilled := tb.tokens + earned; refilled < tb.burstSize {
		tb.tokens = refilled
	} else {
		tb.tokens = tb.burstSize
	}
	tb.lastRefill = now

	if tb.tokens < tokens {
		// Not enough credit accumulated yet; caller must retry later.
		return false
	}
	tb.tokens -= tokens
	return true
}
// getState snapshots the bucket's mutable fields for persistence. The Key,
// Type and LastUsed fields of the returned state are filled in by the caller.
func (tb *TokenBucket) getState() bucketState {
	tb.mutex.Lock()
	snapshot := bucketState{
		Tokens:     tb.tokens,
		Limit:      tb.limit,
		BurstSize:  tb.burstSize,
		LastRefill: tb.lastRefill,
	}
	tb.mutex.Unlock()
	return snapshot
}
// restoreFromState overwrites the bucket's fields from a persisted snapshot.
// Keeping the saved lastRefill means the next Consume credits tokens for the
// time the process was down, up to the burst cap.
func (tb *TokenBucket) restoreFromState(state bucketState) {
	tb.mutex.Lock()
	defer tb.mutex.Unlock()
	tb.tokens, tb.limit = state.Tokens, state.Limit
	tb.burstSize, tb.lastRefill = state.BurstSize, state.LastRefill
}
// min returns the smaller of two int64 values. Kept as a named helper so the
// package builds on Go versions predating the built-in min.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// New creates a new BandwidthLimiter plugin instance: it normalizes the
// configuration, optionally restores persisted buckets, and starts the
// background cleanup goroutine (plus the save goroutine when persistence is
// enabled). DefaultLimit must be positive; every other numeric option falls
// back to its documented default when zero or negative.
//
// Fix: the original only defaulted values that were exactly zero, so a
// negative CleanupInterval or SaveInterval reached time.NewTicker and
// panicked (NewTicker rejects non-positive durations); a negative
// BucketMaxAge made every bucket expire instantly.
func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {
	if config.DefaultLimit <= 0 {
		return nil, fmt.Errorf("defaultLimit must be greater than 0")
	}
	// Treat zero and negative settings alike: fall back to defaults.
	if config.BurstSize <= 0 {
		config.BurstSize = config.DefaultLimit * 10 // Default burst is 10x the rate
	}
	if config.BucketMaxAge <= 0 {
		config.BucketMaxAge = 3600 // 1 hour default
	}
	if config.CleanupInterval <= 0 {
		config.CleanupInterval = 300 // 5 minutes default
	}
	if config.SaveInterval <= 0 {
		config.SaveInterval = 60 // 1 minute default
	}
	bl := &BandwidthLimiter{
		next:         next,
		name:         name,
		config:       config,
		shutdownChan: make(chan struct{}),
	}
	// Load persisted buckets if persistence is enabled.
	if config.PersistenceFile != "" {
		if err := bl.loadBuckets(); err != nil {
			// Log the error but don't fail startup: stale or corrupt saved
			// state should not prevent the middleware from serving traffic.
			fmt.Printf("Warning: Failed to load persisted buckets: %v\n", err)
		}
	}
	// Start cleanup routine.
	bl.cleanupTicker = time.NewTicker(time.Duration(config.CleanupInterval) * time.Second)
	bl.wg.Add(1)
	go bl.cleanupRoutine()
	// Start save routine if persistence is enabled.
	if config.PersistenceFile != "" {
		bl.saveTicker = time.NewTicker(time.Duration(config.SaveInterval) * time.Second)
		bl.wg.Add(1)
		go bl.saveRoutine()
	}
	return bl, nil
}
// cleanupRoutine runs doCleanup on every cleanup-ticker tick until the
// shutdown channel is closed, then signals the WaitGroup and exits.
func (bl *BandwidthLimiter) cleanupRoutine() {
	defer bl.wg.Done()
	for {
		select {
		case <-bl.shutdownChan:
			return
		case <-bl.cleanupTicker.C:
			bl.doCleanup()
		}
	}
}
// doCleanup removes buckets whose lastUsed timestamp is older than the
// configured BucketMaxAge, and logs a summary when anything was removed.
//
// Fix: the original made four extra Range traversals over both maps purely
// to compute before/after counts; the counts are now tallied in the same
// pass that deletes stale entries. Deleting during Range is explicitly
// permitted by sync.Map.
func (bl *BandwidthLimiter) doCleanup() {
	now := time.Now()
	maxAge := time.Duration(bl.config.BucketMaxAge) * time.Second
	removed, kept := 0, 0
	// expire walks one map, deleting stale wrappers and counting the rest.
	expire := func(m *sync.Map) {
		m.Range(func(key, value interface{}) bool {
			wrapper := value.(*bucketWrapper)
			if now.Sub(wrapper.lastUsed) > maxAge {
				m.Delete(key)
				removed++
			} else {
				kept++
			}
			return true
		})
	}
	expire(&bl.buckets)       // download buckets
	expire(&bl.uploadBuckets) // upload buckets
	if removed > 0 {
		fmt.Printf("Cleanup removed %d unused buckets (kept %d active buckets)\n", removed, kept)
	}
}
// saveRoutine persists buckets on every save-ticker tick. When shutdown is
// signaled it performs one final save before exiting, so recent state is not
// lost across restarts.
func (bl *BandwidthLimiter) saveRoutine() {
	defer bl.wg.Done()
	done := false
	for !done {
		select {
		case <-bl.saveTicker.C:
			if err := bl.saveBuckets(); err != nil {
				fmt.Printf("Error saving buckets: %v\n", err)
			}
		case <-bl.shutdownChan:
			// Flush state one last time on shutdown.
			if err := bl.saveBuckets(); err != nil {
				fmt.Printf("Error saving buckets on shutdown: %v\n", err)
			}
			done = true
		}
	}
}
// saveBuckets atomically persists every bucket's state to the configured
// file: states are gathered from both maps, encoded as indented JSON into a
// sibling ".tmp" file, and renamed over the target.
//
// Fixes over the previous version: the temp file is no longer closed twice
// (an explicit Close shadowed by a deferred one), the Close error is checked
// before the rename (an unchecked Close can hide a failed flush, renaming a
// truncated file into place), and the temp file is removed on any failure so
// partial files do not accumulate on disk.
func (bl *BandwidthLimiter) saveBuckets() error {
	if bl.config.PersistenceFile == "" {
		return nil // Persistence disabled
	}
	var states []bucketState
	// collect appends the state of every bucket in m, tagged with bucketType.
	collect := func(m *sync.Map, bucketType string) {
		m.Range(func(key, value interface{}) bool {
			wrapper := value.(*bucketWrapper)
			state := wrapper.bucket.getState()
			state.Key = key.(string)
			state.Type = bucketType
			state.LastUsed = wrapper.lastUsed
			states = append(states, state)
			return true
		})
	}
	collect(&bl.buckets, "download")
	collect(&bl.uploadBuckets, "upload")
	// Create directory if it doesn't exist.
	dir := filepath.Dir(bl.config.PersistenceFile)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}
	// Write to temporary file first (atomic save).
	tempFile := bl.config.PersistenceFile + ".tmp"
	file, err := os.Create(tempFile)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	encoder := json.NewEncoder(file)
	encoder.SetIndent("", " ") // Pretty print for debugging
	if err := encoder.Encode(states); err != nil {
		file.Close()
		os.Remove(tempFile) // don't leave a partial temp file behind
		return fmt.Errorf("failed to encode buckets: %w", err)
	}
	// Close (and check) before the rename: a failed Close can mean the data
	// never reached the disk.
	if err := file.Close(); err != nil {
		os.Remove(tempFile)
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomic rename.
	if err := os.Rename(tempFile, bl.config.PersistenceFile); err != nil {
		os.Remove(tempFile)
		return fmt.Errorf("failed to rename file: %w", err)
	}
	fmt.Printf("Saved %d buckets to %s\n", len(states), bl.config.PersistenceFile)
	return nil
}
// loadBuckets restores previously saved bucket state from the persistence
// file. A missing file is not an error (first run); any other open or decode
// failure is reported to the caller.
func (bl *BandwidthLimiter) loadBuckets() error {
	if bl.config.PersistenceFile == "" {
		return nil // Persistence disabled
	}
	file, err := os.Open(bl.config.PersistenceFile)
	if os.IsNotExist(err) {
		return nil // File doesn't exist yet, that's OK
	}
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	var states []bucketState
	if err := json.NewDecoder(file).Decode(&states); err != nil {
		return fmt.Errorf("failed to decode buckets: %w", err)
	}

	// Rebuild one wrapper per saved state, filing it under the map matching
	// its recorded type. Anything other than "upload" goes to the download
	// map, which keeps older files without a Type field working.
	for _, state := range states {
		bucket := NewTokenBucket(state.Limit, state.BurstSize)
		bucket.restoreFromState(state)
		wrapper := &bucketWrapper{
			bucket:   bucket,
			lastUsed: state.LastUsed,
			key:      state.Key,
		}
		switch state.Type {
		case "upload":
			bl.uploadBuckets.Store(state.Key, wrapper)
		default:
			bl.buckets.Store(state.Key, wrapper)
		}
	}
	fmt.Printf("Loaded %d buckets from %s\n", len(states), bl.config.PersistenceFile)
	return nil
}
// Shutdown gracefully stops the bandwidth limiter: it signals both background
// goroutines, stops their tickers, and blocks until they have exited. The
// save routine flushes state one final time when it sees the signal, so
// persistence (when enabled) is complete before this returns. Shutdown must
// be called at most once — the channel close would panic on a second call.
func (bl *BandwidthLimiter) Shutdown() {
	close(bl.shutdownChan)
	for _, ticker := range []*time.Ticker{bl.cleanupTicker, bl.saveTicker} {
		if ticker != nil {
			ticker.Stop()
		}
	}
	bl.wg.Wait()
}
// ServeHTTP implements the http.Handler interface. It throttles the request
// body (uploads) and the response (downloads) for each client-IP/backend
// pair with independent token buckets, then delegates to the next handler.
func (bl *BandwidthLimiter) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	// Extract client IP (honors X-Forwarded-For, then X-Real-IP, then RemoteAddr).
	clientIP := getClientIP(req)
	// Get backend address from request.
	// NOTE(review): req.URL.Host is empty for ordinary server-side requests
	// (it is only populated for absolute-form request URIs), so this usually
	// falls through to "default" — confirm whether req.Host was intended so
	// BackendLimits entries can actually match.
	backend := req.URL.Host
	if backend == "" {
		backend = "default"
	}
	// Determine the bandwidth limit to apply (client > backend > default).
	limit := bl.getLimit(clientIP, backend)
	// Create key for this client/backend combination; both bucket maps share it.
	key := fmt.Sprintf("%s:%s", clientIP, backend)
	// Apply upload limiting if there's a request body. Note ContentLength of
	// -1 (unknown length, e.g. chunked encoding) also takes this path.
	if req.Body != nil && req.ContentLength != 0 {
		uploadWrapper := bl.getOrCreateUploadBucket(key, limit)
		// NOTE(review): lastUsed is written here without synchronization while
		// the cleanup/save goroutines read it — data race; consider an atomic.
		uploadWrapper.lastUsed = time.Now()
		// Wrap the request body with rate limiting.
		req.Body = &limitedReadCloser{
			ReadCloser: req.Body,
			bucket:     uploadWrapper.bucket,
		}
	}
	// Apply download limiting by wrapping the response writer.
	downloadWrapper := bl.getOrCreateBucket(key, limit)
	downloadWrapper.lastUsed = time.Now() // NOTE(review): same unsynchronized write as above.
	lrw := &limitedResponseWriter{
		ResponseWriter: rw,
		bucket:         downloadWrapper.bucket,
	}
	// Call the next handler with the throttled writer and body in place.
	bl.next.ServeHTTP(lrw, req)
}
// getOrCreateBucket returns the download bucket for key, creating one with
// the given limit and the configured burst size on first use. LoadOrStore
// guarantees all concurrent callers share a single wrapper: a speculatively
// built wrapper is discarded when another goroutine stored one first.
func (bl *BandwidthLimiter) getOrCreateBucket(key string, limit int64) *bucketWrapper {
	if existing, ok := bl.buckets.Load(key); ok {
		return existing.(*bucketWrapper)
	}
	fresh := &bucketWrapper{
		bucket:   NewTokenBucket(limit, bl.config.BurstSize),
		lastUsed: time.Now(),
		key:      key,
	}
	winner, _ := bl.buckets.LoadOrStore(key, fresh)
	return winner.(*bucketWrapper)
}
// getOrCreateUploadBucket returns the upload bucket for key, creating one
// with the given limit and the configured burst size on first use. As in
// getOrCreateBucket, LoadOrStore resolves creation races in favor of the
// first stored wrapper.
func (bl *BandwidthLimiter) getOrCreateUploadBucket(key string, limit int64) *bucketWrapper {
	if existing, ok := bl.uploadBuckets.Load(key); ok {
		return existing.(*bucketWrapper)
	}
	fresh := &bucketWrapper{
		bucket:   NewTokenBucket(limit, bl.config.BurstSize),
		lastUsed: time.Now(),
		key:      key,
	}
	winner, _ := bl.uploadBuckets.LoadOrStore(key, fresh)
	return winner.(*bucketWrapper)
}
// getLimit resolves the bandwidth limit for a request: a client-IP override
// wins over a backend override, which wins over the configured default.
func (bl *BandwidthLimiter) getLimit(clientIP, backend string) int64 {
	if v, ok := bl.config.ClientLimits[clientIP]; ok {
		return v
	}
	if v, ok := bl.config.BackendLimits[backend]; ok {
		return v
	}
	return bl.config.DefaultLimit
}
// getClientIP determines the caller's IP address: the first X-Forwarded-For
// entry when present, then X-Real-IP, and finally the connection's
// RemoteAddr with any port stripped (or verbatim if it has no port).
func getClientIP(req *http.Request) string {
	if ips := parseForwardedFor(req.Header.Get("X-Forwarded-For")); len(ips) > 0 {
		return ips[0]
	}
	if xri := req.Header.Get("X-Real-IP"); xri != "" {
		return xri
	}
	if host, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
		return host
	}
	// RemoteAddr had no host:port form; use it as-is.
	return req.RemoteAddr
}
// parseForwardedFor splits an X-Forwarded-For header value into its
// comma-separated entries, trimming surrounding whitespace and dropping
// empty items. It returns nil when no usable entries remain.
func parseForwardedFor(xff string) []string {
	parts := strings.Split(xff, ",")
	entries := make([]string, 0, len(parts))
	for _, part := range parts {
		if entry := strings.TrimSpace(part); entry != "" {
			entries = append(entries, entry)
		}
	}
	if len(entries) == 0 {
		return nil // match the original's nil result for empty input
	}
	return entries
}
// limitedResponseWriter wraps http.ResponseWriter to apply bandwidth limiting
// to response bodies. Header and WriteHeader pass through via the embedded
// ResponseWriter; Write is overridden to throttle data.
type limitedResponseWriter struct {
	http.ResponseWriter
	bucket *TokenBucket
}

// Flush forwards to the underlying writer's http.Flusher, when it has one.
// Without this method the wrapper hid the Flusher interface from downstream
// handlers, breaking streaming/SSE responses behind the middleware.
func (lrw *limitedResponseWriter) Flush() {
	if flusher, ok := lrw.ResponseWriter.(http.Flusher); ok {
		flusher.Flush()
	}
}
// Write applies bandwidth limiting when writing response data: the payload
// is sent in chunks of at most 4KB, sleeping briefly while the token bucket
// is empty.
//
// Fix: when the configured burst size is smaller than the chunk size, a
// Consume of the full chunk can never succeed (tokens are capped at the
// burst size) and the old wait loop spun forever. The chunk is now halved
// after each failed attempt, down to a single byte, so progress is always
// possible once any token accrues.
func (lrw *limitedResponseWriter) Write(p []byte) (int, error) {
	totalWritten := 0
	remaining := p
	for len(remaining) > 0 {
		chunkSize := min(int64(len(remaining)), 4096) // 4KB chunks
		// Wait for tokens, shrinking the chunk in case the bucket can never
		// hold chunkSize tokens at once (burst < chunk).
		for !lrw.bucket.Consume(chunkSize) {
			if chunkSize > 1 {
				chunkSize /= 2
				continue
			}
			time.Sleep(10 * time.Millisecond) // bucket empty; back off
		}
		written, err := lrw.ResponseWriter.Write(remaining[:chunkSize])
		totalWritten += written
		if err != nil {
			return totalWritten, err
		}
		// Advance past what was actually written (handles short writes).
		remaining = remaining[written:]
	}
	return totalWritten, nil
}
// limitedReadCloser wraps io.ReadCloser to apply bandwidth limiting on
// uploads (request bodies). Close passes through to the embedded ReadCloser;
// only Read is overridden.
type limitedReadCloser struct {
	io.ReadCloser
	bucket *TokenBucket
}
// Read applies bandwidth limiting when reading request data (uploads): at
// most 4KB is read per call, and the read blocks until the token bucket
// grants the chunk.
//
// Fix: as in limitedResponseWriter.Write, a burst size smaller than the
// chunk meant Consume could never succeed and Read spun forever; the
// requested chunk now halves after each failed attempt, down to one byte.
// NOTE(review): tokens are charged for the full chunk even when the
// underlying reader returns fewer bytes — same over-throttling of short
// reads as the original.
func (lrc *limitedReadCloser) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return lrc.ReadCloser.Read(p)
	}
	chunk := int64(len(p))
	if chunk > 4096 {
		chunk = 4096 // 4KB chunks
	}
	// Wait for tokens, shrinking the chunk when the bucket cannot hold
	// that many tokens at once (burst < chunk).
	for !lrc.bucket.Consume(chunk) {
		if chunk > 1 {
			chunk /= 2
			continue
		}
		time.Sleep(10 * time.Millisecond) // bucket empty; back off
	}
	return lrc.ReadCloser.Read(p[:chunk])
}