Skip to content

Rate Limiting

View as Markdown

This builds on Users & Sessions. You’ll add token-bucket rate limiting to authentication endpoints.

  • Go 1.22+
  • Existing auth routes in place (OAuth, password auth, or both)
  • CSRF/origin checks already wired for state-changing routes
  • For production/multi-instance deployments: Redis

Build on earlier docs by adding a rate limiter implementation:

/auth
/sessions.go
/cookies.go
/middleware.go
/passwords.go
/users.go
/ratelimit.go // token bucket implementations
/handlers
/auth.go
/pages.go
/main.go

Token bucket gives each key (IP, user ID, email, etc.) a bucket with:

  • max tokens
  • fixed refill interval per token

Each request consumes one or more tokens (its cost). If the bucket holds fewer tokens than the request costs, reject the request.

Compared with a fixed window, token bucket handles bursts better and smooths traffic over time.

Put this in auth/ratelimit.go.

package auth
import (
"sync"
"time"
)
// bucket tracks the remaining tokens for a single key and the time
// tokens were last added.
type bucket struct {
	count      int       // tokens currently available
	refilledAt time.Time // last refill instant (UTC), advanced in whole intervals
}

// TokenBucketRateLimit is an in-memory token-bucket rate limiter.
//
// Each key gets a bucket holding at most max tokens; one token is added
// back every refillInterval. Safe for concurrent use, but state lives in
// process memory only — it is neither persisted nor shared across
// instances (see RedisTokenBucketRateLimit for that).
type TokenBucketRateLimit struct {
	max            int           // bucket capacity in tokens
	refillInterval time.Duration // time to regain one token
	mu             sync.Mutex    // guards storage
	storage        map[string]bucket
}

// NewTokenBucketRateLimit returns a limiter whose buckets hold max tokens
// and regain one token per refillInterval.
//
// A non-positive refillInterval is clamped to one second: Consume divides
// by the interval, so zero would panic with a division by zero. This also
// mirrors the clamping done by NewRedisTokenBucketRateLimit.
func NewTokenBucketRateLimit(max int, refillInterval time.Duration) *TokenBucketRateLimit {
	if refillInterval <= 0 {
		refillInterval = time.Second
	}
	return &TokenBucketRateLimit{
		max:            max,
		refillInterval: refillInterval,
		storage:        make(map[string]bucket),
	}
}

// Consume takes cost tokens from key's bucket and reports whether the
// request is allowed. A cost outside (0, max] is always rejected.
func (l *TokenBucketRateLimit) Consume(key string, cost int) bool {
	if cost <= 0 || cost > l.max {
		return false
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	now := time.Now().UTC()
	b, ok := l.storage[key]
	if !ok {
		// First request for this key: start from a full bucket.
		l.storage[key] = bucket{
			count:      l.max - cost,
			refilledAt: now,
		}
		return true
	}
	// Add one token per elapsed whole interval, capped at capacity.
	refill := int(now.Sub(b.refilledAt) / l.refillInterval)
	if refill > 0 {
		b.count += refill
		if b.count > l.max {
			b.count = l.max
		}
		// Advance by whole intervals only, so the fractional remainder
		// still counts toward the next refill.
		b.refilledAt = b.refilledAt.Add(time.Duration(refill) * l.refillInterval)
	}
	if b.count < cost {
		// Persist the refill even when rejecting.
		l.storage[key] = b
		return false
	}
	b.count -= cost
	l.storage[key] = b
	return true
}

Use this in-memory limiter only when your app runs as a single, long-lived process: the bucket state is per-process, lost on restart, and not shared across instances.

For long-running processes, add cleanup so inactive keys do not grow memory forever.

// PruneIdle removes every bucket whose last refill happened more than
// maxIdle ago, reclaiming memory held by keys that have gone quiet.
func (l *TokenBucketRateLimit) PruneIdle(maxIdle time.Duration) {
	l.mu.Lock()
	defer l.mu.Unlock()
	now := time.Now().UTC()
	for key, b := range l.storage {
		if now.Sub(b.refilledAt) > maxIdle {
			delete(l.storage, key)
		}
	}
}
// StartCleanup launches a background goroutine that calls PruneIdle every
// interval, dropping buckets idle for longer than maxIdle. The returned
// function stops the goroutine and its ticker; call it on shutdown.
func (l *TokenBucketRateLimit) StartCleanup(interval, maxIdle time.Duration) func() {
	ticker := time.NewTicker(interval)
	done := make(chan struct{})
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				l.PruneIdle(maxIdle)
			}
		}
	}()
	return func() { close(done) }
}

Example: run cleanup every 5 minutes and remove buckets idle for more than 1 hour.

Put this in a separate file, e.g. auth/ratelimit_redis.go — it declares its own imports, so it cannot share the auth/ratelimit.go file above.

package auth
import (
"context"
"fmt"
"time"
"github.com/redis/go-redis/v9"
)
// tokenBucketScript runs the token-bucket check-and-consume as one atomic
// Lua script, so concurrent requests for the same key cannot race between
// the read and the write. Per-key state is a Redis hash with fields
// "count" and "refilled_at_ms"; the key's TTL is set to the time until
// the bucket is full again, so idle buckets expire on their own.
var tokenBucketScript = redis.NewScript(`
-- Returns 1 if allowed, 0 if not
local key = KEYS[1]
local max = tonumber(ARGV[1])
local refillIntervalSeconds = tonumber(ARGV[2])
local cost = tonumber(ARGV[3])
local nowMilliseconds = tonumber(ARGV[4])
local fields = redis.call("HGETALL", key)
if #fields == 0 then
if max < cost then
return 0
end
local expiresInSeconds = cost * refillIntervalSeconds
redis.call("HSET", key, "count", max - cost, "refilled_at_ms", nowMilliseconds)
redis.call("EXPIRE", key, expiresInSeconds)
return 1
end
local count = 0
local refilledAtMilliseconds = 0
for i = 1, #fields, 2 do
if fields[i] == "count" then
count = tonumber(fields[i + 1])
elseif fields[i] == "refilled_at_ms" then
refilledAtMilliseconds = tonumber(fields[i + 1])
end
end
local refill = math.floor((nowMilliseconds - refilledAtMilliseconds) / (refillIntervalSeconds * 1000))
count = math.min(count + refill, max)
refilledAtMilliseconds = refilledAtMilliseconds + refill * refillIntervalSeconds * 1000
if count < cost then
return 0
end
count = count - cost
local expiresInSeconds = (max - count) * refillIntervalSeconds
redis.call("HSET", key, "count", count, "refilled_at_ms", refilledAtMilliseconds)
redis.call("EXPIRE", key, expiresInSeconds)
return 1
`)
// RedisTokenBucketRateLimit is a token-bucket rate limiter backed by
// Redis, so the bucket state is shared across application instances.
type RedisTokenBucketRateLimit struct {
	client                *redis.Client
	storageKey            string // namespace prefix so independent limiters don't collide
	max                   int64  // bucket capacity in tokens
	refillIntervalSeconds int64  // whole seconds to regain one token (>= 1)
}
// NewRedisTokenBucketRateLimit returns a Redis-backed limiter whose
// buckets hold max tokens and regain one token per refillInterval.
// The interval is truncated to whole seconds and clamped to at least one
// second, since the Lua script works in second granularity.
func NewRedisTokenBucketRateLimit(client *redis.Client, storageKey string, max int64, refillInterval time.Duration) *RedisTokenBucketRateLimit {
	l := &RedisTokenBucketRateLimit{
		client:                client,
		storageKey:            storageKey,
		max:                   max,
		refillIntervalSeconds: int64(refillInterval / time.Second),
	}
	if l.refillIntervalSeconds < 1 {
		l.refillIntervalSeconds = 1
	}
	return l
}
// Consume takes cost tokens from key's bucket via the atomic Lua script
// and reports whether the request is allowed. A cost outside (0, max] is
// rejected without touching Redis; a non-nil error means Redis failed and
// the caller must decide whether to fail open or closed.
func (l *RedisTokenBucketRateLimit) Consume(ctx context.Context, key string, cost int64) (bool, error) {
	if cost <= 0 || cost > l.max {
		return false, nil
	}
	// Versioned key includes the interval so config changes start fresh buckets.
	bucketKey := fmt.Sprintf("token_bucket.v1:%s:%d:%s", l.storageKey, l.refillIntervalSeconds, key)
	args := []any{l.max, l.refillIntervalSeconds, cost, time.Now().UTC().UnixMilli()}
	result, err := tokenBucketScript.Run(ctx, l.client, []string{bucketKey}, args...).Int64()
	if err != nil {
		return false, err
	}
	return result == 1, nil
}

Put this in main.go.

import (
"net"
"net/http"
"strings"
"time"
auth "github.com/.../auth"
handlers "github.com/.../handlers"
)
func clientIP(r *http.Request) string {
// Only trust X-Forwarded-For when your proxy setup is trusted.
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
parts := strings.Split(xff, ",")
return strings.TrimSpace(parts[0])
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}
// userIDFromSession returns the authenticated user's ID for rate-limit
// keying, or "anonymous" when the request carries no session.
func userIDFromSession(r *http.Request) string {
	if session := auth.GetSession(r.Context()); session != nil {
		return session.UserID
	}
	return "anonymous"
}
// main wires token-bucket rate limiters in front of the auth routes.
//
// NOTE(review): redisClient, db, mailer, mux, and requireSameOrigin are
// not defined in this snippet — presumably they come from the earlier
// docs this page builds on; confirm they are initialized before this
// point in the real main.go.
func main() {
	// Per-IP limiters for unauthenticated, abuse-prone endpoints.
	passwordLoginLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "password-login:ip", 5, 30*time.Second)
	passwordSignupLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "password-signup:ip", 5, 30*time.Second)
	passwordRecoveryLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "password-recovery:ip", 5, 60*time.Second)
	// Per-user limiter: the verification routes run behind RequireSession.
	verificationLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "verification:user", 10, 6*time.Minute)
	oauthStartLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "oauth-start:ip", 20, 10*time.Second)
	oauthCallbackLimiter := auth.NewRedisTokenBucketRateLimit(redisClient, "oauth-callback:ip", 20, 10*time.Second)
	// withRateLimit consumes cost tokens from the bucket selected by keyFn
	// before invoking next; a Redis error becomes 500, exhaustion becomes 429.
	withRateLimit := func(limiter *auth.RedisTokenBucketRateLimit, cost int64, keyFn func(*http.Request) string, next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			allowed, err := limiter.Consume(r.Context(), keyFn(r), cost)
			if err != nil {
				http.Error(w, "Internal error", http.StatusInternalServerError)
				return
			}
			if !allowed {
				http.Error(w, "Too many requests", http.StatusTooManyRequests)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
	// Email/password routes (if implemented)
	mux.Handle("POST /signup", requireSameOrigin(withRateLimit(passwordSignupLimiter, 1, clientIP, handlers.HandleSignup(db))))
	mux.Handle("POST /login", requireSameOrigin(withRateLimit(passwordLoginLimiter, 1, clientIP, handlers.HandleLogin(db))))
	mux.Handle("POST /password/forgot", requireSameOrigin(withRateLimit(passwordRecoveryLimiter, 1, clientIP, handlers.HandleForgotPassword(db, mailer))))
	mux.Handle("POST /verify/send", requireSameOrigin(auth.RequireSession(withRateLimit(verificationLimiter, 1, userIDFromSession, handlers.HandleSendVerificationCode(db, mailer)))))
	mux.Handle("POST /verify/confirm", requireSameOrigin(auth.RequireSession(withRateLimit(verificationLimiter, 1, userIDFromSession, handlers.HandleVerifyEmail(db)))))
	// OAuth routes (if implemented)
	mux.Handle("GET /oauth/{provider}", withRateLimit(oauthStartLimiter, 1, clientIP, handlers.HandleOAuthStart(db)))
	mux.Handle("GET /oauth/{provider}/callback", withRateLimit(oauthCallbackLimiter, 1, clientIP, handlers.HandleOAuthCallback(db)))
}

Starter defaults:

| Route | Key | Max | Refill |
| --- | --- | --- | --- |
| POST /login | IP | 5 | 1 token / 30s |
| POST /signup | IP | 5 | 1 token / 30s |
| POST /password/forgot | IP | 5 | 1 token / 60s |
| POST /verify/send | User ID | 10 | 1 token / 6m |
| POST /verify/confirm | User ID | 10 | 1 token / 6m |
| GET /oauth/{provider} | IP | 20 | 1 token / 10s |
| GET /oauth/{provider}/callback | IP | 20 | 1 token / 10s |

Start simple and tune with real traffic.

Head to Inactivity Timeouts to expire sessions after periods of inactivity.