From fbe2b6efece456dfb33bf229bd5d86ca1ddc8694 Mon Sep 17 00:00:00 2001 From: "Derrick J. Wippler" Date: Tue, 16 Apr 2024 16:40:33 -0500 Subject: [PATCH] WIP: New V3.0 Release --- .github/workflows/master.yaml | 4 +- README.md | 52 +-- algorithms.go | 25 +- benchmark_cache_test.go | 2 +- benchmark_test.go | 93 ++-- buf.gen.yaml | 13 - buf.yaml | 5 +- client.go | 171 ++++++- cluster/cluster.go | 52 +-- cluster/cluster_test.go | 31 +- cmd/gubernator-cli/main.go | 48 +- cmd/gubernator-cluster/main.go | 19 +- cmd/gubernator/main.go | 6 +- cmd/gubernator/main_test.go | 7 +- cmd/healthcheck/main.go | 7 +- config.go | 52 +-- config_test.go | 10 +- daemon.go | 549 +++++++++-------------- dns.go | 13 +- etcd.go | 14 +- functional_test.go | 523 +++++++++++---------- global.go | 70 +-- go.mod | 5 +- go.sum | 4 + grpc_stats.go | 145 ------ gubernator.go | 309 ++++++------- gubernator.pb.go | 403 ++++++++--------- gubernator.pb.gw.go | 240 ---------- gubernator.proto | 54 +-- gubernator_grpc.pb.go | 165 ------- handler.go | 168 +++++++ interval_test.go | 2 +- kubernetes.go | 4 +- lrucache_test.go | 2 +- memberlist.go | 4 +- metadata_carrier_test.go | 2 +- mock_cache_test.go | 2 +- mock_loader_test.go | 2 +- mock_store_test.go | 6 +- peer.go | 413 +++++++++++++++++ peer.pb.go | 425 ++++++++++++++++++ peers.proto => peer.proto | 37 +- peer_client.go | 435 ------------------ peer_client_test.go => peer_test.go | 71 ++- peers.pb.go | 495 -------------------- peers.pb.gw.go | 256 ----------- peers_grpc.pb.go | 163 ------- python/gubernator/__init__.py | 21 - python/gubernator/gubernator_pb2.py | 65 ++- python/gubernator/gubernator_pb2_grpc.py | 102 ----- python/gubernator/peer_pb2.py | 34 ++ python/gubernator/peers_pb2.py | 38 -- python/gubernator/peers_pb2_grpc.py | 104 ----- python/requirements-py2.txt | 15 - python/requirements-py3.txt | 11 - python/setup.py | 57 --- python/tests/__init__.py | 0 python/tests/test_client.py | 60 --- region_picker.go | 28 +- replicated_hash.go | 45 +- replicated_hash_test.go | 29 +- staticbuilder.go | 45 -- store.go | 10 +- store_test.go | 164 +++---- tls_test.go | 74 ++- workers.go | 40 +- workers_test.go | 2 +- 67 files changed, 2582 insertions(+), 3940 deletions(-) delete mode 100644 grpc_stats.go delete mode 100644 gubernator.pb.gw.go delete mode 100644 gubernator_grpc.pb.go create mode 100644 handler.go create mode 100644 peer.go create mode 100644 peer.pb.go rename peers.proto => peer.proto (68%) delete mode 100644 peer_client.go rename peer_client_test.go => peer_test.go (58%) delete mode 100644 peers.pb.go delete mode 100644 peers.pb.gw.go delete mode 100644 peers_grpc.pb.go delete mode 100644 python/gubernator/__init__.py delete mode 100644 python/gubernator/gubernator_pb2_grpc.py create mode 100644 python/gubernator/peer_pb2.py delete mode 100644 python/gubernator/peers_pb2.py delete mode 100644 python/gubernator/peers_pb2_grpc.py delete mode 100644 python/requirements-py2.txt delete mode 100644 python/requirements-py3.txt delete mode 100755 python/setup.py delete mode 100644 python/tests/__init__.py delete mode 100644 python/tests/test_client.py delete mode 100644 staticbuilder.go diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml index 34450a8..83dd798 100644 --- a/.github/workflows/master.yaml +++ b/.github/workflows/master.yaml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 15 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-go@v5 with: @@ -40,7 +40,7 @@ jobs: comment-on-alert: 
true - name: Save benchmark JSON to cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 with: path: ./cache/benchmark-data.json # Save with commit hash to avoid "cache already exists" diff --git a/README.md b/README.md index 7d1c267..6c9c8ab 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ Gubernator is a distributed, high performance, cloud native and stateless rate-l kubernetes or nomad trivial. * Gubernator holds no state on disk, It’s configuration is passed to it by the client on a per-request basis. -* Gubernator provides both GRPC and HTTP access to the API. * It Can be run as a sidecar to services that need rate limiting or as a separate service. * It Can be used as a library to implement a domain-specific rate limiting service. * Supports optional eventually consistent rate limit distribution for extremely @@ -38,8 +37,10 @@ $ docker-compose up -d ``` Now you can make rate limit requests via CURL ``` -# Hit the HTTP API at localhost:9080 (GRPC is at 9081) -$ curl http://localhost:9080/v1/HealthCheck +# Hit the HTTP API at localhost:9080 +$ curl http://localhost:9080/v1/health.check + +# TODO: Update this example # Make a rate limit request $ curl http://localhost:9080/v1/GetRateLimits \ @@ -59,7 +60,7 @@ $ curl http://localhost:9080/v1/GetRateLimits \ ### ProtoBuf Structure -An example rate limit request sent via GRPC might look like the following +An example rate limit request sent with protobuf might look like the following ```yaml rate_limits: # Scopes the request to a specific rate limit @@ -214,7 +215,7 @@ limiting service. When you use the library, your service becomes a full member of the cluster participating in the same consistent hashing and caching as a stand alone -Gubernator server would. All you need to do is provide the GRPC server instance +Gubernator server would. All you need to do is provide the server instance and tell Gubernator where the peers in your cluster are located. The `cmd/gubernator/main.go` is a great example of how to use Gubernator as a library. @@ -238,21 +239,13 @@ to support rate limit durations longer than a minute, day or month, calls to those rate limits that have durations over a self determined limit. ### API -All methods are accessed via GRPC but are also exposed via HTTP using the -[GRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway) #### Health Check Health check returns `unhealthy` in the event a peer is reported by etcd or kubernetes as `up` but the server instance is unable to contact that peer via it's advertised address. -###### GRPC -```grpc -rpc HealthCheck (HealthCheckReq) returns (HealthCheckResp) -``` - -###### HTTP ``` -GET /v1/HealthCheck +GET /v1/health.check ``` Example response: @@ -269,14 +262,8 @@ Rate limits can be applied or retrieved using this interface. If the client makes a request to the server with `hits: 0` then current state of the rate limit is retrieved but not incremented. 
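+
+For example, a request with `hits: 0` to peek at the current state might look
+like this (a sketch; the endpoint and payload fields are those shown below):
+```
+$ curl http://localhost:9080/v1/rate-limit.check \
+  --data '{"requests": [{"name": "requests_per_sec", "unique_key": "account:12345", "hits": "0", "limit": "10", "duration": "1000"}]}'
+```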
-###### GRPC
-```grpc
-rpc GetRateLimits (GetRateLimitsReq) returns (GetRateLimitsResp)
-```
-
-###### HTTP
 ```
-POST /v1/GetRateLimits
+POST /v1/rate-limit.check
 ```
 
 Example Payload
@@ -285,7 +272,7 @@ Example Payload
     "requests": [
         {
             "name": "requests_per_sec",
-            "uniqueKey": "account:12345",
+            "unique_key": "account:12345",
             "hits": "1",
             "limit": "10",
             "duration": "1000"
@@ -314,20 +301,10 @@ Example response:
 ```
 
 ### Deployment
-NOTE: Gubernator uses `etcd`, Kubernetes or round-robin DNS to discover peers and
+NOTE: Gubernator uses `memberlist`, Kubernetes, or round-robin DNS to discover peers and
 establish a cluster. If you don't have any of these, the docker-compose method is
 the simplest way to try gubernator out.
-
-##### Docker with existing etcd cluster
-```bash
-$ docker run -p 8081:81 -p 9080:80 -e GUBER_ETCD_ENDPOINTS=etcd1:2379,etcd2:2379 \
-    ghcr.io/gubernator-io/gubernator:latest
-
-# Hit the HTTP API at localhost:9080
-$ curl http://localhost:9080/v1/HealthCheck
-```
-
 ##### Kubernetes
 ```bash
 # Download the kubernetes deployment spec
@@ -346,14 +323,15 @@ you can use same fully-qualified domain name to both let your business logic con
 instances to find `gubernator` and for `gubernator` containers/instances to find each other.
 
 ##### TLS
-Gubernator supports TLS for both HTTP and GRPC connections. You can see an example with
-self signed certs by running `docker-compose-tls.yaml`
+Gubernator supports TLS. You can see an example with self-signed certs by running
+`docker-compose-tls.yaml`
 ```bash
 # Run docker compose
 $ docker-compose -f docker-compose-tls.yaml up -d
 
-# Hit the HTTP API at localhost:9080 (GRPC is at 9081)
-$ curl --cacert certs/ca.cert --cert certs/gubernator.pem --key certs/gubernator.key https://localhost:9080/v1/HealthCheck
+# Hit the HTTP API at localhost:9080
+$ curl -X POST --cacert certs/ca.cert --cert certs/gubernator.pem \
+    --key certs/gubernator.key https://localhost:9080/v1/health.check
 ```
 
 ### Configuration
diff --git a/algorithms.go b/algorithms.go
index c923161..ac17949 100644
--- a/algorithms.go
+++ b/algorithms.go
@@ -20,7 +20,6 @@ import (
 	"context"
 
 	"github.com/mailgun/holster/v4/clock"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -34,10 +33,7 @@ import (
 // with 100 emails and the request will succeed. You can override this default behavior with `DRAIN_OVER_LIMIT`
 
 // Implements token bucket algorithm for rate limiting. https://en.wikipedia.org/wiki/Token_bucket
-func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqState RateLimitReqState) (resp *RateLimitResp, err error) {
-	tokenBucketTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("tokenBucket"))
-	defer tokenBucketTimer.ObserveDuration()
-
+func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitRequest, reqState RateLimitRequestState) (resp *RateLimitResponse, err error) {
 	// Get rate limit from cache.
hashKey := r.HashKey() item, ok := c.GetItem(hashKey) @@ -81,7 +77,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqStat if s != nil { s.Remove(ctx, hashKey) } - return &RateLimitResp{ + return &RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: r.Limit, Remaining: r.Limit, @@ -112,7 +108,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqStat t.Limit = r.Limit } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Status: t.Status, Limit: r.Limit, Remaining: t.Remaining, @@ -203,7 +199,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqStat } // Called by tokenBucket() when adding a new item in the store. -func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqState RateLimitReqState) (resp *RateLimitResp, err error) { +func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitRequest, reqState RateLimitRequestState) (resp *RateLimitResponse, err error) { createdAt := *r.CreatedAt expire := createdAt + r.Duration @@ -229,7 +225,7 @@ func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq, ExpireAt: expire, } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: r.Limit, Remaining: t.Remaining, @@ -257,10 +253,7 @@ func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq, } // Implements leaky bucket algorithm for rate limiting https://en.wikipedia.org/wiki/Leaky_bucket -func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqState RateLimitReqState) (resp *RateLimitResp, err error) { - leakyBucketTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getRateLimit_leakyBucket")) - defer leakyBucketTimer.ObserveDuration() - +func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitRequest, reqState RateLimitRequestState) (resp *RateLimitResponse, err error) { if r.Burst == 0 { r.Burst = r.Limit } @@ -370,7 +363,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqStat b.Remaining = float64(b.Burst) } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Limit: b.Limit, Remaining: int64(b.Remaining), Status: Status_UNDER_LIMIT, @@ -434,7 +427,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqStat } // Called by leakyBucket() when adding a new item in the store. 
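+// A worked example of the rate computed below (a sketch of the visible math,
+// not additional behavior): with Limit=10 and Duration=1000 (one second),
+// rate = 1000/10 = 100ms per hit, so one unit of Remaining capacity is
+// restored roughly every 100ms, capped at Burst.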
-func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq, reqState RateLimitReqState) (resp *RateLimitResp, err error) { +func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitRequest, reqState RateLimitRequestState) (resp *RateLimitResponse, err error) { createdAt := *r.CreatedAt duration := r.Duration rate := float64(duration) / float64(r.Limit) @@ -458,7 +451,7 @@ func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq, Burst: r.Burst, } - rl := RateLimitResp{ + rl := RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: b.Limit, Remaining: r.Burst - r.Hits, diff --git a/benchmark_cache_test.go b/benchmark_cache_test.go index e19ee7e..644fd22 100644 --- a/benchmark_cache_test.go +++ b/benchmark_cache_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" ) diff --git a/benchmark_test.go b/benchmark_test.go index 6adc92f..58016be 100644 --- a/benchmark_test.go +++ b/benchmark_test.go @@ -18,42 +18,67 @@ package gubernator_test import ( "context" + "fmt" + "os" "testing" - guber "github.com/gubernator-io/gubernator/v2" - "github.com/gubernator-io/gubernator/v2/cluster" + guber "github.com/gubernator-io/gubernator/v3" + "github.com/gubernator-io/gubernator/v3/cluster" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/syncutil" "github.com/stretchr/testify/require" ) +// go test benchmark_test.go -bench=BenchmarkTrace -benchtime=20s -trace=trace.out +// go tool trace trace.out +func BenchmarkTrace(b *testing.B) { + if err := cluster.StartWith([]guber.PeerInfo{ + {HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, + + // DataCenterOne + {HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, + }); err != nil { + fmt.Println(err) + os.Exit(1) + } + defer cluster.Stop(context.Background()) +} + func BenchmarkServer(b *testing.B) { ctx := context.Background() conf := guber.Config{} err := conf.SetDefaults() require.NoError(b, err, "Error in conf.SetDefaults") createdAt := epochMillis(clock.Now()) + d := cluster.GetRandomDaemon() + client := d.MustClient().(guber.PeerClient) b.Run("GetPeerRateLimit", func(b *testing.B) { - client, err := guber.NewPeerClient(guber.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), - Behavior: conf.Behaviors, - }) - if err != nil { - b.Errorf("Error building client: %s", err) - } b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := client.GetPeerRateLimit(ctx, &guber.RateLimitReq{ - Name: b.Name(), - UniqueKey: guber.RandomString(10), - // Behavior: guber.Behavior_NO_BATCHING, - Limit: 10, - Duration: 5, - Hits: 1, - CreatedAt: &createdAt, - }) + var resp guber.ForwardResponse + err := client.Forward(ctx, &guber.ForwardRequest{ + Requests: []*guber.RateLimitRequest{ + { + Name: b.Name(), + UniqueKey: guber.RandomString(10), + // Behavior: 
guber.Behavior_NO_BATCHING, + Limit: 10, + Duration: 5, + Hits: 1, + CreatedAt: &createdAt, + }, + }, + }, &resp) if err != nil { b.Errorf("Error in client.GetPeerRateLimit: %s", err) } @@ -61,13 +86,14 @@ func BenchmarkServer(b *testing.B) { }) b.Run("GetRateLimits batching", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client := cluster.GetRandomDaemon().MustClient() require.NoError(b, err, "Error in guber.DialV1Server") b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: b.Name(), UniqueKey: guber.RandomString(10), @@ -76,7 +102,7 @@ func BenchmarkServer(b *testing.B) { Hits: 1, }, }, - }) + }, &resp) if err != nil { b.Errorf("Error in client.GetRateLimits(): %s", err) } @@ -84,13 +110,14 @@ func BenchmarkServer(b *testing.B) { }) b.Run("GetRateLimits global", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client := cluster.GetRandomDaemon().MustClient() require.NoError(b, err, "Error in guber.DialV1Server") b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: b.Name(), UniqueKey: guber.RandomString(10), @@ -100,7 +127,7 @@ func BenchmarkServer(b *testing.B) { Hits: 1, }, }, - }) + }, &resp) if err != nil { b.Errorf("Error in client.GetRateLimits: %s", err) } @@ -108,27 +135,29 @@ func BenchmarkServer(b *testing.B) { }) b.Run("HealthCheck", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client := cluster.GetRandomDaemon().MustClient() require.NoError(b, err, "Error in guber.DialV1Server") b.ResetTimer() for n := 0; n < b.N; n++ { - if _, err := client.HealthCheck(ctx, &guber.HealthCheckReq{}); err != nil { + var resp guber.HealthCheckResponse + if err := client.HealthCheck(ctx, &resp); err != nil { b.Errorf("Error in client.HealthCheck: %s", err) } } }) b.Run("Thundering herd", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client := cluster.GetRandomDaemon().MustClient() require.NoError(b, err, "Error in guber.DialV1Server") b.ResetTimer() fan := syncutil.NewFanOut(100) for n := 0; n < b.N; n++ { fan.Run(func(o interface{}) error { - _, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: b.Name(), UniqueKey: guber.RandomString(10), @@ -137,7 +166,7 @@ func BenchmarkServer(b *testing.B) { Hits: 1, }, }, - }) + }, &resp) if err != nil { b.Errorf("Error in client.GetRateLimits: %s", err) } diff --git a/buf.gen.yaml b/buf.gen.yaml index 5c62f51..b928594 100755 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -5,18 +5,5 @@ plugins: - plugin: buf.build/protocolbuffers/go:v1.32.0 out: ./ opt: paths=source_relative - - plugin: buf.build/grpc/go:v1.3.0 - out: ./ - opt: - - paths=source_relative - - 
require_unimplemented_servers=false - - plugin: buf.build/grpc-ecosystem/gateway:v2.18.0 # same version in go.mod - out: ./ - opt: - - paths=source_relative - - logtostderr=true - - generate_unbound_methods=true - - plugin: buf.build/grpc/python:v1.57.0 - out: ./python/gubernator - plugin: buf.build/protocolbuffers/python out: ./python/gubernator diff --git a/buf.yaml b/buf.yaml index b6d1351..38bfc22 100644 --- a/buf.yaml +++ b/buf.yaml @@ -8,7 +8,4 @@ breaking: - FILE lint: use: - - DEFAULT - rpc_allow_same_request_response: false - rpc_allow_google_protobuf_empty_requests: true - rpc_allow_google_protobuf_empty_responses: true \ No newline at end of file + - DEFAULT \ No newline at end of file diff --git a/client.go b/client.go index f989669..6112f5c 100644 --- a/client.go +++ b/client.go @@ -17,17 +17,24 @@ limitations under the License. package gubernator import ( + "bytes" + "context" crand "crypto/rand" "crypto/tls" + "fmt" "math/rand" + "net" + "net/http" "time" + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" "github.com/mailgun/holster/v4/clock" + "github.com/mailgun/holster/v4/setter" "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" + "go.opentelemetry.io/otel/propagation" + "golang.org/x/net/http2" + "google.golang.org/protobuf/proto" ) const ( @@ -36,32 +43,158 @@ const ( Minute = 60 * Second ) -func (m *RateLimitReq) HashKey() string { +type Client interface { + CheckRateLimits(context.Context, *CheckRateLimitsRequest, *CheckRateLimitsResponse) error + HealthCheck(context.Context, *HealthCheckResponse) error +} + +func (m *RateLimitRequest) HashKey() string { return m.Name + "_" + m.UniqueKey } -// DialV1Server is a convenience function for dialing gubernator instances -func DialV1Server(server string, tls *tls.Config) (V1Client, error) { - if len(server) == 0 { - return nil, errors.New("server is empty; must provide a server") +type ClientOptions struct { + // Users can provide their own http client with TLS config if needed + Client *http.Client + // The address of endpoint in the format `://:` + Endpoint string +} + +type client struct { + *duh.Client + prop propagation.TraceContext + opts ClientOptions +} + +// NewClient creates a new instance of the Gubernator user client +func NewClient(opts ClientOptions) (Client, error) { + setter.SetDefault(&opts.Client, DefaultHTTPClient) + + if len(opts.Endpoint) == 0 { + return nil, errors.New("opts.Endpoint is empty; must provide an address") + } + + return &client{ + Client: &duh.Client{ + Client: opts.Client, + }, + opts: opts, + }, nil +} + +func NewPeerClient(opts ClientOptions) PeerClient { + return &client{ + Client: &duh.Client{ + Client: opts.Client, + }, + opts: opts, + } +} + +func (c *client) CheckRateLimits(ctx context.Context, req *CheckRateLimitsRequest, resp *CheckRateLimitsResponse) error { + payload, err := proto.Marshal(req) + if err != nil { + return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil) } - // Setup OpenTelemetry interceptor to propagate spans. 
-	opts := []grpc.DialOption{
-		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
+	r, err := http.NewRequestWithContext(ctx, http.MethodPost,
+		fmt.Sprintf("%s%s", c.opts.Endpoint, RPCRateLimitCheck), bytes.NewReader(payload))
+	if err != nil {
+		return duh.NewClientError(err, nil)
 	}
-	if tls != nil {
-		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tls)))
-	} else {
-		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+
+	r.Header.Set("Content-Type", duh.ContentTypeProtoBuf)
+	return c.Do(r, resp)
+}
+
+func (c *client) HealthCheck(ctx context.Context, resp *HealthCheckResponse) error {
+	payload, err := proto.Marshal(&HealthCheckRequest{})
+	if err != nil {
+		return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil)
 	}
 
-	conn, err := grpc.Dial(server, opts...)
+	r, err := http.NewRequestWithContext(ctx, http.MethodPost,
+		fmt.Sprintf("%s%s", c.opts.Endpoint, RPCHealthCheck), bytes.NewReader(payload))
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to dial server %s", server)
+		return duh.NewClientError(err, nil)
 	}
 
-	return NewV1Client(conn), nil
+	r.Header.Set("Content-Type", duh.ContentTypeProtoBuf)
+	return c.Do(r, resp)
+}
+
+func (c *client) Forward(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) error {
+	payload, err := proto.Marshal(req)
+	if err != nil {
+		return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil)
+	}
+
+	r, err := http.NewRequestWithContext(ctx, http.MethodPost,
+		fmt.Sprintf("%s%s", c.opts.Endpoint, RPCPeerForward), bytes.NewReader(payload))
+	if err != nil {
+		return duh.NewClientError(err, nil)
+	}
+
+	c.prop.Inject(ctx, propagation.HeaderCarrier(r.Header))
+	r.Header.Set("Content-Type", duh.ContentTypeProtoBuf)
+	return c.Do(r, resp)
+}
+
+func (c *client) Update(ctx context.Context, req *UpdateRequest) error {
+	payload, err := proto.Marshal(req)
+	if err != nil {
+		return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil)
+	}
+	r, err := http.NewRequestWithContext(ctx, http.MethodPost,
+		fmt.Sprintf("%s%s", c.opts.Endpoint, RPCPeerUpdate), bytes.NewReader(payload))
+	if err != nil {
+		return duh.NewClientError(err, nil)
+	}
+
+	r.Header.Set("Content-Type", duh.ContentTypeProtoBuf)
+	return c.Do(r, &v1.Reply{})
+}
+
+var (
+	// DefaultHTTPClient enables H2C (HTTP/2 over Cleartext)
+	DefaultHTTPClient = &http.Client{
+		Transport: &http2.Transport{
+			// So http2.Transport doesn't complain the URL scheme isn't 'https'
+			AllowHTTP: true,
+			// Pretend we are dialing a TLS endpoint. (Note, we ignore the passed tls.Config)
+			DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
+				var d net.Dialer
+				return d.DialContext(ctx, network, addr)
+			},
+		},
+	}
+)
+
+// WithNoTLS returns ClientOptions suitable for use with NON-TLS clients with H2C enabled.
+func WithNoTLS(address string) ClientOptions {
+	return ClientOptions{
+		Endpoint: fmt.Sprintf("http://%s", address),
+		Client:   DefaultHTTPClient,
+	}
+}
+
+// WithTLS returns ClientOptions suitable for use with TLS clients (HTTP/2 over TLS).
+func WithTLS(tls *tls.Config, address string) ClientOptions { + return ClientOptions{ + Endpoint: fmt.Sprintf("https://%s", address), + Client: &http.Client{ + Transport: &http2.Transport{ + TLSClientConfig: tls, + }, + }, + } +} + +// WithDaemonConfig returns ClientOptions suitable for use by the Daemon +func WithDaemonConfig(conf DaemonConfig, address string) ClientOptions { + if conf.ClientTLS() == nil { + return WithNoTLS(address) + } + return WithTLS(conf.ClientTLS(), address) } // ToTimeStamp is a convenience function to convert a time.Duration diff --git a/cluster/cluster.go b/cluster/cluster.go index 3fef87e..f3368fd 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -21,7 +21,7 @@ import ( "fmt" "math/rand" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/errors" "github.com/sirupsen/logrus" @@ -36,8 +36,14 @@ const ( var daemons []*gubernator.Daemon var peers []gubernator.PeerInfo -// GetRandomPeer returns a random peer from the cluster -func GetRandomPeer(dc string) gubernator.PeerInfo { +// GetRandomPeerOptions returns gubernator.ClientOptions for a random peer in the cluster +func GetRandomPeerOptions(dc string) gubernator.ClientOptions { + info := GetRandomPeerInfo(dc) + return gubernator.WithNoTLS(info.HTTPAddress) +} + +// GetRandomPeerInfo returns a random peer from the cluster +func GetRandomPeerInfo(dc string) gubernator.PeerInfo { var local []gubernator.PeerInfo for _, p := range peers { @@ -53,6 +59,11 @@ func GetRandomPeer(dc string) gubernator.PeerInfo { return local[rand.Intn(len(local))] } +// GetRandomDaemon returns a random daemon from the cluster +func GetRandomDaemon() *gubernator.Daemon { + return daemons[rand.Intn(len(daemons))] +} + // GetPeers returns a list of all peers in the cluster func GetPeers() []gubernator.PeerInfo { return peers @@ -70,7 +81,7 @@ func PeerAt(idx int) gubernator.PeerInfo { // FindOwningPeer finds the peer which owns the rate limit with the provided name and unique key func FindOwningPeer(name, key string) (gubernator.PeerInfo, error) { - p, err := daemons[0].V1Server.GetPeer(context.Background(), name+"_"+key) + p, err := daemons[0].Service.GetPeer(context.Background(), name+"_"+key) if err != nil { return gubernator.PeerInfo{}, err } @@ -79,13 +90,13 @@ func FindOwningPeer(name, key string) (gubernator.PeerInfo, error) { // FindOwningDaemon finds the daemon which owns the rate limit with the provided name and unique key func FindOwningDaemon(name, key string) (*gubernator.Daemon, error) { - p, err := daemons[0].V1Server.GetPeer(context.Background(), name+"_"+key) + p, err := daemons[0].Service.GetPeer(context.Background(), name+"_"+key) if err != nil { return &gubernator.Daemon{}, err } for i, d := range daemons { - if d.PeerInfo.GRPCAddress == p.Info().GRPCAddress { + if d.Config().HTTPListenAddress == p.Info().HTTPAddress { return daemons[i], nil } } @@ -102,7 +113,7 @@ func ListNonOwningDaemons(name, key string) ([]*gubernator.Daemon, error) { var daemons []*gubernator.Daemon for _, d := range GetDaemons() { - if d.PeerInfo.GRPCAddress != owner.PeerInfo.GRPCAddress { + if d.Config().HTTPListenAddress != owner.Config().HTTPListenAddress { daemons = append(daemons, d) } } @@ -121,16 +132,15 @@ func NumOfDaemons() int { // Start a local cluster of gubernator servers func Start(numInstances int) error { - // Ideally we should let the socket choose the port, but then + // Ideally, we should let the socket choose the 
port, but then // some things like the logger will not be set correctly. var peers []gubernator.PeerInfo port := 1111 for i := 0; i < numInstances; i++ { peers = append(peers, gubernator.PeerInfo{ HTTPAddress: fmt.Sprintf("localhost:%d", port), - GRPCAddress: fmt.Sprintf("localhost:%d", port+1), }) - port += 2 + port += 1 } return StartWith(peers) } @@ -138,7 +148,7 @@ func Start(numInstances int) error { // Restart the cluster func Restart(ctx context.Context) error { for i := 0; i < len(daemons); i++ { - daemons[i].Close() + _ = daemons[i].Close(ctx) if err := daemons[i].Start(ctx); err != nil { return err } @@ -152,9 +162,8 @@ func StartWith(localPeers []gubernator.PeerInfo) error { for _, peer := range localPeers { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) d, err := gubernator.SpawnDaemon(ctx, gubernator.DaemonConfig{ - Logger: logrus.WithField("instance", peer.GRPCAddress), - InstanceID: peer.GRPCAddress, - GRPCListenAddress: peer.GRPCAddress, + Logger: logrus.WithField("instance", peer.HTTPAddress), + InstanceID: peer.HTTPAddress, HTTPListenAddress: peer.HTTPAddress, DataCenter: peer.DataCenter, Behaviors: gubernator.BehaviorConfig{ @@ -166,18 +175,11 @@ func StartWith(localPeers []gubernator.PeerInfo) error { }) cancel() if err != nil { - return errors.Wrapf(err, "while starting server for addr '%s'", peer.GRPCAddress) - } - - p := gubernator.PeerInfo{ - GRPCAddress: d.GRPCListeners[0].Addr().String(), - HTTPAddress: d.HTTPListener.Addr().String(), - DataCenter: peer.DataCenter, + return fmt.Errorf("while starting server for addr '%s': %w", peer.HTTPAddress, err) } - d.PeerInfo = p // Add the peers and daemons to the package level variables - peers = append(peers, p) + peers = append(peers, d.PeerInfo) daemons = append(daemons, d) } @@ -189,9 +191,9 @@ func StartWith(localPeers []gubernator.PeerInfo) error { } // Stop all daemons in the cluster -func Stop() { +func Stop(ctx context.Context) { for _, d := range daemons { - d.Close() + _ = d.Close(ctx) } peers = nil daemons = nil diff --git a/cluster/cluster_test.go b/cluster/cluster_test.go index 16f0c7f..20d579d 100644 --- a/cluster/cluster_test.go +++ b/cluster/cluster_test.go @@ -17,10 +17,11 @@ limitations under the License. 
package cluster_test import ( + "context" "testing" - gubernator "github.com/gubernator-io/gubernator/v2" - "github.com/gubernator-io/gubernator/v2/cluster" + "github.com/gubernator-io/gubernator/v3" + "github.com/gubernator-io/gubernator/v3/cluster" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -32,7 +33,9 @@ func TestStartMultipleInstances(t *testing.T) { }) err := cluster.Start(2) require.NoError(t, err) - t.Cleanup(cluster.Stop) + t.Cleanup(func() { + cluster.Stop(context.Background()) + }) assert.Equal(t, 2, len(cluster.GetPeers())) assert.Equal(t, 2, len(cluster.GetDaemons())) @@ -41,7 +44,7 @@ func TestStartMultipleInstances(t *testing.T) { func TestStartOneInstance(t *testing.T) { err := cluster.Start(1) require.NoError(t, err) - defer cluster.Stop() + defer cluster.Stop(context.Background()) assert.Equal(t, 1, len(cluster.GetPeers())) assert.Equal(t, 1, len(cluster.GetDaemons())) @@ -49,28 +52,28 @@ func TestStartOneInstance(t *testing.T) { func TestStartMultipleDaemons(t *testing.T) { peers := []gubernator.PeerInfo{ - {GRPCAddress: "localhost:1111", HTTPAddress: "localhost:1112"}, - {GRPCAddress: "localhost:2222", HTTPAddress: "localhost:2221"}} + {HTTPAddress: "localhost:1112"}, + {HTTPAddress: "localhost:2221"}} err := cluster.StartWith(peers) require.NoError(t, err) - defer cluster.Stop() + defer cluster.Stop(context.Background()) wantPeers := []gubernator.PeerInfo{ - {GRPCAddress: "127.0.0.1:1111", HTTPAddress: "127.0.0.1:1112"}, - {GRPCAddress: "127.0.0.1:2222", HTTPAddress: "127.0.0.1:2221"}, + {HTTPAddress: "127.0.0.1:1112"}, + {HTTPAddress: "127.0.0.1:2221"}, } daemons := cluster.GetDaemons() assert.Equal(t, wantPeers, cluster.GetPeers()) assert.Equal(t, 2, len(daemons)) - assert.Equal(t, "127.0.0.1:1111", daemons[0].GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", daemons[1].GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", cluster.DaemonAt(1).GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", cluster.PeerAt(1).GRPCAddress) + assert.Equal(t, "127.0.0.1:1111", daemons[0].Listener.Addr().String()) + assert.Equal(t, "127.0.0.1:2222", daemons[1].Listener.Addr().String()) + assert.Equal(t, "127.0.0.1:2222", cluster.DaemonAt(1).Listener.Addr().String()) + assert.Equal(t, "127.0.0.1:2222", cluster.PeerAt(1).HTTPAddress) } func TestStartWithInvalidPeer(t *testing.T) { - err := cluster.StartWith([]gubernator.PeerInfo{{GRPCAddress: "1111"}}) + err := cluster.StartWith([]gubernator.PeerInfo{{HTTPAddress: "1111"}}) assert.NotNil(t, err) assert.Nil(t, cluster.GetPeers()) assert.Nil(t, cluster.GetDaemons()) diff --git a/cmd/gubernator-cli/main.go b/cmd/gubernator-cli/main.go index 6541f6a..20ead97 100644 --- a/cmd/gubernator-cli/main.go +++ b/cmd/gubernator-cli/main.go @@ -26,7 +26,7 @@ import ( "time" "github.com/davecgh/go-spew/spew" - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/errors" "github.com/mailgun/holster/v4/setter" @@ -40,7 +40,7 @@ import ( var ( log *logrus.Logger - configFile, grpcAddress string + configFile, httpAddress string concurrency uint64 timeout time.Duration checksPerRequest uint64 @@ -51,7 +51,7 @@ var ( func main() { log = logrus.StandardLogger() flag.StringVar(&configFile, "config", "", "Environment config file") - flag.StringVar(&grpcAddress, "e", "", "Gubernator GRPC endpoint address") + flag.StringVar(&httpAddress, "e", "", 
"Gubernator HTTP endpoint address") flag.Uint64Var(&concurrency, "concurrency", 1, "Concurrent threads (default 1)") flag.DurationVar(&timeout, "timeout", 100*time.Millisecond, "Request timeout (default 100ms)") flag.Uint64Var(&checksPerRequest, "checks", 1, "Rate checks per request (default 1)") @@ -70,7 +70,7 @@ func main() { } ctx := context.Background() err = tracing.InitTracing(ctx, - "github.com/gubernator-io/gubernator/v2/cmd/gubernator-cli", + "github.com/gubernator-io/gubernator/v3/cmd/gubernator-cli", tracing.WithResource(res), ) if err != nil { @@ -83,7 +83,7 @@ func main() { log.Info(argsMsg) tracing.EndScope(startCtx, nil) - var client guber.V1Client + var client guber.Client err = tracing.CallScope(ctx, func(ctx context.Context) error { // Print startup message. cmdLine := strings.Join(os.Args[1:], " ") @@ -97,9 +97,9 @@ func main() { if err != nil { return err } - setter.SetOverride(&conf.GRPCListenAddress, grpcAddress) + setter.SetOverride(&conf.HTTPListenAddress, httpAddress) - if configFile == "" && grpcAddress == "" && os.Getenv("GUBER_GRPC_ADDRESS") == "" { + if configFile == "" && httpAddress == "" && os.Getenv("GUBER_GRPC_ADDRESS") == "" { return errors.New("please provide a GRPC endpoint via -e or from a config " + "file via -config or set the env GUBER_GRPC_ADDRESS") } @@ -109,18 +109,18 @@ func main() { return err } - log.WithContext(ctx).Infof("Connecting to '%s'...", conf.GRPCListenAddress) - client, err = guber.DialV1Server(conf.GRPCListenAddress, conf.ClientTLS()) + log.WithContext(ctx).Infof("Connecting to '%s'...", conf.HTTPListenAddress) + client, err = guber.NewClient(guber.WithDaemonConfig(conf, conf.HTTPListenAddress)) return err }) checkErr(err) // Generate a selection of rate limits with random limits. - var rateLimits []*guber.RateLimitReq + var rateLimits []*guber.RateLimitRequest for i := 0; i < 2000; i++ { - rateLimits = append(rateLimits, &guber.RateLimitReq{ + rateLimits = append(rateLimits, &guber.RateLimitRequest{ Name: fmt.Sprintf("gubernator-cli-%d", i), UniqueKey: guber.RandomString(10), Hits: 1, @@ -142,12 +142,12 @@ func main() { // Replay requests in endless loop. for { for i := int(0); i < len(rateLimits); i += int(checksPerRequest) { - req := &guber.GetRateLimitsReq{ + req := &guber.CheckRateLimitsRequest{ Requests: rateLimits[i:min(i+int(checksPerRequest), len(rateLimits))], } fan.Run(func(obj interface{}) error { - req := obj.(*guber.GetRateLimitsReq) + req := obj.(*guber.CheckRateLimitsRequest) if reqRate > 0 { _ = limiter.Wait(ctx) @@ -178,49 +178,45 @@ func randInt(min, max int) int { return rand.Intn(max-min) + min } -func sendRequest(ctx context.Context, client guber.V1Client, req *guber.GetRateLimitsReq) { +func sendRequest(ctx context.Context, client guber.Client, req *guber.CheckRateLimitsRequest) { ctx = tracing.StartScope(ctx) defer tracing.EndScope(ctx, nil) ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() // Now hit our cluster with the rate limits - resp, err := client.GetRateLimits(ctx, req) - cancel() - if err != nil { + var resp guber.CheckRateLimitsResponse + if err := client.CheckRateLimits(ctx, req, &resp); err != nil { log.WithContext(ctx).WithError(err).Error("Error in client.GetRateLimits") return } // Sanity checks. - if resp == nil { - log.WithContext(ctx).Error("Response object is unexpectedly nil") - return - } if resp.Responses == nil { log.WithContext(ctx).Error("Responses array is unexpectedly nil") return } - // Check for overlimit response. 
- overlimit := false + // Check for over limit response. + overLimit := false for itemNum, resp := range resp.Responses { if resp.Status == guber.Status_OVER_LIMIT { - overlimit = true + overLimit = true log.WithContext(ctx).WithField("name", req.Requests[itemNum].Name). Info("Overlimit!") } } - if overlimit { + if overLimit { span := trace.SpanFromContext(ctx) span.SetAttributes( attribute.Bool("overlimit", true), ) if !quiet { - dumpResp := spew.Sdump(resp) + dumpResp := spew.Sdump(&resp) log.WithContext(ctx).Info(dumpResp) } } diff --git a/cmd/gubernator-cluster/main.go b/cmd/gubernator-cluster/main.go index 49f23ca..278d4bb 100644 --- a/cmd/gubernator-cluster/main.go +++ b/cmd/gubernator-cluster/main.go @@ -17,12 +17,13 @@ limitations under the License. package main import ( + "context" "fmt" "os" "os/signal" - "github.com/gubernator-io/gubernator/v2" - "github.com/gubernator-io/gubernator/v2/cluster" + "github.com/gubernator-io/gubernator/v3" + "github.com/gubernator-io/gubernator/v3/cluster" "github.com/sirupsen/logrus" ) @@ -31,12 +32,12 @@ func main() { logrus.SetLevel(logrus.InfoLevel) // Start a local cluster err := cluster.StartWith([]gubernator.PeerInfo{ - {GRPCAddress: "127.0.0.1:9990", HTTPAddress: "127.0.0.1:9980"}, - {GRPCAddress: "127.0.0.1:9991", HTTPAddress: "127.0.0.1:9981"}, - {GRPCAddress: "127.0.0.1:9992", HTTPAddress: "127.0.0.1:9982"}, - {GRPCAddress: "127.0.0.1:9993", HTTPAddress: "127.0.0.1:9983"}, - {GRPCAddress: "127.0.0.1:9994", HTTPAddress: "127.0.0.1:9984"}, - {GRPCAddress: "127.0.0.1:9995", HTTPAddress: "127.0.0.1:9985"}, + {HTTPAddress: "127.0.0.1:9980"}, + {HTTPAddress: "127.0.0.1:9981"}, + {HTTPAddress: "127.0.0.1:9982"}, + {HTTPAddress: "127.0.0.1:9983"}, + {HTTPAddress: "127.0.0.1:9984"}, + {HTTPAddress: "127.0.0.1:9985"}, }) if err != nil { panic(err) @@ -49,7 +50,7 @@ func main() { signal.Notify(c, os.Interrupt) for sig := range c { if sig == os.Interrupt { - cluster.Stop() + cluster.Stop(context.Background()) os.Exit(0) } } diff --git a/cmd/gubernator/main.go b/cmd/gubernator/main.go index 8b54023..e64db6e 100644 --- a/cmd/gubernator/main.go +++ b/cmd/gubernator/main.go @@ -27,7 +27,7 @@ import ( "strings" "syscall" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/tracing" "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/sdk/resource" @@ -83,7 +83,7 @@ func Main(ctx context.Context) error { // Initialize tracing. 
err = tracing.InitTracing(ctx, - "github.com/gubernator-io/gubernator/v2", + "github.com/gubernator-io/gubernator/v3", tracing.WithLevel(gubernator.GetTracingLevel()), tracing.WithResource(res), ) @@ -117,7 +117,7 @@ func Main(ctx context.Context) error { select { case <-c: log.Info("caught signal; shutting down") - daemon.Close() + _ = daemon.Close(context.Background()) _ = tracing.CloseTracing(context.Background()) return nil case <-ctx.Done(): diff --git a/cmd/gubernator/main_test.go b/cmd/gubernator/main_test.go index 8c4e10e..65337da 100644 --- a/cmd/gubernator/main_test.go +++ b/cmd/gubernator/main_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - cli "github.com/gubernator-io/gubernator/v2/cmd/gubernator" + cli "github.com/gubernator-io/gubernator/v3/cmd/gubernator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/proxy" @@ -45,11 +45,10 @@ func TestCLI(t *testing.T) { { name: "Should start with no config provided", env: []string{ - "GUBER_GRPC_ADDRESS=localhost:8080", - "GUBER_HTTP_ADDRESS=localhost:8081", + "GUBER_HTTP_ADDRESS=localhost:8080", }, args: []string{}, - contains: "HTTP Gateway Listening on", + contains: "HTTP Listening on", }, } for _, tt := range tests { diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index c9d72d2..4e7dfe7 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -23,7 +23,7 @@ import ( "net/http" "os" - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" ) func main() { @@ -31,7 +31,8 @@ func main() { if url == "" { url = "localhost:80" } - resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/v1/HealthCheck", url)) + + resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/healthz", url)) if err != nil { panic(err) } @@ -42,7 +43,7 @@ func main() { panic(err) } - var hc guber.HealthCheckResp + var hc guber.HealthCheckResponse if err := json.Unmarshal(body, &hc); err != nil { panic(err) } diff --git a/config.go b/config.go index 19f9f06..73fe997 100644 --- a/config.go +++ b/config.go @@ -42,7 +42,6 @@ import ( "github.com/sirupsen/logrus" etcd "go.etcd.io/etcd/client/v3" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" ) // BehaviorConfig controls the handling of rate limits in the cluster @@ -73,8 +72,8 @@ type BehaviorConfig struct { type Config struct { InstanceID string - // (Required) A list of GRPC servers to register our instance with - GRPCServers []*grpc.Server + // (Optional) The PeerClient gubernator should use when making requests to other peers in the cluster. + PeerClientFactory func(PeerInfo) PeerClient // (Optional) Adjust how gubernator behaviors are configured Behaviors BehaviorConfig @@ -108,11 +107,9 @@ type Config struct { // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) Logger FieldLogger + // TODO(thrawn01): Fix Peer TLS // (Optional) The TLS config used when connecting to gubernator peers - PeerTLS *tls.Config - - // (Optional) If true, will emit traces for GRPC client requests to other peers - PeerTraceGRPC bool + // PeerTLS *tls.Config // (Optional) The number of go routine workers used to process concurrent rate limit requests // Default is set to number of CPUs. 
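
The new `PeerClientFactory` field replaces the removed `GRPCServers` list for
library users. A minimal sketch of wiring it up when embedding gubernator,
assuming the v3 types introduced in this patch (error handling kept terse):

```go
package main

import gubernator "github.com/gubernator-io/gubernator/v3"

func main() {
	conf := gubernator.Config{
		// Hand out plain-HTTP (H2C) peer clients for intra-cluster calls.
		PeerClientFactory: func(info gubernator.PeerInfo) gubernator.PeerClient {
			return gubernator.NewPeerClient(gubernator.WithNoTLS(info.HTTPAddress))
		},
	}
	if err := conf.SetDefaults(); err != nil {
		panic(err)
	}
	svc, err := gubernator.NewService(conf)
	if err != nil {
		panic(err)
	}
	_ = svc // serve it over HTTP, e.g. via NewHandler as daemon.go does
}
```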
@@ -133,7 +130,7 @@ func (c *Config) SetDefaults() error { setter.SetDefault(&c.Behaviors.GlobalPeerRequestsConcurrency, 100) - setter.SetDefault(&c.LocalPicker, NewReplicatedConsistentHash(nil, defaultReplicas)) + setter.SetDefault(&c.LocalPicker, NewReplicatedConsistentHash(nil, DefaultReplicas)) setter.SetDefault(&c.RegionPicker, NewRegionPicker(nil)) setter.SetDefault(&c.CacheSize, 50_000) @@ -151,9 +148,10 @@ func (c *Config) SetDefaults() error { } // Make a copy of the TLS config in case our caller decides to make changes - if c.PeerTLS != nil { - c.PeerTLS = c.PeerTLS.Clone() - } + // TODO(thrawn01): Fix Peer TLS + //if c.PeerTLS != nil { + // c.PeerTLS = c.PeerTLS.Clone() + //} return nil } @@ -163,15 +161,13 @@ type PeerInfo struct { DataCenter string `json:"data-center"` // (Optional) The http address:port of the peer HTTPAddress string `json:"http-address"` - // (Required) The grpc address:port of the peer - GRPCAddress string `json:"grpc-address"` // (Optional) Is true if PeerInfo is for this instance of gubernator IsOwner bool `json:"is-owner,omitempty"` } // HashKey returns the hash key used to identify this peer in the Picker. func (p PeerInfo) HashKey() string { - return p.GRPCAddress + return p.HTTPAddress } type UpdateFunc func([]PeerInfo) @@ -179,9 +175,6 @@ type UpdateFunc func([]PeerInfo) var DebugEnabled = false type DaemonConfig struct { - // (Required) The `address:port` that will accept GRPC requests - GRPCListenAddress string - // (Required) The `address:port` that will accept HTTP requests HTTPListenAddress string @@ -192,10 +185,6 @@ type DaemonConfig struct { // provide client certificate but you want to enforce mTLS in other RPCs (like in K8s) HTTPStatusListenAddress string - // (Optional) Defines the max age connection from client in seconds. - // Default is infinity - GRPCMaxConnectionAgeSeconds int - // (Optional) The `address:port` that is advertised to other Gubernator peers. // Defaults to `GRPCListenAddress` AdvertiseAddress string @@ -236,6 +225,16 @@ type DaemonConfig struct { // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) Logger FieldLogger + // (Optional) A loader from a persistent store. Allows the implementor the ability to load and save + // the contents of the cache when the gubernator instance is started and stopped + Loader Loader + + // (Optional) A persistent store implementation. Allows the implementor the ability to store the rate limits this + // instance of gubernator owns. It's up to the implementor to decide what rate limits to persist. + // For instance, an implementor might only persist rate limits that have an expiration of + // longer than 1 hour. + Store Store + // (Optional) TLS Configuration; SpawnDaemon() will modify the passed TLS config in an // attempt to build a complete TLS config if one is not provided. 
TLS *TLSConfig @@ -310,16 +309,13 @@ func SetupDaemonConfig(logger *logrus.Logger, configFile io.Reader) (DaemonConfi } // Main config - setter.SetDefault(&conf.GRPCListenAddress, os.Getenv("GUBER_GRPC_ADDRESS"), - fmt.Sprintf("%s:81", LocalHost())) setter.SetDefault(&conf.HTTPListenAddress, os.Getenv("GUBER_HTTP_ADDRESS"), fmt.Sprintf("%s:80", LocalHost())) setter.SetDefault(&conf.InstanceID, GetInstanceID()) setter.SetDefault(&conf.HTTPStatusListenAddress, os.Getenv("GUBER_STATUS_HTTP_ADDRESS"), "") - setter.SetDefault(&conf.GRPCMaxConnectionAgeSeconds, getEnvInteger(log, "GUBER_GRPC_MAX_CONN_AGE_SEC"), 0) setter.SetDefault(&conf.CacheSize, getEnvInteger(log, "GUBER_CACHE_SIZE"), 50_000) setter.SetDefault(&conf.Workers, getEnvInteger(log, "GUBER_WORKER_COUNT"), 0) - setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.GRPCListenAddress) + setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.HTTPListenAddress) setter.SetDefault(&conf.DataCenter, os.Getenv("GUBER_DATA_CENTER"), "") setter.SetDefault(&conf.MetricFlags, getEnvMetricFlags(log, "GUBER_METRIC_FLAGS")) @@ -392,10 +388,10 @@ func SetupDaemonConfig(logger *logrus.Logger, configFile io.Reader) (DaemonConfi setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.DialTimeout, getEnvDuration(log, "GUBER_ETCD_DIAL_TIMEOUT"), clock.Second*5) setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Username, os.Getenv("GUBER_ETCD_USER")) setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Password, os.Getenv("GUBER_ETCD_PASSWORD")) - setter.SetDefault(&conf.EtcdPoolConf.Advertise.GRPCAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.EtcdPoolConf.Advertise.HTTPAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) setter.SetDefault(&conf.EtcdPoolConf.Advertise.DataCenter, os.Getenv("GUBER_ETCD_DATA_CENTER"), conf.DataCenter) - setter.SetDefault(&conf.MemberListPoolConf.Advertise.GRPCAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.MemberListPoolConf.Advertise.HTTPAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) setter.SetDefault(&conf.MemberListPoolConf.MemberListAddress, os.Getenv("GUBER_MEMBERLIST_ADDRESS"), fmt.Sprintf("%s:7946", advAddr)) setter.SetDefault(&conf.MemberListPoolConf.KnownNodes, getEnvSlice("GUBER_MEMBERLIST_KNOWN_NODES"), []string{}) setter.SetDefault(&conf.MemberListPoolConf.Advertise.DataCenter, conf.DataCenter) @@ -424,7 +420,7 @@ func SetupDaemonConfig(logger *logrus.Logger, configFile io.Reader) (DaemonConfi switch pp { case "replicated-hash": - setter.SetDefault(&replicas, getEnvInteger(log, "GUBER_REPLICATED_HASH_REPLICAS"), defaultReplicas) + setter.SetDefault(&replicas, getEnvInteger(log, "GUBER_REPLICATED_HASH_REPLICAS"), DefaultReplicas) conf.Picker = NewReplicatedConsistentHash(nil, replicas) setter.SetDefault(&hash, os.Getenv("GUBER_PEER_PICKER_HASH"), "fnv1a") hashFuncs := map[string]HashString64{ diff --git a/config_test.go b/config_test.go index 290b329..65a4b25 100644 --- a/config_test.go +++ b/config_test.go @@ -10,24 +10,24 @@ import ( "github.com/stretchr/testify/require" ) -func TestParsesGrpcAddress(t *testing.T) { +func TestParsesAddress(t *testing.T) { os.Clearenv() s := ` # a comment -GUBER_GRPC_ADDRESS=10.10.10.10:9000` +GUBER_HTTP_ADDRESS=10.10.10.10:9000` daemonConfig, err := SetupDaemonConfig(logrus.StandardLogger(), strings.NewReader(s)) require.NoError(t, err) - require.Equal(t, 
"10.10.10.10:9000", daemonConfig.GRPCListenAddress) + require.Equal(t, "10.10.10.10:9000", daemonConfig.HTTPListenAddress) require.NotEmpty(t, daemonConfig.InstanceID) } -func TestDefaultGrpcAddress(t *testing.T) { +func TestDefaultAddress(t *testing.T) { os.Clearenv() s := ` # a comment` daemonConfig, err := SetupDaemonConfig(logrus.StandardLogger(), strings.NewReader(s)) require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s:81", LocalHost()), daemonConfig.GRPCListenAddress) + require.Equal(t, fmt.Sprintf("%s:80", LocalHost()), daemonConfig.HTTPListenAddress) require.NotEmpty(t, daemonConfig.InstanceID) } diff --git a/daemon.go b/daemon.go index fee5b69..74dbba4 100644 --- a/daemon.go +++ b/daemon.go @@ -20,58 +20,43 @@ import ( "context" "crypto/tls" "fmt" - "io" "log" "net" "net/http" "strings" "time" - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + "golang.org/x/net/proxy" + "github.com/mailgun/holster/v4/errors" "github.com/mailgun/holster/v4/etcdutil" "github.com/mailgun/holster/v4/setter" "github.com/mailgun/holster/v4/syncutil" - "github.com/mailgun/holster/v4/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" - "google.golang.org/protobuf/encoding/protojson" ) type Daemon struct { - GRPCListeners []net.Listener - HTTPListener net.Listener - V1Server *V1Instance - InstanceID string - PeerInfo PeerInfo - - log FieldLogger - logWriter *io.PipeWriter - pool PoolInterface - conf DaemonConfig - httpSrv *http.Server - httpSrvNoMTLS *http.Server - grpcSrvs []*grpc.Server - wg syncutil.WaitGroup - statsHandler *GRPCStatsHandler - promRegister *prometheus.Registry - gwCancel context.CancelFunc - instanceConf Config - client V1Client + wg syncutil.WaitGroup + httpServers []*http.Server + pool PoolInterface + conf DaemonConfig + Listener net.Listener + HealthListener net.Listener + PeerInfo PeerInfo + log FieldLogger + Service *Service + InstanceID string + client Client } // SpawnDaemon starts a new gubernator daemon according to the provided DaemonConfig. -// This function will block until the daemon responds to connections as specified -// by GRPCListenAddress and HTTPListenAddress +// This function will block until the daemon responds to connections to HTTPListenAddress func SpawnDaemon(ctx context.Context, conf DaemonConfig) (*Daemon, error) { - s := &Daemon{ InstanceID: conf.InstanceID, log: conf.Logger, @@ -80,409 +65,319 @@ func SpawnDaemon(ctx context.Context, conf DaemonConfig) (*Daemon, error) { return s, s.Start(ctx) } -func (s *Daemon) Start(ctx context.Context) error { +func (d *Daemon) Start(ctx context.Context) error { var err error - setter.SetDefault(&s.log, logrus.WithFields(logrus.Fields{ - "instance": s.conf.InstanceID, - "category": "gubernator", + setter.SetDefault(&d.log, logrus.WithFields(logrus.Fields{ + "service-id": d.conf.InstanceID, + "category": "gubernator", })) - s.promRegister = prometheus.NewRegistry() + registry := prometheus.NewRegistry() // The LRU cache for storing rate limits. 
cacheCollector := NewLRUCacheCollector() - if err := s.promRegister.Register(cacheCollector); err != nil { - return errors.Wrap(err, "during call to promRegister.Register()") - } - - cacheFactory := func(maxSize int) Cache { - cache := NewLRUCache(maxSize) - cacheCollector.AddCache(cache) - return cache - } - - // Handler to collect duration and API access metrics for GRPC - s.statsHandler = NewGRPCStatsHandler() - _ = s.promRegister.Register(s.statsHandler) - - var filters []otelgrpc.Option - // otelgrpc deprecated use of interceptors in v0.45.0 in favor of stats - // handlers to propagate trace context. - // However, stats handlers do not have a filter feature. - // See: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4575 - // if s.conf.TraceLevel != tracing.DebugLevel { - // filters = []otelgrpc.Option{ - // otelgrpc.WithInterceptorFilter(TraceLevelInfoFilter), - // } - // } - - opts := []grpc.ServerOption{ - grpc.StatsHandler(s.statsHandler), - grpc.MaxRecvMsgSize(1024 * 1024), - - // OpenTelemetry instrumentation on gRPC endpoints. - grpc.StatsHandler(otelgrpc.NewServerHandler(filters...)), - } + registry.MustRegister(cacheCollector) - if s.conf.GRPCMaxConnectionAgeSeconds > 0 { - opts = append(opts, grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionAge: time.Second * time.Duration(s.conf.GRPCMaxConnectionAgeSeconds), - MaxConnectionAgeGrace: time.Second * time.Duration(s.conf.GRPCMaxConnectionAgeSeconds), - })) - } - - if err := SetupTLS(s.conf.TLS); err != nil { + if err := SetupTLS(d.conf.TLS); err != nil { return err } - if s.conf.ServerTLS() != nil { - // Create two GRPC server instances, one for TLS and the other for the API Gateway - opts2 := append(opts, grpc.Creds(credentials.NewTLS(s.conf.ServerTLS()))) - s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(opts2...)) - } - s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(opts...)) - - // Registers a new gubernator instance with the GRPC server - s.instanceConf = Config{ - PeerTraceGRPC: s.conf.TraceLevel >= tracing.DebugLevel, - PeerTLS: s.conf.ClientTLS(), - DataCenter: s.conf.DataCenter, - LocalPicker: s.conf.Picker, - GRPCServers: s.grpcSrvs, - Logger: s.log, - CacheFactory: cacheFactory, - Behaviors: s.conf.Behaviors, - CacheSize: s.conf.CacheSize, - Workers: s.conf.Workers, - InstanceID: s.conf.InstanceID, - } - - s.V1Server, err = NewV1Instance(s.instanceConf) - if err != nil { - return errors.Wrap(err, "while creating new gubernator instance") - } - - // V1Server instance also implements prometheus.Collector interface - _ = s.promRegister.Register(s.V1Server) - - l, err := net.Listen("tcp", s.conf.GRPCListenAddress) + d.Service, err = NewService(Config{ + PeerClientFactory: func(info PeerInfo) PeerClient { + return NewPeerClient(WithDaemonConfig(d.conf, info.HTTPAddress)) + }, + CacheFactory: func(maxSize int) Cache { + cache := NewLRUCache(maxSize) + cacheCollector.AddCache(cache) + return cache + }, + DataCenter: d.conf.DataCenter, + CacheSize: d.conf.CacheSize, + Behaviors: d.conf.Behaviors, + Workers: d.conf.Workers, + LocalPicker: d.conf.Picker, + Loader: d.conf.Loader, + Store: d.conf.Store, + Logger: d.log, + }) if err != nil { - return errors.Wrap(err, "while starting GRPC listener") + return errors.Wrap(err, "while creating new gubernator service") } - s.GRPCListeners = append(s.GRPCListeners, l) - - // Start serving GRPC Requests - s.wg.Go(func() { - s.log.Infof("GRPC Listening on %s ...", l.Addr().String()) - if err := s.grpcSrvs[0].Serve(l); err != nil { - 
s.log.WithError(err).Error("while starting GRPC server") - } - }) - var gatewayAddr string - if s.conf.ServerTLS() != nil { - // We start a new local GRPC instance because we can't guarantee the TLS cert provided by the - // user has localhost or the local interface included in the certs' valid hostnames. If they are not - // included, it means the local gateway connections will not be able to connect. - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return errors.Wrap(err, "while starting GRPC Gateway listener") - } - s.GRPCListeners = append(s.GRPCListeners, l) - - s.wg.Go(func() { - s.log.Infof("GRPC Gateway Listening on %s ...", l.Addr()) - if err := s.grpcSrvs[1].Serve(l); err != nil { - s.log.WithError(err).Error("while starting GRPC Gateway server") - } - }) - gatewayAddr = l.Addr().String() - } else { - gatewayAddr, err = ResolveHostIP(s.conf.GRPCListenAddress) - if err != nil { - return errors.Wrap(err, "while resolving GRPC gateway client address") - } - } + // Service implements prometheus.Collector interface + registry.MustRegister(d.Service) - switch s.conf.PeerDiscoveryType { + switch d.conf.PeerDiscoveryType { case "k8s": // Source our list of peers from kubernetes endpoint API - s.conf.K8PoolConf.OnUpdate = s.V1Server.SetPeers - s.pool, err = NewK8sPool(s.conf.K8PoolConf) + d.conf.K8PoolConf.OnUpdate = d.Service.SetPeers + d.pool, err = NewK8sPool(d.conf.K8PoolConf) if err != nil { return errors.Wrap(err, "while querying kubernetes API") } case "etcd": - s.conf.EtcdPoolConf.OnUpdate = s.V1Server.SetPeers + d.conf.EtcdPoolConf.OnUpdate = d.Service.SetPeers // Register ourselves with other peers via ETCD - s.conf.EtcdPoolConf.Client, err = etcdutil.NewClient(s.conf.EtcdPoolConf.EtcdConfig) + d.conf.EtcdPoolConf.Client, err = etcdutil.NewClient(d.conf.EtcdPoolConf.EtcdConfig) if err != nil { return errors.Wrap(err, "while connecting to etcd") } - s.pool, err = NewEtcdPool(s.conf.EtcdPoolConf) + d.pool, err = NewEtcdPool(d.conf.EtcdPoolConf) if err != nil { return errors.Wrap(err, "while creating etcd pool") } case "dns": - s.conf.DNSPoolConf.OnUpdate = s.V1Server.SetPeers - s.pool, err = NewDNSPool(s.conf.DNSPoolConf) + d.conf.DNSPoolConf.OnUpdate = d.Service.SetPeers + d.pool, err = NewDNSPool(d.conf.DNSPoolConf) if err != nil { return errors.Wrap(err, "while creating the DNS pool") } case "member-list": - s.conf.MemberListPoolConf.OnUpdate = s.V1Server.SetPeers - s.conf.MemberListPoolConf.Logger = s.log + d.conf.MemberListPoolConf.OnUpdate = d.Service.SetPeers + d.conf.MemberListPoolConf.Logger = d.log // Register peer on the member list - s.pool, err = NewMemberListPool(ctx, s.conf.MemberListPoolConf) + d.pool, err = NewMemberListPool(ctx, d.conf.MemberListPoolConf) if err != nil { return errors.Wrap(err, "while creating member list pool") } } - // We override the default Marshaller to enable the `UseProtoNames` option. - // We do this is because the default JSONPb in 2.5.0 marshals proto structs using - // `camelCase`, while all the JSON annotations are `under_score`. - // Our protobuf files follow the convention described here - // https://developers.google.com/protocol-buffers/docs/style#message-and-field-names - // Camel case breaks unmarshalling our GRPC gateway responses with protobuf structs. 
-	gateway := runtime.NewServeMux(
-		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
-			MarshalOptions: protojson.MarshalOptions{
-				UseProtoNames:   true,
-				EmitUnpopulated: true,
-			},
-			UnmarshalOptions: protojson.UnmarshalOptions{
-				DiscardUnknown: true,
-			},
-		}),
-	)
-
-	// Set up an JSON Gateway API for our GRPC methods
-	var gwCtx context.Context
-	gwCtx, s.gwCancel = context.WithCancel(context.Background())
-	err = RegisterV1HandlerFromEndpoint(gwCtx, gateway, gatewayAddr,
-		[]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())})
-	if err != nil {
-		return errors.Wrap(err, "while registering GRPC gateway handler")
-	}
-
-	// Serve the JSON Gateway and metrics handlers via standard HTTP/1
-	mux := http.NewServeMux()
-
 	// Optionally collect process metrics
-	if s.conf.MetricFlags.Has(FlagOSMetrics) {
-		s.log.Debug("Collecting OS Metrics")
-		s.promRegister.MustRegister(collectors.NewProcessCollector(
+	if d.conf.MetricFlags.Has(FlagOSMetrics) {
+		d.log.Debug("Collecting OS Metrics")
+		registry.MustRegister(collectors.NewProcessCollector(
 			collectors.ProcessCollectorOpts{Namespace: "gubernator"},
 		))
 	}
 
 	// Optionally collect golang internal metrics
-	if s.conf.MetricFlags.Has(FlagGolangMetrics) {
-		s.log.Debug("Collecting Golang Metrics")
-		s.promRegister.MustRegister(collectors.NewGoCollector())
+	if d.conf.MetricFlags.Has(FlagGolangMetrics) {
+		d.log.Debug("Collecting Golang Metrics")
+		registry.MustRegister(collectors.NewGoCollector())
+	}
+
+	handler := NewHandler(d.Service, promhttp.InstrumentMetricHandler(
+		registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}),
+	))
+	registry.MustRegister(handler)
+
+	if d.conf.ServerTLS() != nil {
+		if err := d.spawnHTTPS(ctx, handler); err != nil {
+			return err
+		}
+		if d.conf.HTTPStatusListenAddress != "" {
+			if err := d.spawnHTTPHealthCheck(ctx, handler, registry); err != nil {
+				return err
+			}
+		}
+	} else {
+		if err := d.spawnHTTP(ctx, handler); err != nil {
+			return err
+		}
 	}
 
+	d.PeerInfo = PeerInfo{
+		HTTPAddress: d.Listener.Addr().String(),
+		DataCenter:  d.conf.DataCenter,
+	}
+
+	return nil
+}
+
+// spawnHTTPHealthCheck spawns a plain HTTP listener for use by orchestration
+// systems to perform health checks and collect metrics when TLS and client
+// certs are in use.
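+// Only /healthz and /metrics are served on this listener, and client
+// certificates are not requested, so orchestration probes that cannot present
+// a cert can still connect.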
+func (d *Daemon) spawnHTTPHealthCheck(ctx context.Context, h *Handler, r *prometheus.Registry) error {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/healthz", h.HealthZ)
 	mux.Handle("/metrics", promhttp.InstrumentMetricHandler(
-		s.promRegister, promhttp.HandlerFor(s.promRegister, promhttp.HandlerOpts{}),
+		r, promhttp.HandlerFor(r, promhttp.HandlerOpts{}),
 	))
-	mux.Handle("/", gateway)
-	s.logWriter = newLogWriter(s.log)
-	log := log.New(s.logWriter, "", 0)
-	s.httpSrv = &http.Server{Addr: s.conf.HTTPListenAddress, Handler: mux, ErrorLog: log}
+	srv := &http.Server{
+		ErrorLog:  log.New(newLogWriter(d.log), "", 0),
+		Addr:      d.conf.HTTPStatusListenAddress,
+		TLSConfig: d.conf.ServerTLS().Clone(),
+		Handler:   mux,
+	}
 
-	s.HTTPListener, err = net.Listen("tcp", s.conf.HTTPListenAddress)
+	srv.TLSConfig.ClientAuth = tls.NoClientCert
+	var err error
+	d.HealthListener, err = net.Listen("tcp", d.conf.HTTPStatusListenAddress)
 	if err != nil {
-		return errors.Wrap(err, "while starting HTTP listener")
+		return errors.Wrap(err, "while starting HTTP listener for health metrics")
 	}
-	httpListenerAddr := s.HTTPListener.Addr().String()
-	addrs := []string{httpListenerAddr}
-
-	if s.conf.ServerTLS() != nil {
-
-		// If configured, start another listener at configured address and server only
-		// /v1/HealthCheck while not requesting or verifying client certificate.
-		if s.conf.HTTPStatusListenAddress != "" {
-			muxNoMTLS := http.NewServeMux()
-			muxNoMTLS.Handle("/v1/HealthCheck", gateway)
-			s.httpSrvNoMTLS = &http.Server{
-				Addr:      s.conf.HTTPStatusListenAddress,
-				Handler:   muxNoMTLS,
-				ErrorLog:  log,
-				TLSConfig: s.conf.ServerTLS().Clone(),
-			}
-			s.httpSrvNoMTLS.TLSConfig.ClientAuth = tls.NoClientCert
-			httpListener, err := net.Listen("tcp", s.conf.HTTPStatusListenAddress)
-			if err != nil {
-				return errors.Wrap(err, "while starting HTTP listener for health metric")
+	d.wg.Go(func() {
+		d.log.Infof("HTTPS Health Check Listening on %s ...", d.conf.HTTPStatusListenAddress)
+		if err := srv.ServeTLS(d.HealthListener, "", ""); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
 				d.log.WithError(err).Error("while starting TLS Status HTTP server")
 			}
-			httpAddr := httpListener.Addr().String()
-			addrs = append(addrs, httpAddr)
-			s.wg.Go(func() {
-				s.log.Infof("HTTPS Status Handler Listening on %s ...", httpAddr)
-				if err := s.httpSrvNoMTLS.ServeTLS(httpListener, "", ""); err != nil {
-					if !errors.Is(err, http.ErrServerClosed) {
-						s.log.WithError(err).Error("while starting TLS Status HTTP server")
-					}
-				}
-			})
 		}
+	})
 
-	// This is to avoid any race conditions that might occur
-	// since the tls config is a shared pointer.
-	s.httpSrv.TLSConfig = s.conf.ServerTLS().Clone()
-	s.wg.Go(func() {
-		s.log.Infof("HTTPS Gateway Listening on %s ...", httpListenerAddr)
-		if err := s.httpSrv.ServeTLS(s.HTTPListener, "", ""); err != nil {
-			if !errors.Is(err, http.ErrServerClosed) {
-				s.log.WithError(err).Error("while starting TLS HTTP server")
-			}
-		}
-	})
-	} else {
-		s.wg.Go(func() {
-			s.log.Infof("HTTP Gateway Listening on %s ...", httpListenerAddr)
-			if err := s.httpSrv.Serve(s.HTTPListener); err != nil {
-				if !errors.Is(err, http.ErrServerClosed) {
-					s.log.WithError(err).Error("while starting HTTP server")
-				}
-			}
-		})
+	if err := WaitForConnect(ctx, d.HealthListener.Addr().String(), nil); err != nil {
+		return err
 	}
 
-	// Validate we can reach the GRPC and HTTP endpoints before returning
-	for _, l := range s.GRPCListeners {
-		addrs = append(addrs, l.Addr().String())
+	d.httpServers = append(d.httpServers, srv)
+	return nil
+}
+
+func (d *Daemon) spawnHTTPS(ctx context.Context, mux http.Handler) error {
+	srv := &http.Server{
+		ErrorLog:  log.New(newLogWriter(d.log), "", 0),
+		TLSConfig: d.conf.ServerTLS().Clone(),
+		Addr:      d.conf.HTTPListenAddress,
+		Handler:   mux,
+	}
+
+	var err error
+	d.Listener, err = net.Listen("tcp", d.conf.HTTPListenAddress)
+	if err != nil {
+		return errors.Wrap(err, "while starting HTTPS listener")
 	}
-	if err := WaitForConnect(ctx, addrs); err != nil {
+
+	d.wg.Go(func() {
+		d.log.Infof("HTTPS Listening on %s ...", d.conf.HTTPListenAddress)
+		if err := srv.ServeTLS(d.Listener, "", ""); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
+				d.log.WithError(err).Error("while starting TLS HTTP server")
+			}
+		}
+	})
+	if err := WaitForConnect(ctx, d.Listener.Addr().String(), d.conf.ClientTLS()); err != nil {
 		return err
 	}
+	d.httpServers = append(d.httpServers, srv)
+
 	return nil
 }
 
-// Close gracefully closes all server connections and listening sockets
-func (s *Daemon) Close() {
-	if s.httpSrv == nil && s.httpSrvNoMTLS == nil {
-		return
+func (d *Daemon) spawnHTTP(ctx context.Context, h http.Handler) error {
+	// TODO(thrawn01): Remove HTTP/2 support, HTTP/1 is currently faster than HTTP/2
+
+	// Support H2C (HTTP/2 ClearText)
+	// See https://github.com/thrawn01/h2c-golang-example
+	h2s := &http2.Server{}
+
+	srv := &http.Server{
+		ErrorLog: log.New(newLogWriter(d.log), "", 0),
+		Addr:     d.conf.HTTPListenAddress,
+		Handler:  h2c.NewHandler(h, h2s),
+	}
+	var err error
+	d.Listener, err = net.Listen("tcp", d.conf.HTTPListenAddress)
+	if err != nil {
+		return errors.Wrap(err, "while starting HTTP listener")
 	}
 
-	if s.pool != nil {
-		s.pool.Close()
+	d.wg.Go(func() {
+		d.log.Infof("HTTP Listening on %s ...", d.conf.HTTPListenAddress)
+		if err := srv.Serve(d.Listener); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
+				d.log.WithError(err).Error("while starting HTTP server")
+			}
+		}
+	})
+
+	if err := WaitForConnect(ctx, d.Listener.Addr().String(), nil); err != nil {
+		return err
 	}
 
-	s.log.Infof("HTTP Gateway close for %s ...", s.conf.HTTPListenAddress)
-	_ = s.httpSrv.Shutdown(context.Background())
-	if s.httpSrvNoMTLS != nil {
-		s.log.Infof("HTTP Status Gateway close for %s ...", s.conf.HTTPStatusListenAddress)
-		_ = s.httpSrvNoMTLS.Shutdown(context.Background())
+	d.httpServers = append(d.httpServers, srv)
+	return nil
+}
+
+// Close gracefully closes all server connections and listening sockets
+func (d *Daemon) Close(ctx context.Context) error {
+	if len(d.httpServers) == 0 {
+		return nil
+	}
+
+	for _, srv := range d.httpServers {
+		d.log.Infof("Shutting down server %s ...",
			srv.Addr)
+		_ = srv.Shutdown(ctx)
 	}
 
-	for i, srv := range s.grpcSrvs {
-		s.log.Infof("GRPC close for %s ...", s.GRPCListeners[i].Addr())
-		srv.GracefulStop()
+	if err := d.Service.Close(ctx); err != nil {
+		return err
 	}
 
-	s.logWriter.Close()
-	_ = s.V1Server.Close()
-	s.wg.Stop()
-	s.statsHandler.Close()
-	s.gwCancel()
-	s.httpSrv = nil
-	s.httpSrvNoMTLS = nil
-	s.grpcSrvs = nil
+	d.httpServers = nil
+	return nil
 }
 
 // SetPeers sets the peers for this daemon
-func (s *Daemon) SetPeers(in []PeerInfo) {
+func (d *Daemon) SetPeers(in []PeerInfo) {
 	peers := make([]PeerInfo, len(in))
 	copy(peers, in)
 
 	for i, p := range peers {
-		if s.conf.GRPCListenAddress == p.GRPCAddress {
+		if d.conf.AdvertiseAddress == p.HTTPAddress {
 			peers[i].IsOwner = true
 		}
 	}
-	s.V1Server.SetPeers(peers)
+	d.Service.SetPeers(peers)
 }
 
 // Config returns the current config for this Daemon
-func (s *Daemon) Config() DaemonConfig {
-	return s.conf
+func (d *Daemon) Config() DaemonConfig {
+	return d.conf
 }
 
 // Peers returns the peers this daemon knows about
-func (s *Daemon) Peers() []PeerInfo {
+func (d *Daemon) Peers() []PeerInfo {
 	var peers []PeerInfo
-	for _, client := range s.V1Server.GetPeerList() {
+	for _, client := range d.Service.GetPeerList() {
 		peers = append(peers, client.Info())
 	}
 	return peers
 }
 
-func (s *Daemon) MustClient() V1Client {
-	c, err := s.Client()
+func (d *Daemon) MustClient() Client {
+	c, err := d.Client()
 	if err != nil {
-		panic(fmt.Sprintf("[%s] failed to init daemon client - '%s'", s.InstanceID, err))
+		panic(fmt.Sprintf("[%s] failed to init daemon client - '%s'", d.InstanceID, err))
 	}
 	return c
 }
 
-func (s *Daemon) Client() (V1Client, error) {
-	if s.client != nil {
-		return s.client, nil
+func (d *Daemon) Client() (Client, error) {
+	if d.client != nil {
+		return d.client, nil
 	}
+	return NewClient(WithNoTLS(d.Listener.Addr().String()))
+}
 
-	conn, err := grpc.DialContext(context.Background(),
-		fmt.Sprintf("static:///%s", s.PeerInfo.GRPCAddress),
-		grpc.WithResolvers(NewStaticBuilder()),
-		grpc.WithTransportCredentials(insecure.NewCredentials()))
-	if err != nil {
-		return nil, err
+// WaitForConnect waits until the passed address is accepting connections.
+// It will continue to attempt a connection until context is canceled.
+func WaitForConnect(ctx context.Context, address string, cfg *tls.Config) error {
+	if address == "" {
+		return fmt.Errorf("WaitForConnect() requires a valid address")
 	}
-	s.client = NewV1Client(conn)
-	return s.client, nil
-}
 
-// WaitForConnect returns nil if the list of addresses is listening
-// for connections; will block until context is cancelled.
-func WaitForConnect(ctx context.Context, addresses []string) error {
-	var d net.Dialer
-	var errs []error
+	var errs []string
 	for {
-		errs = nil
-		for _, addr := range addresses {
-			if addr == "" {
-				continue
-			}
-
-			// TODO: golang 1.15.3 introduces tls.DialContext(). When we are ready to drop
-			// support for older versions we can detect tls and use the tls.DialContext to
-			// avoid the `http: TLS handshake error` we get when using TLS.
- conn, err := d.DialContext(ctx, "tcp", addr) - if err != nil { - errs = append(errs, err) - continue - } - _ = conn.Close() + var d proxy.ContextDialer + if cfg != nil { + d = &tls.Dialer{Config: cfg} + } else { + d = &net.Dialer{} } - - if len(errs) == 0 { - break + conn, err := d.DialContext(ctx, "tcp", address) + if err == nil { + _ = conn.Close() + return nil } - - <-ctx.Done() - return ctx.Err() - } - - if len(errs) != 0 { - var errStrings []string - for _, err := range errs { - errStrings = append(errStrings, err.Error()) + errs = append(errs, err.Error()) + if ctx.Err() != nil { + errs = append(errs, ctx.Err().Error()) + return errors.New(strings.Join(errs, "\n")) } - return errors.New(strings.Join(errStrings, "\n")) + time.Sleep(time.Millisecond * 100) + continue } - return nil } diff --git a/dns.go b/dns.go index 024b128..a0126ae 100644 --- a/dns.go +++ b/dns.go @@ -29,8 +29,8 @@ import ( "github.com/sirupsen/logrus" ) -// Adapted from TimothyYe/godns // DNSResolver represents a dns resolver +// Adapted from TimothyYe/godns type DNSResolver struct { Servers []string random *rand.Rand @@ -118,7 +118,7 @@ type DNSPoolConfig struct { // (Required) Filesystem path to "/etc/resolv.conf", override for testing ResolvConf string - // (Required) Own GRPC address + // (Required) Own advertise address OwnAddress string // (Required) Called when the list of gubernators in the pool updates @@ -138,7 +138,7 @@ func NewDNSPool(conf DNSPoolConfig) (*DNSPool, error) { setter.SetDefault(&conf.Logger, logrus.WithField("category", "gubernator")) if conf.OwnAddress == "" { - return nil, errors.New("Advertise.GRPCAddress is required") + return nil, errors.New("AdvertiseAddress is required") } ctx, cancel := context.WithCancel(context.Background()) @@ -157,12 +157,11 @@ func peer(ip string, self string, ipv6 bool) PeerInfo { if ipv6 { ip = "[" + ip + "]" } - grpc := ip + ":81" + addr := ip + ":80" return PeerInfo{ DataCenter: "", - HTTPAddress: ip + ":80", - GRPCAddress: grpc, - IsOwner: grpc == self, + HTTPAddress: addr, + IsOwner: addr == self, } } diff --git a/etcd.go b/etcd.go index 336dfb1..284c66d 100644 --- a/etcd.go +++ b/etcd.go @@ -74,8 +74,8 @@ func NewEtcdPool(conf EtcdPoolConfig) (*EtcdPool, error) { setter.SetDefault(&conf.KeyPrefix, defaultBaseKey) setter.SetDefault(&conf.Logger, logrus.WithField("category", "gubernator")) - if conf.Advertise.GRPCAddress == "" { - return nil, errors.New("Advertise.GRPCAddress is required") + if conf.Advertise.HTTPAddress == "" { + return nil, errors.New("Advertise.HTTPAddress is required") } if conf.Client == nil { @@ -150,7 +150,7 @@ func (e *EtcdPool) collectPeers(revision *int64) error { // Collect all the peers for _, v := range resp.Kvs { p := e.unMarshallValue(v.Value) - peers[p.GRPCAddress] = p + peers[p.HTTPAddress] = p } e.peers = peers @@ -165,7 +165,7 @@ func (e *EtcdPool) unMarshallValue(v []byte) PeerInfo { // for backward compatible with older gubernator versions if err := json.Unmarshal(v, &p); err != nil { e.log.WithError(err).Errorf("while unmarshalling peer info from key value") - return PeerInfo{GRPCAddress: string(v)} + return PeerInfo{HTTPAddress: string(v)} } return p } @@ -219,7 +219,7 @@ func (e *EtcdPool) watch() error { } func (e *EtcdPool) register(peer PeerInfo) error { - instanceKey := e.conf.KeyPrefix + peer.GRPCAddress + instanceKey := e.conf.KeyPrefix + peer.HTTPAddress e.log.Infof("Registering peer '%#v' with etcd", peer) b, err := json.Marshal(peer) @@ -323,7 +323,7 @@ func (e *EtcdPool) callOnUpdate() { var peers 
[]PeerInfo for _, p := range e.peers { - if p.GRPCAddress == e.conf.Advertise.GRPCAddress { + if p.HTTPAddress == e.conf.Advertise.HTTPAddress { p.IsOwner = true } peers = append(peers, p) @@ -332,7 +332,7 @@ func (e *EtcdPool) callOnUpdate() { e.conf.OnUpdate(peers) } -// Get peers list from etcd. +// GetPeers returns a list of peers from etcd. func (e *EtcdPool) GetPeers(ctx context.Context) ([]PeerInfo, error) { keyPrefix := e.conf.KeyPrefix diff --git a/functional_test.go b/functional_test.go index 7161076..58658ce 100644 --- a/functional_test.go +++ b/functional_test.go @@ -17,7 +17,6 @@ limitations under the License. package gubernator_test import ( - "bytes" "context" "fmt" "io" @@ -31,8 +30,8 @@ import ( "testing" "time" - guber "github.com/gubernator-io/gubernator/v2" - "github.com/gubernator-io/gubernator/v2/cluster" + guber "github.com/gubernator-io/gubernator/v3" + "github.com/gubernator-io/gubernator/v3/cluster" "github.com/mailgun/errors" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/syncutil" @@ -42,8 +41,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" json "google.golang.org/protobuf/encoding/protojson" ) @@ -56,15 +53,15 @@ func TestMain(m *testing.M) { } code := m.Run() - cluster.Stop() + cluster.Stop(context.Background()) // os.Exit doesn't run deferred functions os.Exit(code) } func TestOverTheLimit(t *testing.T) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) tests := []struct { Remaining int64 @@ -85,8 +82,9 @@ func TestOverTheLimit(t *testing.T) { } for _, test := range tests { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_over_limit", UniqueKey: "account:1234", @@ -97,11 +95,12 @@ func TestOverTheLimit(t *testing.T) { Behavior: 0, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) rl := resp.Responses[0] + assert.Equal(t, "", rl.Error) assert.Equal(t, test.Status, rl.Status) assert.Equal(t, test.Remaining, rl.Remaining) assert.Equal(t, int64(2), rl.Limit) @@ -116,12 +115,13 @@ func TestMultipleAsync(t *testing.T) { // need to be changed. We want the test to forward both rate limits to other // nodes in the cluster. 
- t.Logf("Asking Peer: %s", cluster.GetPeers()[0].GRPCAddress) - client, errs := guber.DialV1Server(cluster.GetPeers()[0].GRPCAddress, nil) - require.NoError(t, errs) + t.Logf("Asking Peer: %s", cluster.GetPeers()[0].HTTPAddress) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetPeers()[0].HTTPAddress)) + require.Nil(t, errs) - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_multiple_async", UniqueKey: "account:9234", @@ -141,8 +141,8 @@ func TestMultipleAsync(t *testing.T) { Behavior: 0, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) require.Len(t, resp.Responses, 2) @@ -160,9 +160,9 @@ func TestMultipleAsync(t *testing.T) { func TestTokenBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - addr := cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress - client, err := guber.DialV1Server(addr, nil) - require.NoError(t, err) + addr := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + client, errs := guber.NewClient(guber.WithNoTLS(addr)) + require.Nil(t, errs) tests := []struct { name string @@ -192,8 +192,9 @@ func TestTokenBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket", UniqueKey: "account:1234", @@ -203,8 +204,8 @@ func TestTokenBucket(t *testing.T) { Hits: 1, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) rl := resp.Responses[0] @@ -221,8 +222,8 @@ func TestTokenBucket(t *testing.T) { func TestTokenBucketGregorian(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Name string @@ -266,8 +267,9 @@ func TestTokenBucketGregorian(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket_greg", UniqueKey: "account:12345", @@ -278,8 +280,8 @@ func TestTokenBucketGregorian(t *testing.T) { Limit: 60, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) rl := resp.Responses[0] @@ -296,9 +298,9 @@ func TestTokenBucketGregorian(t *testing.T) { func TestTokenBucketNegativeHits(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - addr := cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress - client, err := guber.DialV1Server(addr, nil) - require.NoError(t, err) + addr := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + client, errs := guber.NewClient(guber.WithNoTLS(addr)) + require.Nil(t, errs) tests := []struct { name string @@ -339,8 +341,9 @@ func 
TestTokenBucketNegativeHits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket_negative", UniqueKey: "account:12345", @@ -350,8 +353,8 @@ func TestTokenBucketNegativeHits(t *testing.T) { Hits: tt.Hits, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) rl := resp.Responses[0] @@ -367,8 +370,8 @@ func TestTokenBucketNegativeHits(t *testing.T) { func TestDrainOverLimit(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) tests := []struct { Name string @@ -404,8 +407,9 @@ func TestDrainOverLimit(t *testing.T) { for _, test := range tests { ctx := context.Background() t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_drain_over_limit", UniqueKey: fmt.Sprintf("account:1234:%d", idx), @@ -416,7 +420,7 @@ func TestDrainOverLimit(t *testing.T) { Limit: 10, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -434,14 +438,15 @@ func TestDrainOverLimit(t *testing.T) { func TestTokenBucketRequestMoreThanAvailable(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) - sendHit := func(status guber.Status, remain int64, hit int64) *guber.RateLimitResp { + sendHit := func(status guber.Status, remain int64, hit int64) *guber.RateLimitResponse { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_more_than_available", UniqueKey: "account:123456", @@ -451,7 +456,7 @@ func TestTokenBucketRequestMoreThanAvailable(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err, hit) assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, status, resp.Responses[0].Status) @@ -477,8 +482,8 @@ func TestTokenBucketRequestMoreThanAvailable(t *testing.T) { func TestLeakyBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Name string @@ -575,8 +580,9 @@ func TestLeakyBucket(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp 
guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket", UniqueKey: "account:1234", @@ -586,7 +592,7 @@ func TestLeakyBucket(t *testing.T) { Limit: 10, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -604,8 +610,8 @@ func TestLeakyBucket(t *testing.T) { func TestLeakyBucketWithBurst(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Name string @@ -681,8 +687,9 @@ func TestLeakyBucketWithBurst(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_with_burst", UniqueKey: "account:1234", @@ -693,7 +700,7 @@ func TestLeakyBucketWithBurst(t *testing.T) { Burst: 20, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -711,8 +718,8 @@ func TestLeakyBucketWithBurst(t *testing.T) { func TestLeakyBucketGregorian(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Name string @@ -733,7 +740,7 @@ func TestLeakyBucketGregorian(t *testing.T) { Hits: 1, Remaining: 58, Status: guber.Status_UNDER_LIMIT, - Sleep: clock.Millisecond * 1200, + Sleep: clock.Second, }, { Name: "third hit; leak one hit", @@ -743,21 +750,15 @@ func TestLeakyBucketGregorian(t *testing.T) { }, } - // Truncate to the nearest minute now := clock.Now() - now = now.Truncate(1 * time.Minute) - // So we don't start on the minute boundary - now = now.Add(time.Millisecond * 100) - name := t.Name() - key := guber.RandomString(10) - for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { - Name: name, - UniqueKey: key, + Name: "test_leaky_bucket_greg", + UniqueKey: "account:12345", Behavior: guber.Behavior_DURATION_IS_GREGORIAN, Algorithm: guber.Algorithm_LEAKY_BUCKET, Duration: guber.GregorianMinutes, @@ -765,14 +766,16 @@ func TestLeakyBucketGregorian(t *testing.T) { Limit: 60, }, }, - }) + }, &resp) + clock.Freeze(clock.Now()) require.NoError(t, err) rl := resp.Responses[0] + assert.Equal(t, test.Status, rl.Status) assert.Equal(t, test.Remaining, rl.Remaining) assert.Equal(t, int64(60), rl.Limit) - assert.Greater(t, rl.ResetTime, now.Unix()) + assert.True(t, rl.ResetTime > now.Unix()) clock.Advance(test.Sleep) }) } @@ -781,8 +784,8 @@ func TestLeakyBucketGregorian(t *testing.T) { func TestLeakyBucketNegativeHits(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) - require.NoError(t, err) + 
client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Name string @@ -823,8 +826,9 @@ func TestLeakyBucketNegativeHits(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_negative", UniqueKey: "account:12345", @@ -834,7 +838,7 @@ func TestLeakyBucketNegativeHits(t *testing.T) { Limit: 10, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -853,14 +857,15 @@ func TestLeakyBucketRequestMoreThanAvailable(t *testing.T) { // Freeze time so we don't leak during the test defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) - sendHit := func(status guber.Status, remain int64, hits int64) *guber.RateLimitResp { + sendHit := func(status guber.Status, remain int64, hits int64) *guber.RateLimitResponse { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_more_than_available", UniqueKey: "account:123456", @@ -870,7 +875,7 @@ func TestLeakyBucketRequestMoreThanAvailable(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err) assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, status, resp.Responses[0].Status) @@ -894,16 +899,16 @@ func TestLeakyBucketRequestMoreThanAvailable(t *testing.T) { } func TestMissingFields(t *testing.T) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.Nil(t, errs) tests := []struct { - Req *guber.RateLimitReq + Req *guber.RateLimitRequest Status guber.Status Error string }{ { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", UniqueKey: "account:1234", Hits: 1, @@ -914,7 +919,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_UNDER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", UniqueKey: "account:12345", Hits: 1, @@ -925,7 +930,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_OVER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ UniqueKey: "account:1234", Hits: 1, Duration: 10000, @@ -935,7 +940,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_UNDER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", Hits: 1, Duration: 10000, @@ -947,10 +952,11 @@ func TestMissingFields(t *testing.T) { } for i, test := range tests { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{test.Req}, - }) - require.NoError(t, err) + var resp 
guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{test.Req}, + }, &resp) + require.Nil(t, err) assert.Equal(t, test.Error, resp.Responses[0].Error, i) assert.Equal(t, test.Status, resp.Responses[0].Status, i) } @@ -965,11 +971,12 @@ func TestGlobalRateLimits(t *testing.T) { require.NoError(t, err) var firstResetTime int64 - sendHit := func(client guber.V1Client, status guber.Status, hits, remain int64) { + sendHit := func(client guber.Client, status guber.Status, hits, remain int64) { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -980,7 +987,7 @@ func TestGlobalRateLimits(t *testing.T) { Limit: 5, }, }, - }) + }, &resp) require.NoError(t, err) item := resp.Responses[0] assert.Equal(t, "", item.Error) @@ -1029,10 +1036,9 @@ func TestGlobalRateLimits(t *testing.T) { sendHit(peers[4].MustClient(), guber.Status_OVER_LIMIT, 1, 0) } -// Ensure global broadcast updates all peers when GetRateLimits is called on +// Ensure global broadcast updates all peers when CheckRateLimits is called on // either owner or non-owner peer. -func TestGlobalRateLimitsWithLoadBalancing(t *testing.T) { - ctx := context.Background() +func TestGlobalRateLimitsBroadcastUpdate(t *testing.T) { name := t.Name() key := guber.RandomString(10) @@ -1043,22 +1049,19 @@ func TestGlobalRateLimitsWithLoadBalancing(t *testing.T) { require.NoError(t, err) nonOwner := peers[0] - // Connect to owner and non-owner peers in round robin. - dialOpts := []grpc.DialOption{ - grpc.WithResolvers(guber.NewStaticBuilder()), - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), - } - address := fmt.Sprintf("static:///%s,%s", owner.PeerInfo.GRPCAddress, nonOwner.PeerInfo.GRPCAddress) - conn, err := grpc.DialContext(ctx, address, dialOpts...) + // Create a client for an owner and non-owner + client := owner.MustClient() require.NoError(t, err) - client := guber.NewV1Client(conn) - sendHit := func(client guber.V1Client, status guber.Status, i int) { + peerClient := nonOwner.MustClient() + require.NoError(t, err) + + sendHit := func(client guber.Client, status guber.Status, i int) { ctx, cancel := context.WithTimeout(context.Background(), 10*clock.Second) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1069,7 +1072,7 @@ func TestGlobalRateLimitsWithLoadBalancing(t *testing.T) { Limit: 2, }, }, - }) + }, &resp) require.NoError(t, err, i) item := resp.Responses[0] assert.Equal(t, "", item.Error, fmt.Sprintf("unexpected error, iteration %d", i)) @@ -1081,7 +1084,7 @@ func TestGlobalRateLimitsWithLoadBalancing(t *testing.T) { // Send two hits that should be processed by the owner and non-owner and // deplete the limit consistently. 
sendHit(client, guber.Status_UNDER_LIMIT, 1) - sendHit(client, guber.Status_UNDER_LIMIT, 2) + sendHit(peerClient, guber.Status_UNDER_LIMIT, 2) require.NoError(t, waitForBroadcast(3*clock.Second, owner, 1)) // All successive hits should return OVER_LIMIT. @@ -1101,8 +1104,9 @@ func TestGlobalRateLimitsPeerOverLimit(t *testing.T) { sendHit := func(expectedStatus guber.Status, hits, expectedRemaining int64) { ctx, cancel := context.WithTimeout(context.Background(), 10*clock.Second) defer cancel() - resp, err := peers[0].MustClient().GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := peers[0].MustClient().CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1113,7 +1117,7 @@ func TestGlobalRateLimitsPeerOverLimit(t *testing.T) { Limit: 2, }, }, - }) + }, &resp) assert.NoError(t, err) item := resp.Responses[0] assert.Equal(t, "", item.Error, "unexpected error") @@ -1149,11 +1153,12 @@ func TestGlobalRequestMoreThanAvailable(t *testing.T) { peers, err := cluster.ListNonOwningDaemons(name, key) require.NoError(t, err) - sendHit := func(client guber.V1Client, expectedStatus guber.Status, hits int64, remaining int64) { + sendHit := func(client guber.Client, expectedStatus guber.Status, hits int64, remaining int64) { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1164,7 +1169,7 @@ func TestGlobalRequestMoreThanAvailable(t *testing.T) { Limit: 100, }, }, - }) + }, &resp) assert.NoError(t, err) assert.Equal(t, "", resp.Responses[0].GetError()) assert.Equal(t, expectedStatus, resp.Responses[0].GetStatus()) @@ -1209,11 +1214,12 @@ func TestGlobalNegativeHits(t *testing.T) { peers, err := cluster.ListNonOwningDaemons(name, key) require.NoError(t, err) - sendHit := func(client guber.V1Client, status guber.Status, hits int64, remaining int64) { + sendHit := func(client guber.Client, status guber.Status, hits int64, remaining int64) { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1224,7 +1230,7 @@ func TestGlobalNegativeHits(t *testing.T) { Limit: 2, }, }, - }) + }, &resp) assert.NoError(t, err) assert.Equal(t, "", resp.Responses[0].GetError()) assert.Equal(t, status, resp.Responses[0].GetStatus()) @@ -1263,11 +1269,12 @@ func TestGlobalResetRemaining(t *testing.T) { peers, err := cluster.ListNonOwningDaemons(name, key) require.NoError(t, err) - sendHit := func(client guber.V1Client, expectedStatus guber.Status, hits int64, remaining int64) { + sendHit := func(client guber.Client, expectedStatus guber.Status, hits int64, remaining int64) { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, 
&guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1278,7 +1285,7 @@ func TestGlobalResetRemaining(t *testing.T) { Limit: 100, }, }, - }) + }, &resp) assert.NoError(t, err) assert.Equal(t, "", resp.Responses[0].GetError()) assert.Equal(t, expectedStatus, resp.Responses[0].GetStatus()) @@ -1303,8 +1310,9 @@ func TestGlobalResetRemaining(t *testing.T) { // Now reset the remaining ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) defer cancel() - resp, err := peers[0].MustClient().GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err = peers[0].MustClient().CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1315,7 +1323,7 @@ func TestGlobalResetRemaining(t *testing.T) { Limit: 100, }, }, - }) + }, &resp) require.NoError(t, err) assert.NotEqual(t, 100, resp.Responses[0].Remaining) @@ -1323,8 +1331,8 @@ func TestGlobalResetRemaining(t *testing.T) { require.NoError(t, waitForBroadcast(clock.Second*10, owner, prev+2)) // Check a different peer to ensure remaining has been reset - resp, err = peers[1].MustClient().GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + err = peers[1].MustClient().CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1335,14 +1343,14 @@ func TestGlobalResetRemaining(t *testing.T) { Limit: 100, }, }, - }) + }, &resp) require.NoError(t, err) assert.NotEqual(t, 100, resp.Responses[0].Remaining) } func TestChangeLimit(t *testing.T) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Remaining int64 @@ -1411,8 +1419,9 @@ func TestChangeLimit(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_change_limit", UniqueKey: "account:1234", @@ -1422,8 +1431,8 @@ func TestChangeLimit(t *testing.T) { Hits: 1, }, }, - }) - require.NoError(t, err) + }, &resp) + require.Nil(t, err) rl := resp.Responses[0] @@ -1436,8 +1445,8 @@ func TestChangeLimit(t *testing.T) { } func TestResetRemaining(t *testing.T) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.Nil(t, errs) tests := []struct { Remaining int64 @@ -1483,8 +1492,9 @@ func TestResetRemaining(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_reset_remaining", UniqueKey: "account:1234", @@ -1495,8 +1505,8 @@ func TestResetRemaining(t *testing.T) { Hits: 
				Hits:      1,
 				},
 			},
-		})
-		require.NoError(t, err)
+		}, &resp)
+		require.Nil(t, err)
 
 			rl := resp.Responses[0]
@@ -1508,39 +1518,89 @@
 }
 
 func TestHealthCheck(t *testing.T) {
-	// Check that the cluster is healthy to start with.
-	for _, peer := range cluster.GetDaemons() {
-		healthResp, err := peer.MustClient().HealthCheck(context.Background(), &guber.HealthCheckReq{})
+	client, err := guber.NewClient(guber.WithNoTLS(cluster.DaemonAt(0).Listener.Addr().String()))
+	require.NoError(t, err)
+
+	// Check that the cluster is healthy to start with
+	var resp guber.HealthCheckResponse
+	err = client.HealthCheck(context.Background(), &resp)
+	require.NoError(t, err)
+
+	require.Equal(t, "healthy", resp.GetStatus())
+
+	// Create a rate limit that will need to be forwarded to a peer in the cluster
+	{
+		var resp guber.CheckRateLimitsResponse
+		err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{
+			Requests: []*guber.RateLimitRequest{
+				{
+					Name:      "test_health_check",
+					UniqueKey: "account:12345",
+					Algorithm: guber.Algorithm_TOKEN_BUCKET,
+					Behavior:  guber.Behavior_BATCHING,
+					Duration:  guber.Second * 3,
+					Hits:      1,
+					Limit:     5,
+				},
+			},
+		}, &resp)
 		require.NoError(t, err)
-		assert.Equal(t, "healthy", healthResp.Status)
 	}
 
-	// Stop the cluster to ensure errors occur on our instance.
-	cluster.Stop()
+	// Stop the rest of the cluster to ensure errors occur on our instance
+	for i := 1; i < cluster.NumOfDaemons(); i++ {
+		d := cluster.DaemonAt(i)
+		require.NotNil(t, d)
+		_ = d.Close(context.Background())
+	}
 
-	// Check the health again to get back the connection error.
-	testutil.UntilPass(t, 20, 300*clock.Millisecond, func(t testutil.TestingT) {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		defer cancel()
-		for _, peer := range cluster.GetDaemons() {
-			_, err := peer.MustClient().HealthCheck(ctx, &guber.HealthCheckReq{})
-			assert.Error(t, err, "connect: connection refused")
+	// Hit the global rate limit again, this time causing a connection error
+	{
+		var resp guber.CheckRateLimitsResponse
+		err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{
+			Requests: []*guber.RateLimitRequest{
+				{
+					Name:      "test_health_check",
+					UniqueKey: "account:12345",
+					Algorithm: guber.Algorithm_TOKEN_BUCKET,
+					Behavior:  guber.Behavior_GLOBAL,
+					Duration:  guber.Second * 3,
+					Hits:      1,
+					Limit:     5,
+				},
+			},
+		}, &resp)
+		require.NoError(t, err)
+	}
+
+	testutil.UntilPass(t, 20, clock.Millisecond*300, func(t testutil.TestingT) {
+		// Check the health again to get back the connection error
+		var resp guber.HealthCheckResponse
+		err = client.HealthCheck(context.Background(), &resp)
+		if !assert.Nil(t, err) {
+			return
 		}
+
+		assert.Equal(t, "unhealthy", resp.GetStatus())
+		assert.Contains(t, resp.GetMessage(), "connect: connection refused")
 	})
 
-	// Restart so cluster is ready for next test.
- require.NoError(t, startGubernator()) + // Restart stopped instances + ctx, cancel := context.WithTimeout(context.Background(), clock.Second*15) + defer cancel() + require.NoError(t, cluster.Restart(ctx)) } func TestLeakyBucketDivBug(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() name := t.Name() key := guber.RandomString(10) - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(t, err) + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1550,7 +1610,7 @@ func TestLeakyBucketDivBug(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err) assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, guber.Status_UNDER_LIMIT, resp.Responses[0].Status) @@ -1558,8 +1618,8 @@ func TestLeakyBucketDivBug(t *testing.T) { assert.Equal(t, int64(2000), resp.Responses[0].Limit) // Should result in a rate of 0.5 - resp, err = client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: name, UniqueKey: key, @@ -1569,7 +1629,7 @@ func TestLeakyBucketDivBug(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err) assert.Equal(t, int64(1899), resp.Responses[0].Remaining) assert.Equal(t, int64(2000), resp.Responses[0].Limit) @@ -1585,95 +1645,63 @@ func TestMultiRegion(t *testing.T) { // TODO: Wait until both rate limit count show up on all datacenters } -func TestGRPCGateway(t *testing.T) { - name := t.Name() - key := guber.RandomString(10) - address := cluster.GetRandomPeer(cluster.DataCenterNone).HTTPAddress - resp, err := http.DefaultClient.Get("http://" + address + "/v1/HealthCheck") +func TestDefaultHealthZ(t *testing.T) { + address := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + resp, err := http.DefaultClient.Get("http://" + address + "/healthz") require.NoError(t, err) defer resp.Body.Close() assert.Equal(t, http.StatusOK, resp.StatusCode) b, err := io.ReadAll(resp.Body) - // This test ensures future upgrades don't accidentally change `under_score` to `camelCase` again. 
- assert.Contains(t, string(b), "peer_count") + assert.Contains(t, string(b), "peerCount") - var hc guber.HealthCheckResp + var hc guber.HealthCheckResponse require.NoError(t, json.Unmarshal(b, &hc)) assert.Equal(t, int32(10), hc.PeerCount) require.NoError(t, err) - - payload, err := json.Marshal(&guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ - { - Name: name, - UniqueKey: key, - Duration: guber.Millisecond * 1000, - Hits: 1, - Limit: 10, - }, - }, - }) - require.NoError(t, err) - - resp, err = http.DefaultClient.Post("http://"+address+"/v1/GetRateLimits", - "application/json", bytes.NewReader(payload)) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - b, err = io.ReadAll(resp.Body) - require.NoError(t, err) - var r guber.GetRateLimitsResp - - // NOTE: It is important to use 'protojson' instead of the standard 'json' package - // else the enums will not be converted properly and json.Unmarshal() will return an - // error. - require.NoError(t, json.Unmarshal(b, &r)) - require.Equal(t, 1, len(r.Responses)) - assert.Equal(t, guber.Status_UNDER_LIMIT, r.Responses[0].Status) } func TestGetPeerRateLimits(t *testing.T) { name := t.Name() ctx := context.Background() - peerClient, err := guber.NewPeerClient(guber.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), + info := cluster.GetRandomPeerInfo(cluster.DataCenterNone) + + peerClient, err := guber.NewPeer(guber.PeerConfig{ + PeerClient: guber.NewPeerClient(guber.WithNoTLS(info.HTTPAddress)), + Info: info, }) require.NoError(t, err) t.Run("Stable rate check request order", func(t *testing.T) { // Ensure response order matches rate check request order. // Try various batch sizes. - createdAt := epochMillis(clock.Now()) testCases := []int{1, 2, 5, 10, 100, 1000} for _, n := range testCases { t.Run(fmt.Sprintf("Batch size %d", n), func(t *testing.T) { // Build request. - req := &guber.GetPeerRateLimitsReq{ - Requests: make([]*guber.RateLimitReq, n), + req := &guber.ForwardRequest{ + Requests: make([]*guber.RateLimitRequest, n), } for i := 0; i < n; i++ { - req.Requests[i] = &guber.RateLimitReq{ + req.Requests[i] = &guber.RateLimitRequest{ Name: name, - UniqueKey: guber.RandomString(10), + UniqueKey: fmt.Sprintf("%08x", i), Hits: 0, Limit: 1000 + int64(i), Duration: 1000, Algorithm: guber.Algorithm_TOKEN_BUCKET, Behavior: guber.Behavior_BATCHING, - CreatedAt: &createdAt, } } // Send request. - resp, err := peerClient.GetPeerRateLimits(ctx, req) + var resp guber.ForwardResponse + err := peerClient.ForwardBatch(ctx, req, &resp) // Verify. 
require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.RateLimits, n) for i, item := range resp.RateLimits { @@ -1685,15 +1713,22 @@ func TestGetPeerRateLimits(t *testing.T) { }) } -// TODO: Add a test for sending no rate limits RateLimitReqList.RateLimits = nil +func TestNoRateLimits(t *testing.T) { + client, errs := guber.NewClient(cluster.GetRandomPeerOptions(cluster.DataCenterNone)) + require.Nil(t, errs) + + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{}, &resp) + require.Error(t, err) +} func TestGlobalBehavior(t *testing.T) { const limit = 1000 broadcastTimeout := 400 * time.Millisecond createdAt := epochMillis(clock.Now()) - makeReq := func(name, key string, hits int64) *guber.RateLimitReq { - return &guber.RateLimitReq{ + makeReq := func(name, key string, hits int64) *guber.RateLimitRequest { + return &guber.RateLimitRequest{ Name: name, UniqueKey: key, Algorithm: guber.Algorithm_TOKEN_BUCKET, @@ -1804,7 +1839,7 @@ func TestGlobalBehavior(t *testing.T) { assert.Equal(t, expected, upgCounters2[peer.InstanceID]) } - // Assert PeerGetRateLimits endpoint not called. + // Assert PeerCheckRateLimits endpoint not called. // Used by global hits update. gprlCounters2 := getPeerCounters(t, cluster.GetDaemons(), "gubernator_grpc_request_duration_count{method=\"/pb.gubernator.PeersV1/GetPeerRateLimits\"}") for _, peer := range cluster.GetDaemons() { @@ -1923,7 +1958,7 @@ func TestGlobalBehavior(t *testing.T) { assert.Equal(t, expected, upgCounters2[peer.InstanceID], "upgCounter %s", peer.InstanceID) } - // Assert PeerGetRateLimits endpoint called once on owner. + // Assert PeerCheckRateLimits endpoint called once on owner. // Used by global hits update. gprlCounters2 := getPeerCounters(t, cluster.GetDaemons(), "gubernator_grpc_request_duration_count{method=\"/pb.gubernator.PeersV1/GetPeerRateLimits\"}") for _, peer := range cluster.GetDaemons() { @@ -2072,7 +2107,7 @@ func TestGlobalBehavior(t *testing.T) { assert.GreaterOrEqual(t, upgCounters2[peer.InstanceID], expected, "upgCounter %s", peer.InstanceID) } - // Assert PeerGetRateLimits endpoint called on owner + // Assert PeerCheckRateLimits endpoint called on owner // for each non-owner that received hits. // Used by global hits update. 
gprlCounters2 := getPeerCounters(t, cluster.GetDaemons(), "gubernator_grpc_request_duration_count{method=\"/pb.gubernator.PeersV1/GetPeerRateLimits\"}") @@ -2294,16 +2329,18 @@ func getPeerCounters(t *testing.T, peers []*guber.Daemon, name string) map[strin return counters } -func sendHit(t *testing.T, d *guber.Daemon, req *guber.RateLimitReq, expectStatus guber.Status, expectRemaining int64) { +func sendHit(t *testing.T, d *guber.Daemon, req *guber.RateLimitRequest, expectStatus guber.Status, expectRemaining int64) { if req.Hits != 0 { t.Logf("Sending %d hits to peer %s", req.Hits, d.InstanceID) } client := d.MustClient() ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{req}, - }) + + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) item := resp.Responses[0] assert.Equal(t, "", item.Error) @@ -2320,18 +2357,18 @@ func epochMillis(t time.Time) int64 { func startGubernator() error { err := cluster.StartWith([]guber.PeerInfo{ - {GRPCAddress: "127.0.0.1:9990", HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9991", HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9992", HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9993", HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9994", HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9995", HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, // DataCenterOne - {GRPCAddress: "127.0.0.1:9890", HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9891", HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9892", HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9893", HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, }) if err != nil { return errors.Wrap(err, "while starting cluster") diff --git a/global.go b/global.go index c5fe167..745c166 100644 --- a/global.go +++ b/global.go @@ -28,23 +28,23 @@ import ( // globalManager manages async hit queue and updates peers in // the cluster periodically when a global rate limit we own updates. 
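+// Hits for limits owned by other peers are queued on hitsQueue and forwarded
+// to the owning peer in batches, while broadcastQueue carries updates for
+// limits this instance owns out to the rest of the cluster.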
type globalManager struct { - hitsQueue chan *RateLimitReq - broadcastQueue chan *RateLimitReq + hitsQueue chan *RateLimitRequest + broadcastQueue chan *RateLimitRequest wg syncutil.WaitGroup conf BehaviorConfig log FieldLogger - instance *V1Instance // TODO circular import? V1Instance also holds a reference to globalManager + instance *Service // TODO circular import? Service also holds a reference to globalManager metricGlobalSendDuration prometheus.Summary metricGlobalSendQueueLength prometheus.Gauge metricBroadcastDuration prometheus.Summary metricGlobalQueueLength prometheus.Gauge } -func newGlobalManager(conf BehaviorConfig, instance *V1Instance) *globalManager { +func newGlobalManager(conf BehaviorConfig, instance *Service) *globalManager { gm := globalManager{ log: instance.log, - hitsQueue: make(chan *RateLimitReq, conf.GlobalBatchLimit), - broadcastQueue: make(chan *RateLimitReq, conf.GlobalBatchLimit), + hitsQueue: make(chan *RateLimitRequest, conf.GlobalBatchLimit), + broadcastQueue: make(chan *RateLimitRequest, conf.GlobalBatchLimit), instance: instance, conf: conf, metricGlobalSendDuration: prometheus.NewSummary(prometheus.SummaryOpts{ @@ -71,13 +71,13 @@ func newGlobalManager(conf BehaviorConfig, instance *V1Instance) *globalManager return &gm } -func (gm *globalManager) QueueHit(r *RateLimitReq) { +func (gm *globalManager) QueueHit(r *RateLimitRequest) { if r.Hits != 0 { gm.hitsQueue <- r } } -func (gm *globalManager) QueueUpdate(req *RateLimitReq) { +func (gm *globalManager) QueueUpdate(req *RateLimitRequest) { if req.Hits != 0 { gm.broadcastQueue <- req } @@ -90,7 +90,7 @@ func (gm *globalManager) QueueUpdate(req *RateLimitReq) { // and in a periodic frequency determined by GlobalSyncWait. func (gm *globalManager) runAsyncHits() { var interval = NewInterval(gm.conf.GlobalSyncWait) - hits := make(map[string]*RateLimitReq) + hits := make(map[string]*RateLimitRequest) gm.wg.Until(func(done chan struct{}) bool { @@ -114,7 +114,7 @@ func (gm *globalManager) runAsyncHits() { // Send the hits if we reached our batch limit if len(hits) == gm.conf.GlobalBatchLimit { gm.sendHits(hits) - hits = make(map[string]*RateLimitReq) + hits = make(map[string]*RateLimitRequest) gm.metricGlobalSendQueueLength.Set(0) return true } @@ -128,7 +128,7 @@ func (gm *globalManager) runAsyncHits() { case <-interval.C: if len(hits) != 0 { gm.sendHits(hits) - hits = make(map[string]*RateLimitReq) + hits = make(map[string]*RateLimitRequest) gm.metricGlobalSendQueueLength.Set(0) } case <-done: @@ -141,10 +141,10 @@ func (gm *globalManager) runAsyncHits() { // sendHits takes the hits collected by runAsyncHits and sends them to their // owning peers -func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { +func (gm *globalManager) sendHits(hits map[string]*RateLimitRequest) { type pair struct { - client *PeerClient - req GetPeerRateLimitsReq + client *Peer + req ForwardRequest } defer prometheus.NewTimer(gm.metricGlobalSendDuration).ObserveDuration() peerRequests := make(map[string]*pair) @@ -156,13 +156,13 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { gm.log.WithError(err).Errorf("while getting peer for hash key '%s'", r.HashKey()) continue } - p, ok := peerRequests[peer.Info().GRPCAddress] + p, ok := peerRequests[peer.Info().HTTPAddress] if ok { p.req.Requests = append(p.req.Requests, r) } else { - peerRequests[peer.Info().GRPCAddress] = &pair{ + peerRequests[peer.Info().HTTPAddress] = &pair{ client: peer, - req: GetPeerRateLimitsReq{Requests: []*RateLimitReq{r}}, + req: 
ForwardRequest{Requests: []*RateLimitRequest{r}}, } } } @@ -173,12 +173,13 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { fan.Run(func(in interface{}) error { p := in.(*pair) ctx, cancel := context.WithTimeout(context.Background(), gm.conf.GlobalTimeout) - _, err := p.client.GetPeerRateLimits(ctx, &p.req) + var resp ForwardResponse + err := p.client.ForwardBatch(ctx, &p.req, &resp) cancel() if err != nil { gm.log.WithError(err). - Errorf("while sending global hits to '%s'", p.client.Info().GRPCAddress) + Errorf("while sending global hits to '%s'", p.client.Info().HTTPAddress) } return nil }, p) @@ -192,7 +193,7 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { // and in a periodic frequency determined by GlobalSyncWait. func (gm *globalManager) runBroadcasts() { var interval = NewInterval(gm.conf.GlobalSyncWait) - updates := make(map[string]*RateLimitReq) + updates := make(map[string]*RateLimitRequest) gm.wg.Until(func(done chan struct{}) bool { select { @@ -203,7 +204,7 @@ func (gm *globalManager) runBroadcasts() { // Send the hits if we reached our batch limit if len(updates) >= gm.conf.GlobalBatchLimit { gm.broadcastPeers(context.Background(), updates) - updates = make(map[string]*RateLimitReq) + updates = make(map[string]*RateLimitRequest) gm.metricGlobalQueueLength.Set(0) return true } @@ -219,7 +220,7 @@ func (gm *globalManager) runBroadcasts() { break } gm.broadcastPeers(context.Background(), updates) - updates = make(map[string]*RateLimitReq) + updates = make(map[string]*RateLimitRequest) gm.metricGlobalQueueLength.Set(0) case <-done: @@ -231,27 +232,28 @@ func (gm *globalManager) runBroadcasts() { } // broadcastPeers broadcasts global rate limit statuses to all other peers -func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string]*RateLimitReq) { +func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string]*RateLimitRequest) { defer prometheus.NewTimer(gm.metricBroadcastDuration).ObserveDuration() - var req UpdatePeerGlobalsReq - reqState := RateLimitReqState{IsOwner: false} + var req UpdateRequest + reqState := RateLimitRequestState{IsOwner: false} gm.metricGlobalQueueLength.Set(float64(len(updates))) for _, update := range updates { - // Get current rate limit state. - grlReq := proto.Clone(update).(*RateLimitReq) + grlReq := proto.Clone(update).(*RateLimitRequest) grlReq.Hits = 0 - status, err := gm.instance.workerPool.GetRateLimit(ctx, grlReq, reqState) + + // Get current rate limit state. 
+ state, err := gm.instance.workerPool.GetRateLimit(ctx, grlReq, reqState) if err != nil { gm.log.WithError(err).Error("while retrieving rate limit status") continue } - updateReq := &UpdatePeerGlobal{ + updateReq := &UpdateRateLimit{ Key: update.HashKey(), Algorithm: update.Algorithm, Duration: update.Duration, - Status: status, + State: state, CreatedAt: *update.CreatedAt, } req.Globals = append(req.Globals, updateReq) @@ -265,15 +267,15 @@ func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string] } fan.Run(func(in interface{}) error { - peer := in.(*PeerClient) + peer := in.(*Peer) ctx, cancel := context.WithTimeout(ctx, gm.conf.GlobalTimeout) - _, err := peer.UpdatePeerGlobals(ctx, &req) + err := peer.Update(ctx, &req) cancel() if err != nil { // Only log if it's an unknown error if !errors.Is(err, context.Canceled) && errors.Is(err, context.DeadlineExceeded) { - gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", peer.Info().GRPCAddress) + gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", peer.Info().HTTPAddress) } } return nil @@ -286,6 +288,6 @@ func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string] func (gm *globalManager) Close() { gm.wg.Stop() for _, peer := range gm.instance.GetPeerList() { - _ = peer.Shutdown(context.Background()) + _ = peer.Close(context.Background()) } } diff --git a/go.mod b/go.mod index 4bc2eeb..25dd7af 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ -module github.com/gubernator-io/gubernator/v2 +module github.com/gubernator-io/gubernator/v3 -go 1.20 +go 1.21 require ( github.com/OneOfOne/xxhash v1.2.8 @@ -43,6 +43,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/go.sum b/go.sum index 7b2a200..b45deef 100644 --- a/go.sum +++ b/go.sum @@ -101,6 +101,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/duh-rpc/duh-go v0.0.1 h1:prxWu5oFU88mAV62cZiBSvTd4gI+sGIb1RzdWbD+Zkc= +github.com/duh-rpc/duh-go v0.0.1/go.mod h1:2XM20vTrrSv4BWRRLoIBAcTqfU6QN3vbCmx+tncdE90= +github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a h1:v/NQEfHHOY/huFECKxKZnEkY5jVD8Yix8TPa0FjgKbg= +github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a/go.mod h1:OoCoGsZkeED84v8TAE86m2NM5ZfNLNlqUUm7tYO+h+k= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= diff --git a/grpc_stats.go b/grpc_stats.go deleted file mode 100644 index 39cc662..0000000 --- a/grpc_stats.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2018-2022 Mailgun Technologies Inc - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in 
compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gubernator - -import ( - "context" - - "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/syncutil" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/stats" -) - -type GRPCStats struct { - Duration clock.Duration - Method string - Failed float64 - Success float64 -} - -type contextKey struct{} - -var statsContextKey = contextKey{} - -// Implements the Prometheus collector interface. Such that when the /metrics handler is -// called this collector pulls all the stats from -type GRPCStatsHandler struct { - reqCh chan *GRPCStats - wg syncutil.WaitGroup - - grpcRequestCount *prometheus.CounterVec - grpcRequestDuration *prometheus.SummaryVec -} - -func NewGRPCStatsHandler() *GRPCStatsHandler { - c := &GRPCStatsHandler{ - grpcRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "gubernator_grpc_request_counts", - Help: "The count of gRPC requests.", - }, []string{"status", "method"}), - grpcRequestDuration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Name: "gubernator_grpc_request_duration", - Help: "The timings of gRPC requests in seconds", - Objectives: map[float64]float64{ - 0.5: 0.05, - 0.99: 0.001, - }, - }, []string{"method"}), - } - c.run() - return c -} - -func (c *GRPCStatsHandler) run() { - c.reqCh = make(chan *GRPCStats, 10000) - - c.wg.Until(func(done chan struct{}) bool { - select { - case stat := <-c.reqCh: - c.grpcRequestCount.With(prometheus.Labels{"status": "failed", "method": stat.Method}).Add(stat.Failed) - c.grpcRequestCount.With(prometheus.Labels{"status": "success", "method": stat.Method}).Add(stat.Success) - c.grpcRequestDuration.With(prometheus.Labels{"method": stat.Method}).Observe(stat.Duration.Seconds()) - case <-done: - return false - } - return true - }) -} - -func (c *GRPCStatsHandler) Describe(ch chan<- *prometheus.Desc) { - c.grpcRequestCount.Describe(ch) - c.grpcRequestDuration.Describe(ch) -} - -func (c *GRPCStatsHandler) Collect(ch chan<- prometheus.Metric) { - c.grpcRequestCount.Collect(ch) - c.grpcRequestDuration.Collect(ch) -} - -func (c *GRPCStatsHandler) Close() { - c.wg.Stop() -} - -func (c *GRPCStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { - rs := StatsFromContext(ctx) - if rs == nil { - return - } - - switch t := s.(type) { - // case *stats.Begin: - // case *stats.InPayload: - // case *stats.InHeader: - // case *stats.InTrailer: - // case *stats.OutPayload: - // case *stats.OutHeader: - // case *stats.OutTrailer: - case *stats.End: - rs.Duration = t.EndTime.Sub(t.BeginTime) - if t.Error != nil { - rs.Failed = 1 - } else { - rs.Success = 1 - } - c.reqCh <- rs - } -} - -func (c *GRPCStatsHandler) HandleConn(ctx context.Context, s stats.ConnStats) {} - -func (c *GRPCStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { - return ctx -} - -func (c *GRPCStatsHandler) TagRPC(ctx context.Context, tagInfo *stats.RPCTagInfo) context.Context { - return ContextWithStats(ctx, &GRPCStats{Method: tagInfo.FullMethodName}) -} - -// Returns a new `context.Context` that holds a reference to `GRPCStats`. 
-func ContextWithStats(ctx context.Context, stats *GRPCStats) context.Context { - return context.WithValue(ctx, statsContextKey, stats) -} - -// Returns the `GRPCStats` previously associated with `ctx`. -func StatsFromContext(ctx context.Context) *GRPCStats { - val := ctx.Value(statsContextKey) - if rs, ok := val.(*GRPCStats); ok { - return rs - } - return nil -} diff --git a/gubernator.go b/gubernator.go index ff6812a..22b9a11 100644 --- a/gubernator.go +++ b/gubernator.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" "github.com/mailgun/errors" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/syncutil" @@ -31,8 +33,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" ) @@ -42,25 +42,27 @@ const ( UnHealthy = "unhealthy" ) -type V1Instance struct { - UnimplementedV1Server - UnimplementedPeersV1Server +type Service struct { + propagator propagation.TraceContext global *globalManager peerMutex sync.RWMutex + workerPool *WorkerPool log FieldLogger conf Config isClosed bool - workerPool *WorkerPool } -type RateLimitReqState struct { +// TODO(thrawn01): Consider renaming this to `RateLimitContext` which is only slightly better than `State` as +// both are over used. But seeing the word "state" still confuses me when I come back to this code months later. + +type RateLimitRequestState struct { IsOwner bool } var ( metricGetRateLimitCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "gubernator_getratelimit_counter", - Help: "The count of getLocalRateLimit() calls. Label \"calltype\" may be \"local\" for calls handled by the same peer, or \"global\" for global rate limits.", + Help: "The count of checkLocalRateLimit() calls. Label \"calltype\" may be \"local\" for calls handled by the same peer, or \"global\" for global rate limits.", }, []string{"calltype"}) metricFuncTimeDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ Name: "gubernator_func_duration", @@ -110,18 +112,15 @@ var ( }, []string{"peerAddr"}) ) -// NewV1Instance instantiate a single instance of a gubernator peer and register this -// instance with the provided GRPCServer. 
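// NewService, added below, replaces this constructor. A hedged sketch of the
// new entry point, assuming the package is imported as gubernator and that an
// empty Config is populated by conf.SetDefaults():
//
//	svc, err := gubernator.NewService(gubernator.Config{})
//	if err != nil {
//		return err
//	}
//	defer func() { _ = svc.Close(context.Background()) }()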
-func NewV1Instance(conf Config) (s *V1Instance, err error) { +// NewService instantiate a single instance of a gubernator service +func NewService(conf Config) (s *Service, err error) { ctx := context.Background() - if conf.GRPCServers == nil { - return nil, errors.New("at least one GRPCServer instance is required") - } + if err := conf.SetDefaults(); err != nil { return nil, err } - s = &V1Instance{ + s = &Service{ log: conf.Logger, conf: conf, } @@ -129,12 +128,6 @@ func NewV1Instance(conf Config) (s *V1Instance, err error) { s.workerPool = NewWorkerPool(&conf) s.global = newGlobalManager(conf.Behaviors, s) - // Register our instance with all GRPC servers - for _, srv := range conf.GRPCServers { - RegisterV1Server(srv, s) - RegisterPeersV1Server(srv, s) - } - if s.conf.Loader == nil { return s, nil } @@ -148,9 +141,7 @@ func NewV1Instance(conf Config) (s *V1Instance, err error) { return s, nil } -func (s *V1Instance) Close() (err error) { - ctx := context.Background() - +func (s *Service) Close(ctx context.Context) (err error) { if s.isClosed { return nil } @@ -177,95 +168,101 @@ func (s *V1Instance) Close() (err error) { return nil } -// GetRateLimits is the public interface used by clients to request rate limits from the system. If the +// CheckRateLimits is the public interface used by clients to request rate limits from the system. If the // rate limit `Name` and `UniqueKey` is not owned by this instance, then we forward the request to the // peer that does. -func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*GetRateLimitsResp, error) { - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.GetRateLimits")) +func (s *Service) CheckRateLimits(ctx context.Context, req *CheckRateLimitsRequest, resp *CheckRateLimitsResponse) (err error) { + + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.CheckRateLimits")) defer funcTimer.ObserveDuration() metricConcurrentChecks.Inc() defer metricConcurrentChecks.Dec() - if len(r.Requests) > maxBatchSize { - metricCheckErrorCounter.WithLabelValues("Request too large").Inc() - return nil, status.Errorf(codes.OutOfRange, - "Requests.RateLimits list too large; max size is '%d'", maxBatchSize) + if len(req.Requests) > maxBatchSize { + metricCheckErrorCounter.WithLabelValues("Request too large").Add(1) + return duh.NewServiceError(duh.CodeBadRequest, + fmt.Errorf("CheckRateLimitsRequest.RateLimits list too large; max size is '%d'", maxBatchSize), nil) } - createdAt := epochMillis(clock.Now()) - resp := GetRateLimitsResp{ - Responses: make([]*RateLimitResp, len(r.Requests)), + if len(req.Requests) == 0 { + return duh.NewServiceError(duh.CodeBadRequest, + errors.New("CheckRateLimitsRequest.RateLimits list is empty; provide at least one rate limit"), nil) } + + resp.Responses = make([]*RateLimitResponse, len(req.Requests)) + asyncCh := make(chan AsyncResp, len(req.Requests)) + createdAt := epochMillis(clock.Now()) var wg sync.WaitGroup - asyncCh := make(chan AsyncResp, len(r.Requests)) // For each item in the request body - for i, req := range r.Requests { - key := req.Name + "_" + req.UniqueKey - var peer *PeerClient + for i, r := range req.Requests { + key := r.Name + "_" + r.UniqueKey + var peer *Peer var err error - if req.UniqueKey == "" { + if r.UniqueKey == "" { metricCheckErrorCounter.WithLabelValues("Invalid request").Inc() - resp.Responses[i] = &RateLimitResp{Error: "field 'unique_key' cannot be empty"} + resp.Responses[i] = &RateLimitResponse{Error: "field 
'unique_key' cannot be empty"} continue } - if req.Name == "" { + + if r.Name == "" { metricCheckErrorCounter.WithLabelValues("Invalid request").Inc() - resp.Responses[i] = &RateLimitResp{Error: "field 'namespace' cannot be empty"} + resp.Responses[i] = &RateLimitResponse{Error: "field 'namespace' cannot be empty"} continue } - if req.CreatedAt == nil || *req.CreatedAt == 0 { - req.CreatedAt = &createdAt + + if r.CreatedAt == nil || *r.CreatedAt == 0 { + r.CreatedAt = &createdAt } if ctx.Err() != nil { err = errors.Wrap(ctx.Err(), "Error while iterating request items") span := trace.SpanFromContext(ctx) span.RecordError(err) - resp.Responses[i] = &RateLimitResp{ + resp.Responses[i] = &RateLimitResponse{ Error: err.Error(), } continue } if s.conf.Behaviors.ForceGlobal { - SetBehavior(&req.Behavior, Behavior_GLOBAL, true) + SetBehavior(&r.Behavior, Behavior_GLOBAL, true) } peer, err = s.GetPeer(ctx, key) if err != nil { countError(err, "Error in GetPeer") err = errors.Wrapf(err, "Error in GetPeer, looking up peer that owns rate limit '%s'", key) - resp.Responses[i] = &RateLimitResp{ + resp.Responses[i] = &RateLimitResponse{ Error: err.Error(), } continue } // If our server instance is the owner of this rate limit - reqState := RateLimitReqState{IsOwner: peer.Info().IsOwner} + reqState := RateLimitRequestState{IsOwner: peer.Info().IsOwner} if reqState.IsOwner { // Apply our rate limit algorithm to the request - resp.Responses[i], err = s.getLocalRateLimit(ctx, req, reqState) + resp.Responses[i], err = s.checkLocalRateLimit(ctx, r, reqState) if err != nil { err = errors.Wrapf(err, "Error while apply rate limit for '%s'", key) span := trace.SpanFromContext(ctx) span.RecordError(err) - resp.Responses[i] = &RateLimitResp{Error: err.Error()} + resp.Responses[i] = &RateLimitResponse{Error: err.Error()} } } else { - if HasBehavior(req.Behavior, Behavior_GLOBAL) { - resp.Responses[i], err = s.getGlobalRateLimit(ctx, req) + if HasBehavior(r.Behavior, Behavior_GLOBAL) { + resp.Responses[i], err = s.checkGlobalRateLimit(ctx, r) if err != nil { - err = errors.Wrap(err, "Error in getGlobalRateLimit") + err = errors.Wrap(err, "Error in checkGlobalRateLimit") span := trace.SpanFromContext(ctx) span.RecordError(err) - resp.Responses[i] = &RateLimitResp{Error: err.Error()} + resp.Responses[i] = &RateLimitResponse{Error: err.Error()} } // Inform the client of the owner key of the key - resp.Responses[i].Metadata = map[string]string{"owner": peer.Info().GRPCAddress} + resp.Responses[i].Metadata = map[string]string{"owner": peer.Info().HTTPAddress} continue } @@ -275,7 +272,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G go s.asyncRequest(ctx, &AsyncReq{ AsyncCh: asyncCh, Peer: peer, - Req: req, + Req: r, WG: &wg, Key: key, Idx: i, @@ -291,34 +288,33 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G resp.Responses[a.Idx] = a.Resp } - return &resp, nil + return nil } type AsyncResp struct { + Resp *RateLimitResponse Idx int - Resp *RateLimitResp } type AsyncReq struct { + Req *RateLimitRequest WG *sync.WaitGroup AsyncCh chan AsyncResp - Req *RateLimitReq - Peer *PeerClient + Peer *Peer Key string Idx int } -func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { +func (s *Service) asyncRequest(ctx context.Context, req *AsyncReq) { + ctx = tracing.StartNamedScope(ctx, "Service.asyncRequest") + defer tracing.EndScope(ctx, nil) var attempts int var err error - ctx = tracing.StartNamedScope(ctx, "V1Instance.asyncRequest") - defer 
tracing.EndScope(ctx, nil) - - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.asyncRequest")) + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.asyncRequest")) defer funcTimer.ObserveDuration() - reqState := RateLimitReqState{IsOwner: req.Peer.Info().IsOwner} + reqState := RateLimitRequestState{IsOwner: req.Peer.Info().IsOwner} resp := AsyncResp{ Idx: req.Idx, } @@ -331,28 +327,28 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { Error("GetPeer() returned peer that is not connected") countError(err, "Peer not connected") err = errors.Wrapf(err, "GetPeer() keeps returning peers that are not connected for '%s'", req.Key) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } // If we are attempting again, the owner of this rate limit might have changed to us! if attempts != 0 { if reqState.IsOwner { - resp.Resp, err = s.getLocalRateLimit(ctx, req.Req, reqState) + resp.Resp, err = s.checkLocalRateLimit(ctx, req.Req, reqState) if err != nil { s.log.WithContext(ctx). WithError(err). WithField("key", req.Key). Error("Error applying rate limit") - err = errors.Wrapf(err, "Error in getLocalRateLimit for '%s'", req.Key) - resp.Resp = &RateLimitResp{Error: err.Error()} + err = errors.Wrapf(err, "Error in checkLocalRateLimit for '%s'", req.Key) + resp.Resp = &RateLimitResponse{Error: err.Error()} } break } } // Make an RPC call to the peer that owns this rate limit - r, err := req.Peer.GetPeerRateLimit(ctx, req.Req) + r, err := req.Peer.Forward(ctx, req.Req) if err != nil { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { attempts++ @@ -363,7 +359,7 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { s.log.WithContext(ctx).WithError(err).WithField("key", req.Key).Error(errPart) countError(err, "Error in GetPeer") err = errors.Wrap(err, errPart) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } continue @@ -372,13 +368,13 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { // Not calling `countError()` because we expect the remote end to // report this error. err = errors.Wrap(err, fmt.Sprintf("Error while fetching rate limit '%s' from peer", req.Key)) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } // Inform the client of the owner key of the key resp.Resp = r - resp.Resp.Metadata = map[string]string{"owner": req.Peer.Info().GRPCAddress} + resp.Resp.Metadata = map[string]string{"owner": req.Peer.Info().HTTPAddress} break } @@ -390,14 +386,14 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { } } -// getGlobalRateLimit handles rate limits that are marked as `Behavior = GLOBAL`. Rate limit responses +// checkGlobalRateLimit handles rate limits that are marked as `Behavior = GLOBAL`. Rate limit responses // are returned from the local cache and the hits are queued to be sent to the owning peer. 
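//
// Callers opt into this path by setting the GLOBAL behavior flag on the
// request. A minimal sketch (SetBehavior and Behavior_GLOBAL are defined in
// this package; values are illustrative, duration is in milliseconds):
//
//	r := &gubernator.RateLimitRequest{
//		Name:      "requests_per_sec",
//		UniqueKey: "account:1",
//		Hits:      1,
//		Limit:     100,
//		Duration:  60000,
//	}
//	gubernator.SetBehavior(&r.Behavior, gubernator.Behavior_GLOBAL, true)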
-func (s *V1Instance) getGlobalRateLimit(ctx context.Context, req *RateLimitReq) (resp *RateLimitResp, err error) {
-	ctx = tracing.StartNamedScope(ctx, "V1Instance.getGlobalRateLimit", trace.WithAttributes(
+func (s *Service) checkGlobalRateLimit(ctx context.Context, req *RateLimitRequest) (resp *RateLimitResponse, err error) {
+	ctx = tracing.StartNamedScope(ctx, "Service.checkGlobalRateLimit", trace.WithAttributes(
 		attribute.String("ratelimit.key", req.UniqueKey),
 		attribute.String("ratelimit.name", req.Name),
 	))
-	defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getGlobalRateLimit")).ObserveDuration()
+	defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.checkGlobalRateLimit")).ObserveDuration()
 	defer func() {
 		if err == nil {
 			s.global.QueueHit(req)
@@ -405,85 +401,90 @@ func (s *V1Instance) getGlobalRateLimit(ctx context.Context, req *RateLimitReq)
 		tracing.EndScope(ctx, err)
 	}()
 
-	req2 := proto.Clone(req).(*RateLimitReq)
+	req2 := proto.Clone(req).(*RateLimitRequest)
 	SetBehavior(&req2.Behavior, Behavior_NO_BATCHING, true)
 	SetBehavior(&req2.Behavior, Behavior_GLOBAL, false)
-	reqState := RateLimitReqState{IsOwner: false}
+	reqState := RateLimitRequestState{IsOwner: false}
 
 	// Process the rate limit like we own it
-	resp, err = s.getLocalRateLimit(ctx, req2, reqState)
+	resp, err = s.checkLocalRateLimit(ctx, req2, reqState)
 	if err != nil {
-		return nil, errors.Wrap(err, "during in getLocalRateLimit")
+		return nil, errors.Wrap(err, "during checkLocalRateLimit")
 	}
 
 	metricGetRateLimitCounter.WithLabelValues("global").Inc()
 	return resp, nil
 }
 
-// UpdatePeerGlobals updates the local cache with a list of global rate limits. This method should only
-// be called by a peer who is the owner of a global rate limit.
-func (s *V1Instance) UpdatePeerGlobals(ctx context.Context, r *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) {
-	defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.UpdatePeerGlobals")).ObserveDuration()
+// Update updates the local cache with rate limit state received from a peer.
+// This method should only be called by a peer.
+func (s *Service) Update(ctx context.Context, r *UpdateRequest, resp *v1.Reply) (err error) {
+	ctx = tracing.StartNamedScopeDebug(ctx, "Service.Update")
+	defer func() { tracing.EndScope(ctx, err) }()
+
 	now := MillisecondNow()
 	for _, g := range r.Globals {
 		item := &CacheItem{
-			ExpireAt:  g.Status.ResetTime,
+			ExpireAt:  g.State.ResetTime,
 			Algorithm: g.Algorithm,
 			Key:       g.Key,
 		}
+
+		// TODO(thrawn01): Can I move this to algorithm.go?
 		switch g.Algorithm {
 		case Algorithm_LEAKY_BUCKET:
 			item.Value = &LeakyBucketItem{
-				Remaining: float64(g.Status.Remaining),
-				Limit:     g.Status.Limit,
+				Remaining: float64(g.State.Remaining),
+				Limit:     g.State.Limit,
 				Duration:  g.Duration,
-				Burst:     g.Status.Limit,
+				Burst:     g.State.Limit,
 				UpdatedAt: now,
 			}
 		case Algorithm_TOKEN_BUCKET:
 			item.Value = &TokenBucketItem{
-				Status:    g.Status.Status,
-				Limit:     g.Status.Limit,
+				Status:    g.State.Status,
+				Limit:     g.State.Limit,
 				Duration:  g.Duration,
-				Remaining: g.Status.Remaining,
+				Remaining: g.State.Remaining,
 				CreatedAt: now,
 			}
 		}
 		err := s.workerPool.AddCacheItem(ctx, g.Key, item)
 		if err != nil {
-			return nil, errors.Wrap(err, "Error in workerPool.AddCacheItem")
+			return errors.Wrap(err, "Error in workerPool.AddCacheItem")
 		}
 	}
 
-	return &UpdatePeerGlobalsResp{}, nil
+	resp.Code = duh.CodeOK
+	return nil
 }
 
-// GetPeerRateLimits is called by other peers to get the rate limits owned by this peer.
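// A sketch of the broadcast payload Update consumes; Forward, below, replaces
// GetPeerRateLimits. The owner builds one entry per changed key (see
// broadcastPeers in global.go). State is assumed to be the owner's current
// *RateLimitResponse for the key, and values here are illustrative:
//
//	req := &gubernator.UpdateRequest{Globals: []*gubernator.UpdateRateLimit{{
//		Key:       "requests_per_sec_account:1",
//		Algorithm: gubernator.Algorithm_TOKEN_BUCKET,
//		Duration:  60000,
//		State:     &gubernator.RateLimitResponse{Limit: 100, Remaining: 99, ResetTime: resetMillis},
//	}}}
//
// where resetMillis is a future unix timestamp in milliseconds.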
-func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimitsReq) (resp *GetPeerRateLimitsResp, err error) { - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.GetPeerRateLimits")).ObserveDuration() - if len(r.Requests) > maxBatchSize { - err := fmt.Errorf("'PeerRequest.rate_limits' list too large; max size is '%d'", maxBatchSize) - metricCheckErrorCounter.WithLabelValues("Request too large").Inc() - return nil, status.Error(codes.OutOfRange, err.Error()) +// Forward is called by other peers when forwarding rate limits to this peer +func (s *Service) Forward(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) (err error) { + ctx = tracing.StartNamedScopeDebug(ctx, "Service.Forward") + defer func() { tracing.EndScope(ctx, err) }() + + if len(req.Requests) > maxBatchSize { + metricCheckErrorCounter.WithLabelValues("Request too large").Add(1) + return duh.NewServiceError(duh.CodeBadRequest, + fmt.Errorf("'Forward.requests' list too large; max size is '%d'", maxBatchSize), nil) } // Invoke each rate limit request. type reqIn struct { idx int - req *RateLimitReq + req *RateLimitRequest } type respOut struct { idx int - rl *RateLimitResp + rl *RateLimitResponse } - resp = &GetPeerRateLimitsResp{ - RateLimits: make([]*RateLimitResp, len(r.Requests)), - } + resp.RateLimits = make([]*RateLimitResponse, len(req.Requests)) + reqState := RateLimitRequestState{IsOwner: true} respChan := make(chan respOut) var respWg sync.WaitGroup respWg.Add(1) - reqState := RateLimitReqState{IsOwner: true} go func() { // Capture each response and return in the same order @@ -496,12 +497,11 @@ func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimits // Fan out requests. fan := syncutil.NewFanOut(s.conf.Workers) - for idx, req := range r.Requests { + for idx, req := range req.Requests { fan.Run(func(in interface{}) error { rin := in.(reqIn) // Extract the propagated context from the metadata in the request - prop := propagation.TraceContext{} - ctx := prop.Extract(ctx, &MetadataCarrier{Map: rin.req.Metadata}) + ctx := s.propagator.Extract(ctx, &MetadataCarrier{Map: rin.req.Metadata}) // Forwarded global requests must have DRAIN_OVER_LIMIT set so token and leaky algorithms // drain the remaining in the event a peer asks for more than is remaining. @@ -517,12 +517,12 @@ func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimits rin.req.CreatedAt = &createdAt } - rl, err := s.getLocalRateLimit(ctx, rin.req, reqState) + rl, err := s.checkLocalRateLimit(ctx, rin.req, reqState) if err != nil { // Return the error for this request - err = errors.Wrap(err, "Error in getLocalRateLimit") - rl = &RateLimitResp{Error: err.Error()} - // metricCheckErrorCounter is updated within getLocalRateLimit(), not in GetPeerRateLimits. + err = errors.Wrap(err, "Error in checkLocalRateLimit") + rl = &RateLimitResponse{Error: err.Error()} + // metricCheckErrorCounter is updated within checkLocalRateLimit(), not in Forward(). } respChan <- respOut{rin.idx, rl} @@ -535,12 +535,11 @@ func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimits close(respChan) respWg.Wait() - return resp, nil + return nil } // HealthCheck Returns the health of our instance. 
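//
// A usage sketch under the new out-parameter convention, assuming svc is a
// *Service and the package is imported as gubernator:
//
//	var health gubernator.HealthCheckResponse
//	if err := svc.HealthCheck(ctx, &gubernator.HealthCheckRequest{}, &health); err != nil {
//		return err
//	}
//	// health.Status is "healthy" or "unhealthy"; health.Message joins any
//	// peer errors with "|".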
-func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health *HealthCheckResp, err error) { - span := trace.SpanFromContext(ctx) +func (s *Service) HealthCheck(ctx context.Context, _ *HealthCheckRequest, resp *HealthCheckResponse) (err error) { var errs []string @@ -552,7 +551,6 @@ func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health for _, peer := range localPeers { for _, errMsg := range peer.GetLastErr() { err := fmt.Errorf("error returned from local peer.GetLastErr: %s", errMsg) - span.RecordError(err) errs = append(errs, err.Error()) } } @@ -562,38 +560,30 @@ func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health for _, peer := range regionPeers { for _, errMsg := range peer.GetLastErr() { err := fmt.Errorf("error returned from region peer.GetLastErr: %s", errMsg) - span.RecordError(err) errs = append(errs, err.Error()) } } - health = &HealthCheckResp{ - PeerCount: int32(len(localPeers) + len(regionPeers)), - Status: Healthy, - } + resp.PeerCount = int32(len(localPeers) + len(regionPeers)) + resp.Status = Healthy if len(errs) != 0 { - health.Status = UnHealthy - health.Message = strings.Join(errs, "|") + resp.Status = UnHealthy + resp.Message = strings.Join(errs, "|") } - span.SetAttributes( - attribute.Int64("health.peerCount", int64(health.PeerCount)), - attribute.String("health.status", health.Status), - ) - - return health, nil + return nil } -func (s *V1Instance) getLocalRateLimit(ctx context.Context, r *RateLimitReq, reqState RateLimitReqState) (_ *RateLimitResp, err error) { - ctx = tracing.StartNamedScope(ctx, "V1Instance.getLocalRateLimit", trace.WithAttributes( +func (s *Service) checkLocalRateLimit(ctx context.Context, r *RateLimitRequest, reqState RateLimitRequestState) (_ *RateLimitResponse, err error) { + ctx = tracing.StartNamedScope(ctx, "Service.checkLocalRateLimit", trace.WithAttributes( attribute.String("ratelimit.key", r.UniqueKey), attribute.String("ratelimit.name", r.Name), attribute.Int64("ratelimit.limit", r.Limit), attribute.Int64("ratelimit.hits", r.Hits), )) defer func() { tracing.EndScope(ctx, err) }() - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getLocalRateLimit")).ObserveDuration() + defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.checkLocalRateLimit")).ObserveDuration() resp, err := s.workerPool.GetRateLimit(ctx, r, reqState) if err != nil { @@ -613,7 +603,7 @@ func (s *V1Instance) getLocalRateLimit(ctx context.Context, r *RateLimitReq, req // SetPeers replaces the peers and shuts down all the previous peers. 
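//
// A usage sketch (PeerInfo fields are from this diff, addresses are
// illustrative). Each previously unknown peer is dialed through
// conf.PeerClientFactory(info):
//
//	svc.SetPeers([]gubernator.PeerInfo{
//		{HTTPAddress: "10.0.1.1:9980", DataCenter: "dc-1"},
//		{HTTPAddress: "10.0.1.2:9980", DataCenter: "dc-1"},
//	})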
// TODO this should return an error if we failed to connect to any of the new peers -func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { +func (s *Service) SetPeers(peerInfo []PeerInfo) { localPicker := s.conf.LocalPicker.New() regionPicker := s.conf.RegionPicker.New() @@ -624,15 +614,14 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { // If we don't have an existing PeerClient create a new one if peer == nil { var err error - peer, err = NewPeerClient(PeerConfig{ - TraceGRPC: s.conf.PeerTraceGRPC, - Behavior: s.conf.Behaviors, - TLS: s.conf.PeerTLS, - Log: s.log, - Info: info, + peer, err = NewPeer(PeerConfig{ + PeerClient: s.conf.PeerClientFactory(info), + Behavior: s.conf.Behaviors, + Log: s.log, + Info: info, }) if err != nil { - s.log.Errorf("error connecting to peer %s: %s", info.GRPCAddress, err) + s.log.Errorf("during NewPeer() call for '%s' - %s", info.HTTPAddress, err) return } } @@ -643,15 +632,14 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { peer := s.conf.LocalPicker.GetByPeerInfo(info) if peer == nil { var err error - peer, err = NewPeerClient(PeerConfig{ - TraceGRPC: s.conf.PeerTraceGRPC, - Behavior: s.conf.Behaviors, - TLS: s.conf.PeerTLS, - Log: s.log, - Info: info, + peer, err = NewPeer(PeerConfig{ + PeerClient: s.conf.PeerClientFactory(info), + Behavior: s.conf.Behaviors, + Log: s.log, + Info: info, }) if err != nil { - s.log.Errorf("error connecting to peer %s: %s", info.GRPCAddress, err) + s.log.Errorf("during NewPeer() call for '%s' - %s", info.HTTPAddress, err) return } } @@ -673,7 +661,7 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { ctx, cancel := context.WithTimeout(context.Background(), s.conf.Behaviors.BatchTimeout) defer cancel() - var shutdownPeers []*PeerClient + var shutdownPeers []*Peer for _, peer := range oldLocalPicker.Peers() { if peerInfo := s.conf.LocalPicker.GetByPeerInfo(peer.Info()); peerInfo == nil { shutdownPeers = append(shutdownPeers, peer) @@ -691,8 +679,8 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { var wg syncutil.WaitGroup for _, p := range shutdownPeers { wg.Run(func(obj interface{}) error { - pc := obj.(*PeerClient) - err := pc.Shutdown(ctx) + pc := obj.(*Peer) + err := pc.Close(ctx) if err != nil { s.log.WithError(err).WithField("peer", pc).Error("while shutting down peer") } @@ -704,18 +692,17 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { if len(shutdownPeers) > 0 { var peers []string for _, p := range shutdownPeers { - peers = append(peers, p.Info().GRPCAddress) + peers = append(peers, p.Info().HTTPAddress) } s.log.WithField("peers", peers).Debug("peers shutdown") } } // GetPeer returns a peer client for the hash key provided -func (s *V1Instance) GetPeer(ctx context.Context, key string) (p *PeerClient, err error) { - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.GetPeer")).ObserveDuration() - +func (s *Service) GetPeer(_ context.Context, key string) (p *Peer, err error) { s.peerMutex.RLock() defer s.peerMutex.RUnlock() + p, err = s.conf.LocalPicker.Get(key) if err != nil { return nil, errors.Wrap(err, "Error in conf.LocalPicker.Get") @@ -724,20 +711,20 @@ func (s *V1Instance) GetPeer(ctx context.Context, key string) (p *PeerClient, er return p, nil } -func (s *V1Instance) GetPeerList() []*PeerClient { +func (s *Service) GetPeerList() []*Peer { s.peerMutex.RLock() defer s.peerMutex.RUnlock() return s.conf.LocalPicker.Peers() } -func (s *V1Instance) GetRegionPickers() map[string]PeerPicker { +func (s *Service) GetRegionPickers() map[string]PeerPicker { 
s.peerMutex.RLock() defer s.peerMutex.RUnlock() return s.conf.RegionPicker.Pickers() } // Describe fetches prometheus metrics to be registered -func (s *V1Instance) Describe(ch chan<- *prometheus.Desc) { +func (s *Service) Describe(ch chan<- *prometheus.Desc) { metricBatchQueueLength.Describe(ch) metricBatchSendDuration.Describe(ch) metricBatchSendRetries.Describe(ch) @@ -755,7 +742,7 @@ func (s *V1Instance) Describe(ch chan<- *prometheus.Desc) { } // Collect fetches metrics from the server for use by prometheus -func (s *V1Instance) Collect(ch chan<- prometheus.Metric) { +func (s *Service) Collect(ch chan<- prometheus.Metric) { metricBatchQueueLength.Collect(ch) metricBatchSendDuration.Collect(ch) metricBatchSendRetries.Collect(ch) diff --git a/gubernator.pb.go b/gubernator.pb.go index 305cc2a..0ef9614 100644 --- a/gubernator.pb.go +++ b/gubernator.pb.go @@ -22,7 +22,6 @@ package gubernator import ( - _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -248,17 +247,17 @@ func (Status) EnumDescriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{2} } -// Must specify at least one Request -type GetRateLimitsReq struct { +// Must specify at least one RateLimitRequest +type CheckRateLimitsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Requests []*RateLimitReq `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + Requests []*RateLimitRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (x *GetRateLimitsReq) Reset() { - *x = GetRateLimitsReq{} +func (x *CheckRateLimitsRequest) Reset() { + *x = CheckRateLimitsRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -266,13 +265,13 @@ func (x *GetRateLimitsReq) Reset() { } } -func (x *GetRateLimitsReq) String() string { +func (x *CheckRateLimitsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRateLimitsReq) ProtoMessage() {} +func (*CheckRateLimitsRequest) ProtoMessage() {} -func (x *GetRateLimitsReq) ProtoReflect() protoreflect.Message { +func (x *CheckRateLimitsRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -284,29 +283,29 @@ func (x *GetRateLimitsReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRateLimitsReq.ProtoReflect.Descriptor instead. -func (*GetRateLimitsReq) Descriptor() ([]byte, []int) { +// Deprecated: Use CheckRateLimitsRequest.ProtoReflect.Descriptor instead. 
+func (*CheckRateLimitsRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{0} } -func (x *GetRateLimitsReq) GetRequests() []*RateLimitReq { +func (x *CheckRateLimitsRequest) GetRequests() []*RateLimitRequest { if x != nil { return x.Requests } return nil } -// RateLimits returned are in the same order as the Requests -type GetRateLimitsResp struct { +// RateLimits returned are in the same order provided in CheckRateLimitsRequest +type CheckRateLimitsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Responses []*RateLimitResp `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + Responses []*RateLimitResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` } -func (x *GetRateLimitsResp) Reset() { - *x = GetRateLimitsResp{} +func (x *CheckRateLimitsResponse) Reset() { + *x = CheckRateLimitsResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -314,13 +313,13 @@ func (x *GetRateLimitsResp) Reset() { } } -func (x *GetRateLimitsResp) String() string { +func (x *CheckRateLimitsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRateLimitsResp) ProtoMessage() {} +func (*CheckRateLimitsResponse) ProtoMessage() {} -func (x *GetRateLimitsResp) ProtoReflect() protoreflect.Message { +func (x *CheckRateLimitsResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -332,19 +331,19 @@ func (x *GetRateLimitsResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRateLimitsResp.ProtoReflect.Descriptor instead. -func (*GetRateLimitsResp) Descriptor() ([]byte, []int) { +// Deprecated: Use CheckRateLimitsResponse.ProtoReflect.Descriptor instead. +func (*CheckRateLimitsResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{1} } -func (x *GetRateLimitsResp) GetResponses() []*RateLimitResp { +func (x *CheckRateLimitsResponse) GetResponses() []*RateLimitResponse { if x != nil { return x.Responses } return nil } -type RateLimitReq struct { +type RateLimitRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -352,7 +351,7 @@ type RateLimitReq struct { // The name of the rate limit IE: 'requests_per_second', 'gets_per_minute` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Uniquely identifies this rate limit IE: 'ip:10.2.10.7' or 'account:123445' - UniqueKey string `protobuf:"bytes,2,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"` + UniqueKey string `protobuf:"bytes,2,opt,name=unique_key,proto3" json:"unique_key,omitempty"` // Rate limit requests optionally specify the number of hits a request adds to the matched limit. If Hit // is zero, the request returns the current limit, but does not increment the hit count. Hits int64 `protobuf:"varint,3,opt,name=hits,proto3" json:"hits,omitempty"` @@ -365,9 +364,9 @@ type RateLimitReq struct { Duration int64 `protobuf:"varint,5,opt,name=duration,proto3" json:"duration,omitempty"` // The algorithm used to calculate the rate limit. The algorithm may change on // subsequent requests, when this occurs any previous rate limit hit counts are reset. 
- Algorithm Algorithm `protobuf:"varint,6,opt,name=algorithm,proto3,enum=pb.gubernator.Algorithm" json:"algorithm,omitempty"` + Algorithm Algorithm `protobuf:"varint,6,opt,name=algorithm,proto3,enum=gubernator.v3.Algorithm" json:"algorithm,omitempty"` // Behavior is a set of int32 flags that control the behavior of the rate limit in gubernator - Behavior Behavior `protobuf:"varint,7,opt,name=behavior,proto3,enum=pb.gubernator.Behavior" json:"behavior,omitempty"` + Behavior Behavior `protobuf:"varint,7,opt,name=behavior,proto3,enum=gubernator.v3.Behavior" json:"behavior,omitempty"` // Maximum burst size that the limit can accept. Burst int64 `protobuf:"varint,8,opt,name=burst,proto3" json:"burst,omitempty"` // This is metadata that is associated with this rate limit. Peer to Peer communication will use @@ -387,8 +386,8 @@ type RateLimitReq struct { CreatedAt *int64 `protobuf:"varint,10,opt,name=created_at,json=createdAt,proto3,oneof" json:"created_at,omitempty"` } -func (x *RateLimitReq) Reset() { - *x = RateLimitReq{} +func (x *RateLimitRequest) Reset() { + *x = RateLimitRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -396,13 +395,13 @@ func (x *RateLimitReq) Reset() { } } -func (x *RateLimitReq) String() string { +func (x *RateLimitRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RateLimitReq) ProtoMessage() {} +func (*RateLimitRequest) ProtoMessage() {} -func (x *RateLimitReq) ProtoReflect() protoreflect.Message { +func (x *RateLimitRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -414,102 +413,102 @@ func (x *RateLimitReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitReq.ProtoReflect.Descriptor instead. -func (*RateLimitReq) Descriptor() ([]byte, []int) { +// Deprecated: Use RateLimitRequest.ProtoReflect.Descriptor instead. 
+func (*RateLimitRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{2} } -func (x *RateLimitReq) GetName() string { +func (x *RateLimitRequest) GetName() string { if x != nil { return x.Name } return "" } -func (x *RateLimitReq) GetUniqueKey() string { +func (x *RateLimitRequest) GetUniqueKey() string { if x != nil { return x.UniqueKey } return "" } -func (x *RateLimitReq) GetHits() int64 { +func (x *RateLimitRequest) GetHits() int64 { if x != nil { return x.Hits } return 0 } -func (x *RateLimitReq) GetLimit() int64 { +func (x *RateLimitRequest) GetLimit() int64 { if x != nil { return x.Limit } return 0 } -func (x *RateLimitReq) GetDuration() int64 { +func (x *RateLimitRequest) GetDuration() int64 { if x != nil { return x.Duration } return 0 } -func (x *RateLimitReq) GetAlgorithm() Algorithm { +func (x *RateLimitRequest) GetAlgorithm() Algorithm { if x != nil { return x.Algorithm } return Algorithm_TOKEN_BUCKET } -func (x *RateLimitReq) GetBehavior() Behavior { +func (x *RateLimitRequest) GetBehavior() Behavior { if x != nil { return x.Behavior } return Behavior_BATCHING } -func (x *RateLimitReq) GetBurst() int64 { +func (x *RateLimitRequest) GetBurst() int64 { if x != nil { return x.Burst } return 0 } -func (x *RateLimitReq) GetMetadata() map[string]string { +func (x *RateLimitRequest) GetMetadata() map[string]string { if x != nil { return x.Metadata } return nil } -func (x *RateLimitReq) GetCreatedAt() int64 { +func (x *RateLimitRequest) GetCreatedAt() int64 { if x != nil && x.CreatedAt != nil { return *x.CreatedAt } return 0 } -type RateLimitResp struct { +type RateLimitResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The status of the rate limit. - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=pb.gubernator.Status" json:"status,omitempty"` - // The currently configured request limit (Identical to [[RateLimitReq.limit]]). + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=gubernator.v3.Status" json:"status,omitempty"` + // The currently configured request limit (Identical to [[RateLimitRequest.limit]]). Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` // This is the number of requests remaining before the rate limit is hit but after subtracting the hits from the current request Remaining int64 `protobuf:"varint,3,opt,name=remaining,proto3" json:"remaining,omitempty"` // This is the time when the rate limit span will be reset, provided as a unix timestamp in milliseconds. - ResetTime int64 `protobuf:"varint,4,opt,name=reset_time,json=resetTime,proto3" json:"reset_time,omitempty"` + ResetTime int64 `protobuf:"varint,4,opt,name=reset_time,proto3" json:"reset_time,omitempty"` // Contains the error; If set all other values should be ignored Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` // This is additional metadata that a client might find useful. (IE: Additional headers, coordinator ownership, etc..) 
Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *RateLimitResp) Reset() { - *x = RateLimitResp{} +func (x *RateLimitResponse) Reset() { + *x = RateLimitResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -517,13 +516,13 @@ func (x *RateLimitResp) Reset() { } } -func (x *RateLimitResp) String() string { +func (x *RateLimitResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RateLimitResp) ProtoMessage() {} +func (*RateLimitResponse) ProtoMessage() {} -func (x *RateLimitResp) ProtoReflect() protoreflect.Message { +func (x *RateLimitResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -535,61 +534,61 @@ func (x *RateLimitResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitResp.ProtoReflect.Descriptor instead. -func (*RateLimitResp) Descriptor() ([]byte, []int) { +// Deprecated: Use RateLimitResponse.ProtoReflect.Descriptor instead. +func (*RateLimitResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{3} } -func (x *RateLimitResp) GetStatus() Status { +func (x *RateLimitResponse) GetStatus() Status { if x != nil { return x.Status } return Status_UNDER_LIMIT } -func (x *RateLimitResp) GetLimit() int64 { +func (x *RateLimitResponse) GetLimit() int64 { if x != nil { return x.Limit } return 0 } -func (x *RateLimitResp) GetRemaining() int64 { +func (x *RateLimitResponse) GetRemaining() int64 { if x != nil { return x.Remaining } return 0 } -func (x *RateLimitResp) GetResetTime() int64 { +func (x *RateLimitResponse) GetResetTime() int64 { if x != nil { return x.ResetTime } return 0 } -func (x *RateLimitResp) GetError() string { +func (x *RateLimitResponse) GetError() string { if x != nil { return x.Error } return "" } -func (x *RateLimitResp) GetMetadata() map[string]string { +func (x *RateLimitResponse) GetMetadata() map[string]string { if x != nil { return x.Metadata } return nil } -type HealthCheckReq struct { +type HealthCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *HealthCheckReq) Reset() { - *x = HealthCheckReq{} +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -597,13 +596,13 @@ func (x *HealthCheckReq) Reset() { } } -func (x *HealthCheckReq) String() string { +func (x *HealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HealthCheckReq) ProtoMessage() {} +func (*HealthCheckRequest) ProtoMessage() {} -func (x *HealthCheckReq) ProtoReflect() protoreflect.Message { +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -615,12 +614,12 @@ func (x *HealthCheckReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HealthCheckReq.ProtoReflect.Descriptor instead. -func (*HealthCheckReq) Descriptor() ([]byte, []int) { +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{4} } -type HealthCheckResp struct { +type HealthCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -629,12 +628,13 @@ type HealthCheckResp struct { Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` // If 'unhealthy', message indicates the problem Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // TODO: Test JSON name ===========!!!! // The number of peers we know about - PeerCount int32 `protobuf:"varint,3,opt,name=peer_count,json=peerCount,proto3" json:"peer_count,omitempty"` + PeerCount int32 `protobuf:"varint,3,opt,name=peer_count,proto3" json:"peer_count,omitempty"` } -func (x *HealthCheckResp) Reset() { - *x = HealthCheckResp{} +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -642,13 +642,13 @@ func (x *HealthCheckResp) Reset() { } } -func (x *HealthCheckResp) String() string { +func (x *HealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HealthCheckResp) ProtoMessage() {} +func (*HealthCheckResponse) ProtoMessage() {} -func (x *HealthCheckResp) ProtoReflect() protoreflect.Message { +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -660,26 +660,26 @@ func (x *HealthCheckResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HealthCheckResp.ProtoReflect.Descriptor instead. -func (*HealthCheckResp) Descriptor() ([]byte, []int) { +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. 
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{5} } -func (x *HealthCheckResp) GetStatus() string { +func (x *HealthCheckResponse) GetStatus() string { if x != nil { return x.Status } return "" } -func (x *HealthCheckResp) GetMessage() string { +func (x *HealthCheckResponse) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *HealthCheckResp) GetPeerCount() int32 { +func (x *HealthCheckResponse) GetPeerCount() int32 { if x != nil { return x.PeerCount } @@ -690,106 +690,93 @@ var File_gubernator_proto protoreflect.FileDescriptor var file_gubernator_proto_rawDesc = []byte{ 0x0a, 0x10, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0d, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x4b, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, - 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, - 0x65, 0x71, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x12, 0x3a, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x22, 0xc1, 0x03, - 0x0a, 0x0c, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x68, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x33, 0x0a, 0x08, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x2e, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x08, 0x62, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x12, 0x14, 0x0a, 
0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, - 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x41, 0x74, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x74, 0x22, 0xac, 0x02, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, - 0x74, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x61, - 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x6d, - 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x73, 0x65, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x46, 0x0a, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x10, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x22, 0x62, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x33, 0x22, 0x55, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 
0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x59, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x22, 0xca, 0x03, 0x0a, 0x10, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x68, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x68, 0x69, 0x74, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, + 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x33, 0x0a, 0x08, 0x62, 0x65, + 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, + 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x08, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x62, 0x75, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 
0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x22, 0xb5, 0x02, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, + 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, + 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x73, + 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, + 0x65, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x4a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x14, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x67, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x65, 0x65, - 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x2a, 0x2f, 0x0a, 0x09, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x42, 0x55, 0x43, - 0x4b, 0x45, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x45, 0x41, 0x4b, 0x59, 0x5f, 0x42, - 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x01, 0x2a, 0x8d, 0x01, 0x0a, 0x08, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, - 0x47, 0x10, 0x01, 
0x12, 0x0a, 0x0a, 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x02, 0x12, - 0x19, 0x0a, 0x15, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x53, 0x5f, 0x47, - 0x52, 0x45, 0x47, 0x4f, 0x52, 0x49, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, - 0x53, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x4d, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x12, - 0x10, 0x0a, 0x0c, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x10, - 0x10, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x5f, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x20, 0x2a, 0x29, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, - 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, - 0x10, 0x01, 0x32, 0xdd, 0x01, 0x0a, 0x02, 0x56, 0x31, 0x12, 0x70, 0x0a, 0x0d, 0x47, 0x65, 0x74, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x20, 0x2e, 0x70, 0x62, - 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x22, 0x1c, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x3a, 0x01, 0x2a, 0x22, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x47, 0x65, - 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x1a, 0x1e, 0x2e, 0x70, 0x62, 0x2e, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x42, 0x28, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x69, 0x6f, 0x2f, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x80, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2a, 0x2f, 0x0a, 0x09, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x45, 0x41, 0x4b, 0x59, 0x5f, + 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x01, 0x2a, 0x8d, 0x01, 0x0a, 0x08, 0x42, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x53, 0x5f, + 0x47, 0x52, 0x45, 
0x47, 0x4f, 0x52, 0x49, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, + 0x45, 0x53, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x4d, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x08, + 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, + 0x10, 0x10, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x56, 0x45, 0x52, + 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x20, 0x2a, 0x29, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, 0x49, + 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, 0x49, + 0x54, 0x10, 0x01, 0x42, 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x69, 0x6f, 0x2f, + 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -807,32 +794,28 @@ func file_gubernator_proto_rawDescGZIP() []byte { var file_gubernator_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_gubernator_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_gubernator_proto_goTypes = []interface{}{ - (Algorithm)(0), // 0: pb.gubernator.Algorithm - (Behavior)(0), // 1: pb.gubernator.Behavior - (Status)(0), // 2: pb.gubernator.Status - (*GetRateLimitsReq)(nil), // 3: pb.gubernator.GetRateLimitsReq - (*GetRateLimitsResp)(nil), // 4: pb.gubernator.GetRateLimitsResp - (*RateLimitReq)(nil), // 5: pb.gubernator.RateLimitReq - (*RateLimitResp)(nil), // 6: pb.gubernator.RateLimitResp - (*HealthCheckReq)(nil), // 7: pb.gubernator.HealthCheckReq - (*HealthCheckResp)(nil), // 8: pb.gubernator.HealthCheckResp - nil, // 9: pb.gubernator.RateLimitReq.MetadataEntry - nil, // 10: pb.gubernator.RateLimitResp.MetadataEntry + (Algorithm)(0), // 0: gubernator.v3.Algorithm + (Behavior)(0), // 1: gubernator.v3.Behavior + (Status)(0), // 2: gubernator.v3.Status + (*CheckRateLimitsRequest)(nil), // 3: gubernator.v3.CheckRateLimitsRequest + (*CheckRateLimitsResponse)(nil), // 4: gubernator.v3.CheckRateLimitsResponse + (*RateLimitRequest)(nil), // 5: gubernator.v3.RateLimitRequest + (*RateLimitResponse)(nil), // 6: gubernator.v3.RateLimitResponse + (*HealthCheckRequest)(nil), // 7: gubernator.v3.HealthCheckRequest + (*HealthCheckResponse)(nil), // 8: gubernator.v3.HealthCheckResponse + nil, // 9: gubernator.v3.RateLimitRequest.MetadataEntry + nil, // 10: gubernator.v3.RateLimitResponse.MetadataEntry } var file_gubernator_proto_depIdxs = []int32{ - 5, // 0: pb.gubernator.GetRateLimitsReq.requests:type_name -> pb.gubernator.RateLimitReq - 6, // 1: pb.gubernator.GetRateLimitsResp.responses:type_name -> pb.gubernator.RateLimitResp - 0, // 2: pb.gubernator.RateLimitReq.algorithm:type_name -> pb.gubernator.Algorithm - 1, // 3: pb.gubernator.RateLimitReq.behavior:type_name -> pb.gubernator.Behavior - 9, // 4: pb.gubernator.RateLimitReq.metadata:type_name -> pb.gubernator.RateLimitReq.MetadataEntry - 2, // 5: pb.gubernator.RateLimitResp.status:type_name -> pb.gubernator.Status - 10, // 6: pb.gubernator.RateLimitResp.metadata:type_name -> pb.gubernator.RateLimitResp.MetadataEntry - 3, // 7: pb.gubernator.V1.GetRateLimits:input_type -> pb.gubernator.GetRateLimitsReq - 7, // 8: pb.gubernator.V1.HealthCheck:input_type -> pb.gubernator.HealthCheckReq - 4, // 9: pb.gubernator.V1.GetRateLimits:output_type -> pb.gubernator.GetRateLimitsResp - 8, // 10: pb.gubernator.V1.HealthCheck:output_type -> 
pb.gubernator.HealthCheckResp - 9, // [9:11] is the sub-list for method output_type - 7, // [7:9] is the sub-list for method input_type + 5, // 0: gubernator.v3.CheckRateLimitsRequest.requests:type_name -> gubernator.v3.RateLimitRequest + 6, // 1: gubernator.v3.CheckRateLimitsResponse.responses:type_name -> gubernator.v3.RateLimitResponse + 0, // 2: gubernator.v3.RateLimitRequest.algorithm:type_name -> gubernator.v3.Algorithm + 1, // 3: gubernator.v3.RateLimitRequest.behavior:type_name -> gubernator.v3.Behavior + 9, // 4: gubernator.v3.RateLimitRequest.metadata:type_name -> gubernator.v3.RateLimitRequest.MetadataEntry + 2, // 5: gubernator.v3.RateLimitResponse.status:type_name -> gubernator.v3.Status + 10, // 6: gubernator.v3.RateLimitResponse.metadata:type_name -> gubernator.v3.RateLimitResponse.MetadataEntry + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type 7, // [7:7] is the sub-list for extension type_name 7, // [7:7] is the sub-list for extension extendee 0, // [0:7] is the sub-list for field type_name @@ -845,7 +828,7 @@ func file_gubernator_proto_init() { } if !protoimpl.UnsafeEnabled { file_gubernator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRateLimitsReq); i { + switch v := v.(*CheckRateLimitsRequest); i { case 0: return &v.state case 1: @@ -857,7 +840,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRateLimitsResp); i { + switch v := v.(*CheckRateLimitsResponse); i { case 0: return &v.state case 1: @@ -869,7 +852,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitReq); i { + switch v := v.(*RateLimitRequest); i { case 0: return &v.state case 1: @@ -881,7 +864,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitResp); i { + switch v := v.(*RateLimitResponse); i { case 0: return &v.state case 1: @@ -893,7 +876,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckReq); i { + switch v := v.(*HealthCheckRequest); i { case 0: return &v.state case 1: @@ -905,7 +888,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResp); i { + switch v := v.(*HealthCheckResponse); i { case 0: return &v.state case 1: @@ -926,7 +909,7 @@ func file_gubernator_proto_init() { NumEnums: 3, NumMessages: 8, NumExtensions: 0, - NumServices: 1, + NumServices: 0, }, GoTypes: file_gubernator_proto_goTypes, DependencyIndexes: file_gubernator_proto_depIdxs, diff --git a/gubernator.pb.gw.go b/gubernator.pb.gw.go deleted file mode 100644 index bb46059..0000000 --- a/gubernator.pb.gw.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: gubernator.proto - -/* -Package gubernator is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package gubernator - -import ( - "context" - "io" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - -func request_V1_GetRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client V1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_V1_GetRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server V1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetRateLimits(ctx, &protoReq) - return msg, metadata, err - -} - -func request_V1_HealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client V1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HealthCheckReq - var metadata runtime.ServerMetadata - - msg, err := client.HealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_V1_HealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, server V1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HealthCheckReq - var metadata runtime.ServerMetadata - - msg, err := server.HealthCheck(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterV1HandlerServer registers the http handlers for service V1 to "mux". -// UnaryRPC :call V1Server directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterV1HandlerFromEndpoint instead. 
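> Editor's aside: with the grpc-gateway registration above deleted, v3 serves these routes from the plain `net/http` handler added later in this patch (`handler.go`). A hedged sketch of standing the server up, assuming an already-constructed `*gubernator.Service` (normally wired by `daemon.go`) and the port from the compose example:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	gubernator "github.com/gubernator-io/gubernator/v3"
)

func serve(svc *gubernator.Service) {
	// NewHandler routes /v1/rate-limit.check, /v1/health.check, the peer
	// RPCs, plus /metrics and /healthz; no gateway proxy is involved.
	h := gubernator.NewHandler(svc, promhttp.Handler())
	log.Fatal(http.ListenAndServe("localhost:9080", h))
}
```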
-func RegisterV1HandlerServer(ctx context.Context, mux *runtime.ServeMux, server V1Server) error { - - mux.Handle("POST", pattern_V1_GetRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.V1/GetRateLimits", runtime.WithHTTPPathPattern("/v1/GetRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_V1_GetRateLimits_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_GetRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_V1_HealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.V1/HealthCheck", runtime.WithHTTPPathPattern("/v1/HealthCheck")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_V1_HealthCheck_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_HealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterV1HandlerFromEndpoint is same as RegisterV1Handler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterV1HandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterV1Handler(ctx, mux, conn) -} - -// RegisterV1Handler registers the http handlers for service V1 to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterV1Handler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterV1HandlerClient(ctx, mux, NewV1Client(conn)) -} - -// RegisterV1HandlerClient registers the http handlers for service V1 -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "V1Client". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "V1Client" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "V1Client" to call the correct interceptors. -func RegisterV1HandlerClient(ctx context.Context, mux *runtime.ServeMux, client V1Client) error { - - mux.Handle("POST", pattern_V1_GetRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.V1/GetRateLimits", runtime.WithHTTPPathPattern("/v1/GetRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_V1_GetRateLimits_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_GetRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_V1_HealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.V1/HealthCheck", runtime.WithHTTPPathPattern("/v1/HealthCheck")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_V1_HealthCheck_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_HealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_V1_GetRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "GetRateLimits"}, "")) - - pattern_V1_HealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "HealthCheck"}, "")) -) - -var ( - forward_V1_GetRateLimits_0 = runtime.ForwardResponseMessage - - forward_V1_HealthCheck_0 = runtime.ForwardResponseMessage -) diff --git a/gubernator.proto b/gubernator.proto index 626eabc..ee4c9cb 100644 --- a/gubernator.proto +++ b/gubernator.proto @@ -17,40 +17,17 @@ limitations under the License. 
syntax = "proto3"; option go_package = "github.com/gubernator-io/gubernator"; +package gubernator.v3; -option cc_generic_services = true; -package pb.gubernator; - -import "google/api/annotations.proto"; - -service V1 { - - // Given a list of rate limit requests, return the rate limits of each. - rpc GetRateLimits (GetRateLimitsReq) returns (GetRateLimitsResp) { - option (google.api.http) = { - post: "/v1/GetRateLimits" - body: "*" - }; - } - - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - rpc HealthCheck (HealthCheckReq) returns (HealthCheckResp) { - option (google.api.http) = { - get: "/v1/HealthCheck" - }; - } -} - -// Must specify at least one Request -message GetRateLimitsReq { - repeated RateLimitReq requests = 1; +// Must specify at least one RateLimitRequest +message CheckRateLimitsRequest { + repeated RateLimitRequest requests = 1; } -// RateLimits returned are in the same order as the Requests -message GetRateLimitsResp { - repeated RateLimitResp responses = 1; +// RateLimits returned are in the same order provided in CheckRateLimitsRequest +message CheckRateLimitsResponse { + repeated RateLimitResponse responses = 1; } enum Algorithm { @@ -134,12 +111,12 @@ enum Behavior { // TODO: Add support for LOCAL. Which would force the rate limit to be handled by the local instance } -message RateLimitReq { +message RateLimitRequest { // The name of the rate limit IE: 'requests_per_second', 'gets_per_minute` string name = 1; // Uniquely identifies this rate limit IE: 'ip:10.2.10.7' or 'account:123445' - string unique_key = 2; + string unique_key = 2 [json_name="unique_key"]; // Rate limit requests optionally specify the number of hits a request adds to the matched limit. If Hit // is zero, the request returns the current limit, but does not increment the hit count. @@ -187,27 +164,28 @@ enum Status { OVER_LIMIT = 1; } -message RateLimitResp { +message RateLimitResponse { // The status of the rate limit. Status status = 1; - // The currently configured request limit (Identical to [[RateLimitReq.limit]]). + // The currently configured request limit (Identical to [[RateLimitRequest.limit]]). int64 limit = 2; // This is the number of requests remaining before the rate limit is hit but after subtracting the hits from the current request int64 remaining = 3; // This is the time when the rate limit span will be reset, provided as a unix timestamp in milliseconds. - int64 reset_time = 4; + int64 reset_time = 4 [json_name="reset_time"]; // Contains the error; If set all other values should be ignored string error = 5; // This is additional metadata that a client might find useful. (IE: Additional headers, coordinator ownership, etc..) map metadata = 6; } -message HealthCheckReq {} -message HealthCheckResp { +message HealthCheckRequest {} +message HealthCheckResponse { // Valid entries are 'healthy' or 'unhealthy' string status = 1; // If 'unhealthy', message indicates the problem string message = 2; + // TODO: Test JSON name ===========!!!! // The number of peers we know about - int32 peer_count = 3; + int32 peer_count = 3 [json_name="peer_count"]; } diff --git a/gubernator_grpc.pb.go b/gubernator_grpc.pb.go deleted file mode 100644 index 209b75a..0000000 --- a/gubernator_grpc.pb.go +++ /dev/null @@ -1,165 +0,0 @@ -// -//Copyright 2018-2022 Mailgun Technologies Inc -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. 
-//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: gubernator.proto - -package gubernator - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - V1_GetRateLimits_FullMethodName = "/pb.gubernator.V1/GetRateLimits" - V1_HealthCheck_FullMethodName = "/pb.gubernator.V1/HealthCheck" -) - -// V1Client is the client API for V1 service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type V1Client interface { - // Given a list of rate limit requests, return the rate limits of each. - GetRateLimits(ctx context.Context, in *GetRateLimitsReq, opts ...grpc.CallOption) (*GetRateLimitsResp, error) - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - HealthCheck(ctx context.Context, in *HealthCheckReq, opts ...grpc.CallOption) (*HealthCheckResp, error) -} - -type v1Client struct { - cc grpc.ClientConnInterface -} - -func NewV1Client(cc grpc.ClientConnInterface) V1Client { - return &v1Client{cc} -} - -func (c *v1Client) GetRateLimits(ctx context.Context, in *GetRateLimitsReq, opts ...grpc.CallOption) (*GetRateLimitsResp, error) { - out := new(GetRateLimitsResp) - err := c.cc.Invoke(ctx, V1_GetRateLimits_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *v1Client) HealthCheck(ctx context.Context, in *HealthCheckReq, opts ...grpc.CallOption) (*HealthCheckResp, error) { - out := new(HealthCheckResp) - err := c.cc.Invoke(ctx, V1_HealthCheck_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// V1Server is the server API for V1 service. -// All implementations should embed UnimplementedV1Server -// for forward compatibility -type V1Server interface { - // Given a list of rate limit requests, return the rate limits of each. - GetRateLimits(context.Context, *GetRateLimitsReq) (*GetRateLimitsResp, error) - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - HealthCheck(context.Context, *HealthCheckReq) (*HealthCheckResp, error) -} - -// UnimplementedV1Server should be embedded to have forward compatible implementations. 
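> Editor's aside: the deleted `V1Client` above is how v2 callers invoked `GetRateLimits` over gRPC; in v3 the equivalent check is a plain HTTP POST against the new route table. A hedged sketch (the `/v1/rate-limit.check` path comes from `handler.go` later in this patch; the port and the duh-rpc layer's acceptance of JSON bodies are assumptions):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Field names follow the proto json_name annotations: unique_key, not uniqueKey.
	body := `{"requests":[{"name":"requests_per_sec","unique_key":"account:12345","hits":1,"limit":10,"duration":60000}]}`
	resp, err := http.Post("http://localhost:9080/v1/rate-limit.check",
		"application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```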
-type UnimplementedV1Server struct { -} - -func (UnimplementedV1Server) GetRateLimits(context.Context, *GetRateLimitsReq) (*GetRateLimitsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRateLimits not implemented") -} -func (UnimplementedV1Server) HealthCheck(context.Context, *HealthCheckReq) (*HealthCheckResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented") -} - -// UnsafeV1Server may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to V1Server will -// result in compilation errors. -type UnsafeV1Server interface { - mustEmbedUnimplementedV1Server() -} - -func RegisterV1Server(s grpc.ServiceRegistrar, srv V1Server) { - s.RegisterService(&V1_ServiceDesc, srv) -} - -func _V1_GetRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRateLimitsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(V1Server).GetRateLimits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: V1_GetRateLimits_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(V1Server).GetRateLimits(ctx, req.(*GetRateLimitsReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _V1_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(V1Server).HealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: V1_HealthCheck_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(V1Server).HealthCheck(ctx, req.(*HealthCheckReq)) - } - return interceptor(ctx, in, info, handler) -} - -// V1_ServiceDesc is the grpc.ServiceDesc for V1 service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var V1_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pb.gubernator.V1", - HandlerType: (*V1Server)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetRateLimits", - Handler: _V1_GetRateLimits_Handler, - }, - { - MethodName: "HealthCheck", - Handler: _V1_HealthCheck_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "gubernator.proto", -} diff --git a/handler.go b/handler.go new file mode 100644 index 0000000..f2c9cc7 --- /dev/null +++ b/handler.go @@ -0,0 +1,168 @@ +package gubernator + +import ( + "context" + "fmt" + "net/http" + + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" + "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/propagation" +) + +const ( + RPCPeerForward = "/v1/peer.forward" + RPCPeerUpdate = "/v1/peer.update" + RPCRateLimitCheck = "/v1/rate-limit.check" + RPCHealthCheck = "/v1/health.check" +) + +type Handler struct { + prop propagation.TraceContext + duration *prometheus.SummaryVec + metrics http.Handler + service *Service +} + +func NewHandler(s *Service, metrics http.Handler) *Handler { + return &Handler{ + duration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: "gubernator_http_handler_duration", + Help: "The timings of http requests handled by the service", + Objectives: map[float64]float64{ + 0.5: 0.05, + 0.99: 0.001, + }, + }, []string{"path"}), + metrics: metrics, + service: s, + } +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer prometheus.NewTimer(h.duration.WithLabelValues(r.URL.Path)).ObserveDuration() + ctx := h.prop.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + + switch r.URL.Path { + case RPCPeerForward: + h.PeerForward(ctx, w, r) + return + case RPCPeerUpdate: + h.PeerUpdate(ctx, w, r) + return + case RPCRateLimitCheck: + h.CheckRateLimit(ctx, w, r) + return + case RPCHealthCheck: + h.HealthCheck(w, r) + return + case "/metrics": + h.metrics.ServeHTTP(w, r) + return + case "/healthz": + h.HealthZ(w, r) + return + } + duh.ReplyWithCode(w, r, duh.CodeNotImplemented, nil, "no such method; "+r.URL.Path) +} + +func (h *Handler) PeerForward(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req ForwardRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp ForwardResponse + if err := h.service.Forward(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) PeerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req UpdateRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp v1.Reply + if err := h.service.Update(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) CheckRateLimit(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; 
only POST", r.Method)) + return + } + + var req CheckRateLimitsRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + + var resp CheckRateLimitsResponse + if err := h.service.CheckRateLimits(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) HealthCheck(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req HealthCheckRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp HealthCheckResponse + if err := h.service.HealthCheck(r.Context(), &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) HealthZ(w http.ResponseWriter, r *http.Request) { + var resp HealthCheckResponse + if err := h.service.HealthCheck(r.Context(), nil, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +// Describe fetches prometheus metrics to be registered +func (h *Handler) Describe(ch chan<- *prometheus.Desc) { + h.duration.Describe(ch) +} + +// Collect fetches metrics from the server for use by prometheus +func (h *Handler) Collect(ch chan<- prometheus.Metric) { + h.duration.Collect(ch) +} diff --git a/interval_test.go b/interval_test.go index e1d439c..6bb1003 100644 --- a/interval_test.go +++ b/interval_test.go @@ -19,7 +19,7 @@ package gubernator_test import ( "testing" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/kubernetes.go b/kubernetes.go index e25ebf7..818e116 100644 --- a/kubernetes.go +++ b/kubernetes.go @@ -195,7 +195,7 @@ main: e.log.Errorf("expected type v1.Endpoints got '%s' instead", reflect.TypeOf(obj).String()) } - peer := PeerInfo{GRPCAddress: fmt.Sprintf("%s:%s", pod.Status.PodIP, e.conf.PodPort)} + peer := PeerInfo{HTTPAddress: fmt.Sprintf("%s:%s", pod.Status.PodIP, e.conf.PodPort)} // if containers are not ready or not running then skip this peer for _, status := range pod.Status.ContainerStatuses { @@ -228,7 +228,7 @@ func (e *K8sPool) updatePeersFromEndpoints() { // TODO(thrawn01): Might consider using the `namespace` as the `DataCenter`. We should // do what ever k8s convention is for identifying a k8s cluster within a federated multi-data // center setup. 
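> Editor's aside, stepping back to `handler.go` above: `Handler` implements `prometheus.Collector` via its `Describe`/`Collect` methods, so its request-duration summary can be registered on the same registry it serves at `/metrics`. A sketch under the same assumptions as before (`svc` construction elided):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	gubernator "github.com/gubernator-io/gubernator/v3"
)

func serveWithMetrics(svc *gubernator.Service) {
	reg := prometheus.NewRegistry()
	h := gubernator.NewHandler(svc, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	// Handler is itself a Collector, so its gubernator_http_handler_duration
	// summary is scraped through the /metrics route that Handler serves.
	reg.MustRegister(h)
	log.Fatal(http.ListenAndServe("localhost:9080", h))
}
```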
- peer := PeerInfo{GRPCAddress: fmt.Sprintf("%s:%s", addr.IP, e.conf.PodPort)} + peer := PeerInfo{HTTPAddress: fmt.Sprintf("%s:%s", addr.IP, e.conf.PodPort)} if addr.IP == e.conf.PodIP { peer.IsOwner = true diff --git a/lrucache_test.go b/lrucache_test.go index 51f33bc..d2a6622 100644 --- a/lrucache_test.go +++ b/lrucache_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" diff --git a/memberlist.go b/memberlist.go index 1d65434..c9e2e16 100644 --- a/memberlist.go +++ b/memberlist.go @@ -224,7 +224,7 @@ func (e *memberListEventHandler) callOnUpdate() { var peers []PeerInfo for _, p := range e.peers { - if p.GRPCAddress == e.conf.Advertise.GRPCAddress { + if p.HTTPAddress == e.conf.Advertise.HTTPAddress { p.IsOwner = true } peers = append(peers, p) @@ -260,7 +260,7 @@ func unmarshallPeer(b []byte, ip string) (PeerInfo, error) { if metadata.AdvertiseAddress == "" { metadata.AdvertiseAddress = makeAddress(ip, metadata.GubernatorPort) } - return PeerInfo{GRPCAddress: metadata.AdvertiseAddress, DataCenter: metadata.DataCenter}, nil + return PeerInfo{HTTPAddress: metadata.AdvertiseAddress, DataCenter: metadata.DataCenter}, nil } return peer, nil } diff --git a/metadata_carrier_test.go b/metadata_carrier_test.go index 618a288..1b27191 100644 --- a/metadata_carrier_test.go +++ b/metadata_carrier_test.go @@ -19,7 +19,7 @@ package gubernator_test import ( "testing" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/stretchr/testify/assert" ) diff --git a/mock_cache_test.go b/mock_cache_test.go index 3eea640..96e9942 100644 --- a/mock_cache_test.go +++ b/mock_cache_test.go @@ -19,7 +19,7 @@ package gubernator_test // Mock implementation of Cache. import ( - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" "github.com/stretchr/testify/mock" ) diff --git a/mock_loader_test.go b/mock_loader_test.go index 4c58e84..8b22aa0 100644 --- a/mock_loader_test.go +++ b/mock_loader_test.go @@ -19,7 +19,7 @@ package gubernator_test // Mock implementation of Loader. 
import ( - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" "github.com/stretchr/testify/mock" ) diff --git a/mock_store_test.go b/mock_store_test.go index 8a2f356..72c8d3f 100644 --- a/mock_store_test.go +++ b/mock_store_test.go @@ -21,7 +21,7 @@ package gubernator_test import ( "context" - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" "github.com/stretchr/testify/mock" ) @@ -31,11 +31,11 @@ type MockStore2 struct { var _ guber.Store = &MockStore2{} -func (m *MockStore2) OnChange(ctx context.Context, r *guber.RateLimitReq, item *guber.CacheItem) { +func (m *MockStore2) OnChange(ctx context.Context, r *guber.RateLimitRequest, item *guber.CacheItem) { m.Called(ctx, r, item) } -func (m *MockStore2) Get(ctx context.Context, r *guber.RateLimitReq) (*guber.CacheItem, bool) { +func (m *MockStore2) Get(ctx context.Context, r *guber.RateLimitRequest) (*guber.CacheItem, bool) { args := m.Called(ctx, r) retval, _ := args.Get(0).(*guber.CacheItem) return retval, args.Bool(1) diff --git a/peer.go b/peer.go new file mode 100644 index 0000000..d9fd345 --- /dev/null +++ b/peer.go @@ -0,0 +1,413 @@ +/* +Copyright 2018-2023 Mailgun Technologies Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gubernator + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "github.com/mailgun/errors" + "github.com/mailgun/holster/v4/clock" + "github.com/mailgun/holster/v4/collections" + "github.com/mailgun/holster/v4/ctxutil" + "github.com/mailgun/holster/v4/setter" + "github.com/mailgun/holster/v4/tracing" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type Peer struct { + lastErrs *collections.LRUCache + wg sync.WaitGroup + queue chan *request + mutex sync.RWMutex + client PeerClient + Conf PeerConfig + inShutdown int64 +} + +type response struct { + rl *RateLimitResponse + err error +} + +type request struct { + reqState RateLimitRequestState + request *RateLimitRequest + ctx context.Context + resp chan *response +} + +type PeerConfig struct { + PeerClient PeerClient + Behavior BehaviorConfig + Info PeerInfo + Log FieldLogger +} + +type PeerClient interface { + Forward(context.Context, *ForwardRequest, *ForwardResponse) error + Update(context.Context, *UpdateRequest) error +} + +func NewPeer(conf PeerConfig) (*Peer, error) { + if len(conf.Info.HTTPAddress) == 0 { + return nil, errors.New("Peer.Info.HTTPAddress is empty; must provide an address") + } + + setter.SetDefault(&conf.PeerClient, NewPeerClient(WithNoTLS(conf.Info.HTTPAddress))) + setter.SetDefault(&conf.Log, logrus.WithField("category", "Peer")) + + p := &Peer{ + lastErrs: collections.NewLRUCache(100), + queue: make(chan *request, 1000), + client: conf.PeerClient, + Conf: conf, + } + go p.run() + return p, nil +} + +// Info returns PeerInfo struct that describes this Peer +func (p *Peer) Info() PeerInfo { + return p.Conf.Info +} + +var ( + // TODO: Should retry in this case + ErrPeerShutdown = errors.New("peer is in shutdown; try a different peer") +) + +// Forward forwards a rate limit request to a peer. +// If the rate limit has `behavior == BATCHING` configured, this method will attempt to batch the rate limits +func (p *Peer) Forward(ctx context.Context, r *RateLimitRequest) (resp *RateLimitResponse, err error) { + ctx = tracing.StartNamedScope(ctx, "Peer.Forward") + defer func() { tracing.EndScope(ctx, err) }() + span := trace.SpanFromContext(ctx) + span.SetAttributes( + attribute.String("peer.HTTPAddress", p.Conf.Info.HTTPAddress), + attribute.String("peer.Datacenter", p.Conf.Info.DataCenter), + attribute.String("request.key", r.UniqueKey), + attribute.String("request.name", r.Name), + attribute.Int64("request.algorithm", int64(r.Algorithm)), + attribute.Int64("request.behavior", int64(r.Behavior)), + attribute.Int64("request.duration", r.Duration), + attribute.Int64("request.limit", r.Limit), + attribute.Int64("request.hits", r.Hits), + attribute.Int64("request.burst", r.Burst), + ) + + if atomic.LoadInt64(&p.inShutdown) == 1 { + return nil, ErrPeerShutdown + } + + // NOTE: Add() must be done within the RLock since we must ensure all in-flight Forward() + // requests are done before calls to Close() can complete. We can't just wg.Wait() for + // since there may be Forward() call that is executing at this very code spot when Close() + // is called. In that scenario wg.Add() and wg.Wait() are in a race. 
+ p.mutex.RLock() + p.wg.Add(1) + defer func() { + p.mutex.RUnlock() + defer p.wg.Done() + }() + + // If config asked for no batching + if HasBehavior(r.Behavior, Behavior_NO_BATCHING) { + // If no metadata is provided + if r.Metadata == nil { + r.Metadata = make(map[string]string) + } + // Propagate the trace context along with the rate limit so + // peers can continue to report traces for this rate limit. + prop := propagation.TraceContext{} + prop.Inject(ctx, &MetadataCarrier{Map: r.Metadata}) + + // Forward a single rate limit + var fr ForwardResponse + err = p.ForwardBatch(ctx, &ForwardRequest{ + Requests: []*RateLimitRequest{r}, + }, &fr) + if err != nil { + err = errors.Wrap(err, "Error in forward") + return nil, p.setLastErr(err) + } + return fr.RateLimits[0], nil + } + + resp, err = p.forwardBatch(ctx, r) + if err != nil { + err = errors.Wrap(err, "Error in forwardBatch") + return nil, p.setLastErr(err) + } + + return resp, nil +} + +// ForwardBatch requests a list of rate limit statuses from a peer +func (p *Peer) ForwardBatch(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) (err error) { + ctx = tracing.StartNamedScopeDebug(ctx, "Peer.forward") + defer func() { tracing.EndScope(ctx, err) }() + + if err = p.client.Forward(ctx, req, resp); err != nil { + return p.setLastErr(errors.Wrap(err, "Error in client.Forward()")) + } + + // Unlikely, but this avoids a panic if something wonky happens + if len(resp.RateLimits) != len(req.Requests) { + return p.setLastErr( + errors.New("number of rate limits in peer response does not match request")) + } + return nil +} + +// Update sends rate limit status updates to a peer +func (p *Peer) Update(ctx context.Context, req *UpdateRequest) (err error) { + ctx = tracing.StartNamedScope(ctx, "Peer.Update") + defer func() { tracing.EndScope(ctx, err) }() + + err = p.client.Update(ctx, req) + if err != nil { + _ = p.setLastErr(err) + } + return err +} + +func (p *Peer) GetLastErr() []string { + var errs []string + keys := p.lastErrs.Keys() + + // Get errors from each key in the cache + for _, key := range keys { + err, ok := p.lastErrs.Get(key) + if ok { + errs = append(errs, err.(error).Error()) + } + } + + return errs +} + +// Close will gracefully close all client connections, until the context is canceled +func (p *Peer) Close(ctx context.Context) error { + if atomic.LoadInt64(&p.inShutdown) == 1 { + return nil + } + + atomic.AddInt64(&p.inShutdown, 1) + + // This allows us to wait on the wait group, or until the context + // has been canceled. 
+ waitChan := make(chan struct{}) + go func() { + p.mutex.Lock() + p.wg.Wait() + close(p.queue) + p.mutex.Unlock() + close(waitChan) + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-waitChan: + return nil + } +} + +func (p *Peer) forwardBatch(ctx context.Context, r *RateLimitRequest) (resp *RateLimitResponse, err error) { + ctx = tracing.StartNamedScopeDebug(ctx, "Peer.forwardBatch") + defer func() { tracing.EndScope(ctx, err) }() + + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Peer.forwardBatch")) + defer funcTimer.ObserveDuration() + + if atomic.LoadInt64(&p.inShutdown) == 1 { + return nil, p.setLastErr(ErrPeerShutdown) + } + + // Wait for a response or context cancel + ctx2 := tracing.StartNamedScopeDebug(ctx, "Wait for response") + defer tracing.EndScope(ctx2, nil) + + req := request{ + resp: make(chan *response, 1), + ctx: ctx2, + request: r, + } + + // Enqueue the request to be sent + peerAddr := p.Info().HTTPAddress + metricBatchQueueLength.WithLabelValues(peerAddr).Set(float64(len(p.queue))) + + select { + case p.queue <- &req: + // Successfully enqueued request. + case <-ctx2.Done(): + return nil, errors.Wrap(ctx2.Err(), "Context error while enqueuing request") + } + + p.wg.Add(1) + defer func() { + p.wg.Done() + }() + + select { + case re := <-req.resp: + if re.err != nil { + err := errors.Wrap(p.setLastErr(re.err), "Request error") + return nil, p.setLastErr(err) + } + return re.rl, nil + case <-ctx2.Done(): + return nil, errors.Wrap(ctx2.Err(), "Context error while waiting for response") + } +} + +// run waits for requests to be queued, when either c.batchWait time +// has elapsed or the queue reaches c.batchLimit. Send what is in the queue. +func (p *Peer) run() { + var interval = NewInterval(p.Conf.Behavior.BatchWait) + defer interval.Stop() + + var queue []*request + + for { + select { + case r, ok := <-p.queue: + // If the queue has closed, we need to send the rest of the queue + if !ok { + if len(queue) > 0 { + p.sendBatch(queue) + } + return + } + + queue = append(queue, r) + // Send the queue if we reached our batch limit + if len(queue) >= p.Conf.Behavior.BatchLimit { + p.Conf.Log.WithFields(logrus.Fields{ + "queueLen": len(queue), + "batchLimit": p.Conf.Behavior.BatchLimit, + }).Debug("run() reached batch limit") + ref := queue + queue = nil + go p.sendBatch(ref) + continue + } + + // If this is our first enqueued item since last + // sendBatch, reset interval timer. + if len(queue) == 1 { + interval.Next() + } + continue + + case <-interval.C: + queue2 := queue + + if len(queue2) > 0 { + queue = nil + go p.sendBatch(queue2) + } + } + } +} + +// sendBatch sends the queue provided and returns the responses to +// waiting go routines +func (p *Peer) sendBatch(queue []*request) { + ctx := tracing.StartNamedScopeDebug(context.Background(), "Peer.sendBatch") + defer tracing.EndScope(ctx, nil) + + batchSendTimer := prometheus.NewTimer(metricBatchSendDuration.WithLabelValues(p.Conf.Info.HTTPAddress)) + defer batchSendTimer.ObserveDuration() + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Peer.sendBatch")) + defer funcTimer.ObserveDuration() + + var req ForwardRequest + for _, r := range queue { + // NOTE: This trace has the same name because it's in a separate trace than the one above. + // We link the two traces, so we can relate our rate limit trace back to the above trace. 
+ r.ctx = tracing.StartNamedScopeDebug(r.ctx, "Peer.sendBatch", + trace.WithLinks(trace.LinkFromContext(ctx))) + // If no metadata is provided + if r.request.Metadata == nil { + r.request.Metadata = make(map[string]string) + } + // Propagate the trace context along with the batched rate limit so + // peers can continue to report traces for this rate limit. + prop := propagation.TraceContext{} + prop.Inject(r.ctx, &MetadataCarrier{Map: r.request.Metadata}) + req.Requests = append(req.Requests, r.request) + tracing.EndScope(r.ctx, nil) + + } + + ctx, cancel := ctxutil.WithTimeout(ctx, p.Conf.Behavior.BatchTimeout) + var resp ForwardResponse + err := p.client.Forward(ctx, &req, &resp) + cancel() + + // An error here indicates the entire request failed + if err != nil { + err = errors.Wrap(err, "Error in client.forward") + p.Conf.Log.WithFields(logrus.Fields{ + "batchTimeout": p.Conf.Behavior.BatchTimeout.String(), + "queueLen": len(queue), + "error": err, + }).Error("Error in client.forward") + _ = p.setLastErr(err) + + for _, r := range queue { + r.resp <- &response{err: err} + } + return + } + + // Unlikely, but this avoids a panic if something wonky happens + if len(resp.RateLimits) != len(queue) { + for _, r := range queue { + r.resp <- &response{err: errors.New("server responded with incorrect rate limit list size")} + } + return + } + + // Provide responses to channels waiting in the queue + for i, r := range queue { + r.resp <- &response{rl: resp.RateLimits[i]} + } +} + +func (p *Peer) setLastErr(err error) error { + // If we get a nil error return without caching it + if err == nil { + return err + } + + // Add error to the cache with a TTL of 5 minutes + p.lastErrs.AddWithTTL(err.Error(), + errors.Wrap(err, fmt.Sprintf("from host %s", p.Conf.Info.HTTPAddress)), + clock.Minute*5) + + return err +} diff --git a/peer.pb.go b/peer.pb.go new file mode 100644 index 0000000..22fd4d3 --- /dev/null +++ b/peer.pb.go @@ -0,0 +1,425 @@ +// +//Copyright 2018-2022 Mailgun Technologies Inc +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc (unknown) +// source: peer.proto + +package gubernator + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ForwardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must specify at least one RateLimit. 
The peer that receives this request MUST be authoritative for + // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers + Requests []*RateLimitRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +} + +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForwardRequest) ProtoMessage() {} + +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. +func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{0} +} + +func (x *ForwardRequest) GetRequests() []*RateLimitRequest { + if x != nil { + return x.Requests + } + return nil +} + +type ForwardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Responses are in the same order as they appeared in the PeerRateLimitRequestuests + RateLimits []*RateLimitResponse `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` +} + +func (x *ForwardResponse) Reset() { + *x = ForwardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForwardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForwardResponse) ProtoMessage() {} + +func (x *ForwardResponse) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardResponse.ProtoReflect.Descriptor instead. 
+func (*ForwardResponse) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{1} +} + +func (x *ForwardResponse) GetRateLimits() []*RateLimitResponse { + if x != nil { + return x.RateLimits + } + return nil +} + +type UpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must specify at least one RateLimit + Globals []*UpdateRateLimit `protobuf:"bytes,1,rep,name=globals,proto3" json:"globals,omitempty"` +} + +func (x *UpdateRequest) Reset() { + *x = UpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRequest) ProtoMessage() {} + +func (x *UpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. +func (*UpdateRequest) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateRequest) GetGlobals() []*UpdateRateLimit { + if x != nil { + return x.Globals + } + return nil +} + +type UpdateRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Uniquely identifies this rate limit IE: 'ip:10.2.10.7' or 'account:123445' + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The rate limit state to update + State *RateLimitResponse `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // The algorithm used to calculate the rate limit. The algorithm may change on + // subsequent requests, when this occurs any previous rate limit hit counts are reset. + Algorithm Algorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=gubernator.v3.Algorithm" json:"algorithm,omitempty"` + // The duration of the rate limit in milliseconds + Duration int64 `protobuf:"varint,4,opt,name=duration,proto3" json:"duration,omitempty"` + // The exact time the original request was created in Epoch milliseconds. + // Due to time drift between systems, it may be advantageous for a client to + // set the exact time the request was created. It possible the system clock + // for the client has drifted from the system clock where gubernator daemon + // is running. + // + // The created time is used by gubernator to calculate the reset time for + // both token and leaky algorithms. If it is not set by the client, + // gubernator will set the created time when it receives the rate limit + // request. 
+ CreatedAt int64 `protobuf:"varint,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (x *UpdateRateLimit) Reset() { + *x = UpdateRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRateLimit) ProtoMessage() {} + +func (x *UpdateRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRateLimit.ProtoReflect.Descriptor instead. +func (*UpdateRateLimit) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateRateLimit) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *UpdateRateLimit) GetState() *RateLimitResponse { + if x != nil { + return x.State + } + return nil +} + +func (x *UpdateRateLimit) GetAlgorithm() Algorithm { + if x != nil { + return x.Algorithm + } + return Algorithm_TOKEN_BUCKET +} + +func (x *UpdateRateLimit) GetDuration() int64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *UpdateRateLimit) GetCreatedAt() int64 { + if x != nil { + return x.CreatedAt + } + return 0 +} + +var File_peer_proto protoreflect.FileDescriptor + +var file_peer_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x75, + 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x10, 0x67, 0x75, 0x62, + 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4d, 0x0a, + 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x54, 0x0a, 0x0f, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x52, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x22, 0xce, 0x01, + 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x25, + 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x75, 0x62, + 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x75, 0x62, 0x65, 0x72, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_peer_proto_rawDescOnce sync.Once + file_peer_proto_rawDescData = file_peer_proto_rawDesc +) + +func file_peer_proto_rawDescGZIP() []byte { + file_peer_proto_rawDescOnce.Do(func() { + file_peer_proto_rawDescData = protoimpl.X.CompressGZIP(file_peer_proto_rawDescData) + }) + return file_peer_proto_rawDescData +} + +var file_peer_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_peer_proto_goTypes = []interface{}{ + (*ForwardRequest)(nil), // 0: gubernator.v3.ForwardRequest + (*ForwardResponse)(nil), // 1: gubernator.v3.ForwardResponse + (*UpdateRequest)(nil), // 2: gubernator.v3.UpdateRequest + (*UpdateRateLimit)(nil), // 3: gubernator.v3.UpdateRateLimit + (*RateLimitRequest)(nil), // 4: gubernator.v3.RateLimitRequest + (*RateLimitResponse)(nil), // 5: gubernator.v3.RateLimitResponse + (Algorithm)(0), // 6: gubernator.v3.Algorithm +} +var file_peer_proto_depIdxs = []int32{ + 4, // 0: gubernator.v3.ForwardRequest.requests:type_name -> gubernator.v3.RateLimitRequest + 5, // 1: gubernator.v3.ForwardResponse.rate_limits:type_name -> gubernator.v3.RateLimitResponse + 3, // 2: gubernator.v3.UpdateRequest.globals:type_name -> gubernator.v3.UpdateRateLimit + 5, // 3: gubernator.v3.UpdateRateLimit.state:type_name -> gubernator.v3.RateLimitResponse + 6, // 4: gubernator.v3.UpdateRateLimit.algorithm:type_name -> gubernator.v3.Algorithm + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_peer_proto_init() } +func file_peer_proto_init() { + if File_peer_proto != nil { + return + } + file_gubernator_proto_init() + if !protoimpl.UnsafeEnabled { + file_peer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ForwardResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_peer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_peer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateRateLimit); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_peer_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_peer_proto_goTypes,
+ DependencyIndexes: file_peer_proto_depIdxs,
+ MessageInfos: file_peer_proto_msgTypes,
+ }.Build()
+ File_peer_proto = out.File
+ file_peer_proto_rawDesc = nil
+ file_peer_proto_goTypes = nil
+ file_peer_proto_depIdxs = nil
+}
diff --git a/peers.proto b/peer.proto
similarity index 68%
rename from peers.proto
rename to peer.proto
index 4976bc5..87858a6 100644
--- a/peers.proto
+++ b/peer.proto
@@ -18,41 +18,31 @@ syntax = "proto3";
 
 option go_package = "github.com/gubernator-io/gubernator";
 
-option cc_generic_services = true;
-
-package pb.gubernator;
+package gubernator.v3;
 
 import "gubernator.proto";
 
-// NOTE: For use by gubernator peers only
-service PeersV1 {
- // Used by peers to relay batches of requests to an owner peer
- rpc GetPeerRateLimits (GetPeerRateLimitsReq) returns (GetPeerRateLimitsResp) {}
-
- // Used by owner peers to send global rate limit updates to non-owner peers
- rpc UpdatePeerGlobals (UpdatePeerGlobalsReq) returns (UpdatePeerGlobalsResp) {}
-}
-
-message GetPeerRateLimitsReq {
- // Must specify at least one RateLimit. The peer that recives this request MUST be authoritative for
+message ForwardRequest {
+ // Must specify at least one RateLimit. The peer that receives this request MUST be authoritative for
 // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers
- repeated RateLimitReq requests = 1;
+ repeated RateLimitRequest requests = 1;
 }
 
-message GetPeerRateLimitsResp {
- // Responses are in the same order as they appeared in the PeerRateLimitRequests
- repeated RateLimitResp rate_limits = 1;
+message ForwardResponse {
+ // Responses are in the same order as they appeared in the ForwardRequest
+ repeated RateLimitResponse rate_limits = 1;
 }
 
-message UpdatePeerGlobalsReq {
+message UpdateRequest {
 // Must specify at least one RateLimit
- repeated UpdatePeerGlobal globals = 1;
+ repeated UpdateRateLimit globals = 1;
 }
 
-message UpdatePeerGlobal {
+message UpdateRateLimit {
 // Uniquely identifies this rate limit IE: 'ip:10.2.10.7' or 'account:123445'
 string key = 1;
- RateLimitResp status = 2;
+ // The rate limit state to update
+ RateLimitResponse state = 2;
 // The algorithm used to calculate the rate limit. The algorithm may change on
 // subsequent requests, when this occurs any previous rate limit hit counts are reset.
 Algorithm algorithm = 3;
@@ -69,5 +59,4 @@ message UpdatePeerGlobal {
 // gubernator will set the created time when it receives the rate limit
 // request.
int64 created_at = 5; -} -message UpdatePeerGlobalsResp {} +} \ No newline at end of file diff --git a/peer_client.go b/peer_client.go deleted file mode 100644 index 5e2fef1..0000000 --- a/peer_client.go +++ /dev/null @@ -1,435 +0,0 @@ -/* -Copyright 2018-2022 Mailgun Technologies Inc - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gubernator - -import ( - "context" - "crypto/tls" - "fmt" - "sync" - "sync/atomic" - - "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/collections" - "github.com/mailgun/holster/v4/errors" - "github.com/mailgun/holster/v4/tracing" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" -) - -type PeerPicker interface { - GetByPeerInfo(PeerInfo) *PeerClient - Peers() []*PeerClient - Get(string) (*PeerClient, error) - New() PeerPicker - Add(*PeerClient) -} - -type PeerClient struct { - client PeersV1Client - conn *grpc.ClientConn - conf PeerConfig - queue chan *request - queueClosed atomic.Bool - lastErrs *collections.LRUCache - - wgMutex sync.RWMutex - wg sync.WaitGroup // Monitor the number of in-flight requests. GUARDED_BY(wgMutex) -} - -type response struct { - rl *RateLimitResp - err error -} - -type request struct { - request *RateLimitReq - reqState RateLimitReqState - resp chan *response - ctx context.Context -} - -type PeerConfig struct { - TLS *tls.Config - Behavior BehaviorConfig - Info PeerInfo - Log FieldLogger - TraceGRPC bool -} - -// NewPeerClient tries to establish a connection to a peer in a non-blocking fashion. -// If batching is enabled, it also starts a goroutine where batches will be processed. -func NewPeerClient(conf PeerConfig) (*PeerClient, error) { - peerClient := &PeerClient{ - queue: make(chan *request, 1000), - conf: conf, - lastErrs: collections.NewLRUCache(100), - } - var opts []grpc.DialOption - - if conf.TraceGRPC { - opts = []grpc.DialOption{ - grpc.WithStatsHandler(otelgrpc.NewClientHandler()), - } - } - - if conf.TLS != nil { - opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(conf.TLS))) - } else { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - var err error - peerClient.conn, err = grpc.Dial(conf.Info.GRPCAddress, opts...) - if err != nil { - return nil, err - } - peerClient.client = NewPeersV1Client(peerClient.conn) - - if !conf.Behavior.DisableBatching { - go peerClient.runBatch() - } - return peerClient, nil -} - -// Info returns PeerInfo struct that describes this PeerClient -func (c *PeerClient) Info() PeerInfo { - return c.conf.Info -} - -// GetPeerRateLimit forwards a rate limit request to a peer. 
If the rate limit has `behavior == BATCHING` configured, -// this method will attempt to batch the rate limits -func (c *PeerClient) GetPeerRateLimit(ctx context.Context, r *RateLimitReq) (resp *RateLimitResp, err error) { - span := trace.SpanFromContext(ctx) - span.SetAttributes( - attribute.String("ratelimit.key", r.UniqueKey), - attribute.String("ratelimit.name", r.Name), - ) - - // If config asked for no batching - if c.conf.Behavior.DisableBatching || HasBehavior(r.Behavior, Behavior_NO_BATCHING) { - // If no metadata is provided - if r.Metadata == nil { - r.Metadata = make(map[string]string) - } - // Propagate the trace context along with the rate limit so - // peers can continue to report traces for this rate limit. - prop := propagation.TraceContext{} - prop.Inject(ctx, &MetadataCarrier{Map: r.Metadata}) - - // Send a single low latency rate limit request - resp, err := c.GetPeerRateLimits(ctx, &GetPeerRateLimitsReq{ - Requests: []*RateLimitReq{r}, - }) - if err != nil { - err = errors.Wrap(err, "Error in GetPeerRateLimits") - return nil, c.setLastErr(err) - } - return resp.RateLimits[0], nil - } - - resp, err = c.getPeerRateLimitsBatch(ctx, r) - if err != nil { - err = errors.Wrap(err, "Error in getPeerRateLimitsBatch") - return nil, c.setLastErr(err) - } - - return resp, nil -} - -// GetPeerRateLimits requests a list of rate limit statuses from a peer -func (c *PeerClient) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimitsReq) (resp *GetPeerRateLimitsResp, err error) { - // NOTE: This must be done within the Lock since calling Wait() in Shutdown() causes - // a race condition if called within a separate go routine if the internal wg is `0` - // when Wait() is called then Add(1) is called concurrently. - c.wgMutex.Lock() - c.wg.Add(1) - c.wgMutex.Unlock() - defer c.wg.Done() - - resp, err = c.client.GetPeerRateLimits(ctx, r) - if err != nil { - err = errors.Wrap(err, "Error in client.GetPeerRateLimits") - // metricCheckErrorCounter is updated within client.GetPeerRateLimits(). 
- return nil, c.setLastErr(err) - } - - // Unlikely, but this avoids a panic if something wonky happens - if len(resp.RateLimits) != len(r.Requests) { - err = errors.New("number of rate limits in peer response does not match request") - metricCheckErrorCounter.WithLabelValues("Item mismatch").Add(1) - return nil, c.setLastErr(err) - } - return resp, nil -} - -// UpdatePeerGlobals sends global rate limit status updates to a peer -func (c *PeerClient) UpdatePeerGlobals(ctx context.Context, r *UpdatePeerGlobalsReq) (resp *UpdatePeerGlobalsResp, err error) { - - // See NOTE above about RLock and wg.Add(1) - c.wgMutex.Lock() - c.wg.Add(1) - c.wgMutex.Unlock() - defer c.wg.Done() - - resp, err = c.client.UpdatePeerGlobals(ctx, r) - if err != nil { - _ = c.setLastErr(err) - } - - return resp, err -} - -func (c *PeerClient) setLastErr(err error) error { - // If we get a nil error return without caching it - if err == nil { - return err - } - - // Prepend client address to error - errWithHostname := errors.Wrap(err, fmt.Sprintf("from host %s", c.conf.Info.GRPCAddress)) - key := err.Error() - - // Add error to the cache with a TTL of 5 minutes - c.lastErrs.AddWithTTL(key, errWithHostname, clock.Minute*5) - - return err -} - -func (c *PeerClient) GetLastErr() []string { - var errs []string - keys := c.lastErrs.Keys() - - // Get errors from each key in the cache - for _, key := range keys { - err, ok := c.lastErrs.Get(key) - if ok { - errs = append(errs, err.(error).Error()) - } - } - - return errs -} - -func (c *PeerClient) getPeerRateLimitsBatch(ctx context.Context, r *RateLimitReq) (resp *RateLimitResp, err error) { - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.getPeerRateLimitsBatch")) - defer funcTimer.ObserveDuration() - - req := request{ - resp: make(chan *response, 1), - ctx: ctx, - request: r, - } - - c.wgMutex.Lock() - c.wg.Add(1) - c.wgMutex.Unlock() - defer c.wg.Done() - - // Enqueue the request to be sent - peerAddr := c.Info().GRPCAddress - metricBatchQueueLength.WithLabelValues(peerAddr).Set(float64(len(c.queue))) - - if c.queueClosed.Load() { - // this check prevents "panic: send on close channel" - return nil, status.Error(codes.Canceled, "grpc: the client connection is closing") - } - - select { - case c.queue <- &req: - // Successfully enqueued request. - case <-ctx.Done(): - return nil, errors.Wrap(ctx.Err(), "Context error while enqueuing request") - } - - // Wait for a response or context cancel - select { - case re := <-req.resp: - if re.err != nil { - err := errors.Wrap(c.setLastErr(re.err), "Request error") - return nil, c.setLastErr(err) - } - return re.rl, nil - case <-ctx.Done(): - return nil, errors.Wrap(ctx.Err(), "Context error while waiting for response") - } -} - -// runBatch processes batching requests by waiting for requests to be queued. Send -// the queue as a batch when either c.batchWait time has elapsed or the queue -// reaches c.batchLimit. -func (c *PeerClient) runBatch() { - var interval = NewInterval(c.conf.Behavior.BatchWait) - defer interval.Stop() - - var queue []*request - - for { - ctx := context.Background() - - select { - case r, ok := <-c.queue: - if !ok { - // If the queue has shutdown, we need to send the rest of the queue - if len(queue) > 0 { - c.sendBatch(ctx, queue) - } - return - } - - queue = append(queue, r) - // Send the queue if we reached our batch limit - if len(queue) >= c.conf.Behavior.BatchLimit { - c.conf.Log.WithContext(ctx). 
- WithFields(logrus.Fields{ - "queueLen": len(queue), - "batchLimit": c.conf.Behavior.BatchLimit, - }). - Debug("runBatch() reached batch limit") - ref := queue - queue = nil - go c.sendBatch(ctx, ref) - continue - } - - // If this is our first enqueued item since last - // sendBatch, reset interval timer. - if len(queue) == 1 { - interval.Next() - } - continue - - case <-interval.C: - queue2 := queue - - if len(queue2) > 0 { - queue = nil - - go func() { - c.sendBatch(ctx, queue2) - }() - } - } - } -} - -// sendBatch sends the queue provided and returns the responses to -// waiting go routines -func (c *PeerClient) sendBatch(ctx context.Context, queue []*request) { - batchSendTimer := prometheus.NewTimer(metricBatchSendDuration.WithLabelValues(c.conf.Info.GRPCAddress)) - defer batchSendTimer.ObserveDuration() - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.sendBatch")) - defer funcTimer.ObserveDuration() - - var req GetPeerRateLimitsReq - for _, r := range queue { - // NOTE: This trace has the same name because it's in a separate trace than the one above. - // We link the two traces, so we can relate our rate limit trace back to the above trace. - r.ctx = tracing.StartNamedScope(r.ctx, "PeerClient.sendBatch", - trace.WithLinks(trace.LinkFromContext(ctx))) - // If no metadata is provided - if r.request.Metadata == nil { - r.request.Metadata = make(map[string]string) - } - // Propagate the trace context along with the batched rate limit so - // peers can continue to report traces for this rate limit. - prop := propagation.TraceContext{} - prop.Inject(r.ctx, &MetadataCarrier{Map: r.request.Metadata}) - req.Requests = append(req.Requests, r.request) - tracing.EndScope(r.ctx, nil) - } - - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, c.conf.Behavior.BatchTimeout) - resp, err := c.client.GetPeerRateLimits(timeoutCtx, &req) - timeoutCancel() - - // An error here indicates the entire request failed - if err != nil { - logPart := "Error in client.GetPeerRateLimits" - c.conf.Log.WithContext(ctx). - WithError(err). - WithFields(logrus.Fields{ - "queueLen": len(queue), - "batchTimeout": c.conf.Behavior.BatchTimeout.String(), - }). - Error(logPart) - err = errors.Wrap(err, logPart) - _ = c.setLastErr(err) - // metricCheckErrorCounter is updated within client.GetPeerRateLimits(). - - for _, r := range queue { - r.resp <- &response{err: err} - } - return - } - - // Unlikely, but this avoids a panic if something wonky happens - if len(resp.RateLimits) != len(queue) { - err = errors.New("server responded with incorrect rate limit list size") - - for _, r := range queue { - metricCheckErrorCounter.WithLabelValues("Item mismatch").Add(1) - r.resp <- &response{err: err} - } - return - } - - // Provide responses to channels waiting in the queue - for i, r := range queue { - r.resp <- &response{rl: resp.RateLimits[i]} - } -} - -// Shutdown waits until all outstanding requests have finished or the context is cancelled. -// Then it closes the grpc connection. 
-func (c *PeerClient) Shutdown(ctx context.Context) error { - // ensure we don't leak goroutines, even if the Shutdown times out - defer c.conn.Close() - - waitChan := make(chan struct{}) - go func() { - // drain in-flight requests - c.wgMutex.Lock() - defer c.wgMutex.Unlock() - c.wg.Wait() - - // clear errors - c.lastErrs = collections.NewLRUCache(100) - - // signal that no more items will be sent - c.queueClosed.Store(true) - close(c.queue) - - close(waitChan) - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-waitChan: - return nil - } -} diff --git a/peer_client_test.go b/peer_test.go similarity index 58% rename from peer_client_test.go rename to peer_test.go index f445677..4198edc 100644 --- a/peer_client_test.go +++ b/peer_test.go @@ -19,27 +19,23 @@ package gubernator_test import ( "context" "runtime" - "strings" + "sync" "testing" - "github.com/gubernator-io/gubernator/v2" - "github.com/gubernator-io/gubernator/v2/cluster" + "github.com/gubernator-io/gubernator/v3" + "github.com/gubernator-io/gubernator/v3/cluster" "github.com/mailgun/holster/v4/clock" - "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" ) func TestPeerClientShutdown(t *testing.T) { - type test struct { - Name string - Behavior gubernator.Behavior - } - const threads = 10 - createdAt := epochMillis(clock.Now()) - cases := []test{ + cases := []struct { + Name string + Behavior gubernator.Behavior + }{ {"No batching", gubernator.Behavior_NO_BATCHING}, {"Batching", gubernator.Behavior_BATCHING}, {"Global", gubernator.Behavior_GLOBAL}, @@ -59,45 +55,46 @@ func TestPeerClientShutdown(t *testing.T) { c := cases[i] t.Run(c.Name, func(t *testing.T) { - client, err := gubernator.NewPeerClient(gubernator.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), + client, err := gubernator.NewPeer(gubernator.PeerConfig{ + Info: cluster.GetRandomPeerInfo(cluster.DataCenterNone), Behavior: config, }) require.NoError(t, err) - wg := errgroup.Group{} - wg.SetLimit(threads) - // Spawn a whole bunch of concurrent requests to test shutdown in various states - for j := 0; j < threads; j++ { - wg.Go(func() error { + wg := sync.WaitGroup{} + wg.Add(threads) + // Spawn a bunch of concurrent requests to test shutdown in various states + for i := 0; i < threads; i++ { + go func(client *gubernator.Peer, behavior gubernator.Behavior) { + defer wg.Done() ctx := context.Background() - _, err := client.GetPeerRateLimit(ctx, &gubernator.RateLimitReq{ - Hits: 1, - Limit: 100, - Behavior: c.Behavior, - CreatedAt: &createdAt, + _, err := client.Forward(ctx, &gubernator.RateLimitRequest{ + Hits: 1, + Limit: 100, + Behavior: behavior, }) - if err != nil { - if !strings.Contains(err.Error(), "client connection is closing") { - return errors.Wrap(err, "unexpected error in test") - } + isExpectedErr := false + + // TODO(thrawn01): what is this testing? 
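+ // A request racing with Close() may either complete normally or be
+ // rejected once shutdown begins; at present only the nil case is
+ // treated as expected.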
+ switch err.(type) {
+ case nil:
+ isExpectedErr = true
+ }
+
+ assert.True(t, isExpectedErr)
+
+ }(client, c.Behavior)
 }
 
 // yield the processor that way we allow other goroutines to start their request
 runtime.Gosched()
 
- shutDownErr := client.Shutdown(context.Background())
+ err = client.Close(context.Background())
+ assert.NoError(t, err)
 
- err = wg.Wait()
- if err != nil {
- t.Error(err)
- t.Fail()
- }
- require.NoError(t, shutDownErr)
+ wg.Wait()
 })
+
 }
}
diff --git a/peers.pb.go b/peers.pb.go
deleted file mode 100644
index 85a5749..0000000
--- a/peers.pb.go
+++ /dev/null
@@ -1,495 +0,0 @@
-//
-//Copyright 2018-2022 Mailgun Technologies Inc
-//
-//Licensed under the Apache License, Version 2.0 (the "License");
-//you may not use this file except in compliance with the License.
-//You may obtain a copy of the License at
-//
-//http://www.apache.org/licenses/LICENSE-2.0
-//
-//Unless required by applicable law or agreed to in writing, software
-//distributed under the License is distributed on an "AS IS" BASIS,
-//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//See the License for the specific language governing permissions and
-//limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.32.0
-// protoc (unknown)
-// source: peers.proto
-
-package gubernator
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type GetPeerRateLimitsReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Must specify at least one RateLimit. The peer that recives this request MUST be authoritative for
- // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers
- Requests []*RateLimitReq `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"`
-}
-
-func (x *GetPeerRateLimitsReq) Reset() {
- *x = GetPeerRateLimitsReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_peers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetPeerRateLimitsReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetPeerRateLimitsReq) ProtoMessage() {}
-
-func (x *GetPeerRateLimitsReq) ProtoReflect() protoreflect.Message {
- mi := &file_peers_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetPeerRateLimitsReq.ProtoReflect.Descriptor instead.
-func (*GetPeerRateLimitsReq) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{0} -} - -func (x *GetPeerRateLimitsReq) GetRequests() []*RateLimitReq { - if x != nil { - return x.Requests - } - return nil -} - -type GetPeerRateLimitsResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Responses are in the same order as they appeared in the PeerRateLimitRequests - RateLimits []*RateLimitResp `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` -} - -func (x *GetPeerRateLimitsResp) Reset() { - *x = GetPeerRateLimitsResp{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPeerRateLimitsResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPeerRateLimitsResp) ProtoMessage() {} - -func (x *GetPeerRateLimitsResp) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPeerRateLimitsResp.ProtoReflect.Descriptor instead. -func (*GetPeerRateLimitsResp) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{1} -} - -func (x *GetPeerRateLimitsResp) GetRateLimits() []*RateLimitResp { - if x != nil { - return x.RateLimits - } - return nil -} - -type UpdatePeerGlobalsReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Must specify at least one RateLimit - Globals []*UpdatePeerGlobal `protobuf:"bytes,1,rep,name=globals,proto3" json:"globals,omitempty"` -} - -func (x *UpdatePeerGlobalsReq) Reset() { - *x = UpdatePeerGlobalsReq{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobalsReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobalsReq) ProtoMessage() {} - -func (x *UpdatePeerGlobalsReq) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobalsReq.ProtoReflect.Descriptor instead. -func (*UpdatePeerGlobalsReq) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{2} -} - -func (x *UpdatePeerGlobalsReq) GetGlobals() []*UpdatePeerGlobal { - if x != nil { - return x.Globals - } - return nil -} - -type UpdatePeerGlobal struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Uniquely identifies this rate limit IE: 'ip:10.2.10.7' or 'account:123445' - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Status *RateLimitResp `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - // The algorithm used to calculate the rate limit. The algorithm may change on - // subsequent requests, when this occurs any previous rate limit hit counts are reset. 
- Algorithm Algorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=pb.gubernator.Algorithm" json:"algorithm,omitempty"` - // The duration of the rate limit in milliseconds - Duration int64 `protobuf:"varint,4,opt,name=duration,proto3" json:"duration,omitempty"` - // The exact time the original request was created in Epoch milliseconds. - // Due to time drift between systems, it may be advantageous for a client to - // set the exact time the request was created. It possible the system clock - // for the client has drifted from the system clock where gubernator daemon - // is running. - // - // The created time is used by gubernator to calculate the reset time for - // both token and leaky algorithms. If it is not set by the client, - // gubernator will set the created time when it receives the rate limit - // request. - CreatedAt int64 `protobuf:"varint,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (x *UpdatePeerGlobal) Reset() { - *x = UpdatePeerGlobal{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobal) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobal) ProtoMessage() {} - -func (x *UpdatePeerGlobal) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobal.ProtoReflect.Descriptor instead. -func (*UpdatePeerGlobal) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{3} -} - -func (x *UpdatePeerGlobal) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *UpdatePeerGlobal) GetStatus() *RateLimitResp { - if x != nil { - return x.Status - } - return nil -} - -func (x *UpdatePeerGlobal) GetAlgorithm() Algorithm { - if x != nil { - return x.Algorithm - } - return Algorithm_TOKEN_BUCKET -} - -func (x *UpdatePeerGlobal) GetDuration() int64 { - if x != nil { - return x.Duration - } - return 0 -} - -func (x *UpdatePeerGlobal) GetCreatedAt() int64 { - if x != nil { - return x.CreatedAt - } - return 0 -} - -type UpdatePeerGlobalsResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *UpdatePeerGlobalsResp) Reset() { - *x = UpdatePeerGlobalsResp{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobalsResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobalsResp) ProtoMessage() {} - -func (x *UpdatePeerGlobalsResp) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobalsResp.ProtoReflect.Descriptor instead. 
-func (*UpdatePeerGlobalsResp) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{4} -} - -var File_peers_proto protoreflect.FileDescriptor - -var file_peers_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x70, - 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x1a, 0x10, 0x67, 0x75, - 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, - 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, - 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x52, 0x65, 0x71, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, - 0x56, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3d, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x52, 0x0a, 0x72, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x22, 0x51, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x12, - 0x39, 0x0a, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x52, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x22, 0xcd, 0x01, 0x0a, 0x10, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x32, 0xcd, 0x01, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x73, 0x56, 0x31, 0x12, - 0x60, 0x0a, 
0x11, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x24, 0x2e, 0x70, 0x62, 0x2e, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, - 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x00, 0x12, 0x60, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, - 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, - 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x24, 0x2e, 0x70, 0x62, - 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x22, 0x00, 0x42, 0x28, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x69, 0x6f, 0x2f, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x80, 0x01, 0x01, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_peers_proto_rawDescOnce sync.Once - file_peers_proto_rawDescData = file_peers_proto_rawDesc -) - -func file_peers_proto_rawDescGZIP() []byte { - file_peers_proto_rawDescOnce.Do(func() { - file_peers_proto_rawDescData = protoimpl.X.CompressGZIP(file_peers_proto_rawDescData) - }) - return file_peers_proto_rawDescData -} - -var file_peers_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_peers_proto_goTypes = []interface{}{ - (*GetPeerRateLimitsReq)(nil), // 0: pb.gubernator.GetPeerRateLimitsReq - (*GetPeerRateLimitsResp)(nil), // 1: pb.gubernator.GetPeerRateLimitsResp - (*UpdatePeerGlobalsReq)(nil), // 2: pb.gubernator.UpdatePeerGlobalsReq - (*UpdatePeerGlobal)(nil), // 3: pb.gubernator.UpdatePeerGlobal - (*UpdatePeerGlobalsResp)(nil), // 4: pb.gubernator.UpdatePeerGlobalsResp - (*RateLimitReq)(nil), // 5: pb.gubernator.RateLimitReq - (*RateLimitResp)(nil), // 6: pb.gubernator.RateLimitResp - (Algorithm)(0), // 7: pb.gubernator.Algorithm -} -var file_peers_proto_depIdxs = []int32{ - 5, // 0: pb.gubernator.GetPeerRateLimitsReq.requests:type_name -> pb.gubernator.RateLimitReq - 6, // 1: pb.gubernator.GetPeerRateLimitsResp.rate_limits:type_name -> pb.gubernator.RateLimitResp - 3, // 2: pb.gubernator.UpdatePeerGlobalsReq.globals:type_name -> pb.gubernator.UpdatePeerGlobal - 6, // 3: pb.gubernator.UpdatePeerGlobal.status:type_name -> pb.gubernator.RateLimitResp - 7, // 4: pb.gubernator.UpdatePeerGlobal.algorithm:type_name -> pb.gubernator.Algorithm - 0, // 5: pb.gubernator.PeersV1.GetPeerRateLimits:input_type -> pb.gubernator.GetPeerRateLimitsReq - 2, // 6: pb.gubernator.PeersV1.UpdatePeerGlobals:input_type -> pb.gubernator.UpdatePeerGlobalsReq - 1, // 7: pb.gubernator.PeersV1.GetPeerRateLimits:output_type -> pb.gubernator.GetPeerRateLimitsResp - 4, // 8: pb.gubernator.PeersV1.UpdatePeerGlobals:output_type -> pb.gubernator.UpdatePeerGlobalsResp - 7, // [7:9] is the sub-list for method output_type - 5, // [5:7] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // 
[5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_peers_proto_init() } -func file_peers_proto_init() { - if File_peers_proto != nil { - return - } - file_gubernator_proto_init() - if !protoimpl.UnsafeEnabled { - file_peers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPeerRateLimitsReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPeerRateLimitsResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobalsReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobal); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobalsResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_peers_proto_rawDesc, - NumEnums: 0, - NumMessages: 5, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_peers_proto_goTypes, - DependencyIndexes: file_peers_proto_depIdxs, - MessageInfos: file_peers_proto_msgTypes, - }.Build() - File_peers_proto = out.File - file_peers_proto_rawDesc = nil - file_peers_proto_goTypes = nil - file_peers_proto_depIdxs = nil -} diff --git a/peers.pb.gw.go b/peers.pb.gw.go deleted file mode 100644 index f092976..0000000 --- a/peers.pb.gw.go +++ /dev/null @@ -1,256 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: peers.proto - -/* -Package gubernator is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package gubernator - -import ( - "context" - "io" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - -func request_PeersV1_GetPeerRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client PeersV1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPeerRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetPeerRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_PeersV1_GetPeerRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server PeersV1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPeerRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetPeerRateLimits(ctx, &protoReq) - return msg, metadata, err - -} - -func request_PeersV1_UpdatePeerGlobals_0(ctx context.Context, marshaler runtime.Marshaler, client PeersV1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UpdatePeerGlobalsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UpdatePeerGlobals(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_PeersV1_UpdatePeerGlobals_0(ctx context.Context, marshaler runtime.Marshaler, server PeersV1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UpdatePeerGlobalsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.UpdatePeerGlobals(ctx, &protoReq) - return msg, metadata, err - -} - -// 
RegisterPeersV1HandlerServer registers the http handlers for service PeersV1 to "mux". -// UnaryRPC :call PeersV1Server directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterPeersV1HandlerFromEndpoint instead. -func RegisterPeersV1HandlerServer(ctx context.Context, mux *runtime.ServeMux, server PeersV1Server) error { - - mux.Handle("POST", pattern_PeersV1_GetPeerRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.PeersV1/GetPeerRateLimits", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/GetPeerRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_PeersV1_GetPeerRateLimits_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_GetPeerRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_PeersV1_UpdatePeerGlobals_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.PeersV1/UpdatePeerGlobals", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/UpdatePeerGlobals")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_PeersV1_UpdatePeerGlobals_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_UpdatePeerGlobals_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterPeersV1HandlerFromEndpoint is same as RegisterPeersV1Handler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterPeersV1HandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterPeersV1Handler(ctx, mux, conn) -} - -// RegisterPeersV1Handler registers the http handlers for service PeersV1 to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterPeersV1Handler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterPeersV1HandlerClient(ctx, mux, NewPeersV1Client(conn)) -} - -// RegisterPeersV1HandlerClient registers the http handlers for service PeersV1 -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PeersV1Client". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PeersV1Client" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "PeersV1Client" to call the correct interceptors. -func RegisterPeersV1HandlerClient(ctx context.Context, mux *runtime.ServeMux, client PeersV1Client) error { - - mux.Handle("POST", pattern_PeersV1_GetPeerRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.PeersV1/GetPeerRateLimits", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/GetPeerRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_PeersV1_GetPeerRateLimits_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_GetPeerRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_PeersV1_UpdatePeerGlobals_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.PeersV1/UpdatePeerGlobals", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/UpdatePeerGlobals")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_PeersV1_UpdatePeerGlobals_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_UpdatePeerGlobals_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_PeersV1_GetPeerRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"pb.gubernator.PeersV1", "GetPeerRateLimits"}, "")) - - pattern_PeersV1_UpdatePeerGlobals_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"pb.gubernator.PeersV1", "UpdatePeerGlobals"}, "")) -) - -var ( - forward_PeersV1_GetPeerRateLimits_0 = runtime.ForwardResponseMessage - - forward_PeersV1_UpdatePeerGlobals_0 = runtime.ForwardResponseMessage -) diff --git a/peers_grpc.pb.go b/peers_grpc.pb.go deleted file mode 100644 index e74a7d1..0000000 --- a/peers_grpc.pb.go +++ /dev/null @@ -1,163 +0,0 @@ -// -//Copyright 2018-2022 Mailgun Technologies Inc -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: peers.proto - -package gubernator - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - PeersV1_GetPeerRateLimits_FullMethodName = "/pb.gubernator.PeersV1/GetPeerRateLimits" - PeersV1_UpdatePeerGlobals_FullMethodName = "/pb.gubernator.PeersV1/UpdatePeerGlobals" -) - -// PeersV1Client is the client API for PeersV1 service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type PeersV1Client interface { - // Used by peers to relay batches of requests to an owner peer - GetPeerRateLimits(ctx context.Context, in *GetPeerRateLimitsReq, opts ...grpc.CallOption) (*GetPeerRateLimitsResp, error) - // Used by owner peers to send global rate limit updates to non-owner peers - UpdatePeerGlobals(ctx context.Context, in *UpdatePeerGlobalsReq, opts ...grpc.CallOption) (*UpdatePeerGlobalsResp, error) -} - -type peersV1Client struct { - cc grpc.ClientConnInterface -} - -func NewPeersV1Client(cc grpc.ClientConnInterface) PeersV1Client { - return &peersV1Client{cc} -} - -func (c *peersV1Client) GetPeerRateLimits(ctx context.Context, in *GetPeerRateLimitsReq, opts ...grpc.CallOption) (*GetPeerRateLimitsResp, error) { - out := new(GetPeerRateLimitsResp) - err := c.cc.Invoke(ctx, PeersV1_GetPeerRateLimits_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *peersV1Client) UpdatePeerGlobals(ctx context.Context, in *UpdatePeerGlobalsReq, opts ...grpc.CallOption) (*UpdatePeerGlobalsResp, error) { - out := new(UpdatePeerGlobalsResp) - err := c.cc.Invoke(ctx, PeersV1_UpdatePeerGlobals_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PeersV1Server is the server API for PeersV1 service. 
-// All implementations should embed UnimplementedPeersV1Server -// for forward compatibility -type PeersV1Server interface { - // Used by peers to relay batches of requests to an owner peer - GetPeerRateLimits(context.Context, *GetPeerRateLimitsReq) (*GetPeerRateLimitsResp, error) - // Used by owner peers to send global rate limit updates to non-owner peers - UpdatePeerGlobals(context.Context, *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) -} - -// UnimplementedPeersV1Server should be embedded to have forward compatible implementations. -type UnimplementedPeersV1Server struct { -} - -func (UnimplementedPeersV1Server) GetPeerRateLimits(context.Context, *GetPeerRateLimitsReq) (*GetPeerRateLimitsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPeerRateLimits not implemented") -} -func (UnimplementedPeersV1Server) UpdatePeerGlobals(context.Context, *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdatePeerGlobals not implemented") -} - -// UnsafePeersV1Server may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PeersV1Server will -// result in compilation errors. -type UnsafePeersV1Server interface { - mustEmbedUnimplementedPeersV1Server() -} - -func RegisterPeersV1Server(s grpc.ServiceRegistrar, srv PeersV1Server) { - s.RegisterService(&PeersV1_ServiceDesc, srv) -} - -func _PeersV1_GetPeerRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPeerRateLimitsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PeersV1Server).GetPeerRateLimits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PeersV1_GetPeerRateLimits_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PeersV1Server).GetPeerRateLimits(ctx, req.(*GetPeerRateLimitsReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _PeersV1_UpdatePeerGlobals_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdatePeerGlobalsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PeersV1Server).UpdatePeerGlobals(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PeersV1_UpdatePeerGlobals_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PeersV1Server).UpdatePeerGlobals(ctx, req.(*UpdatePeerGlobalsReq)) - } - return interceptor(ctx, in, info, handler) -} - -// PeersV1_ServiceDesc is the grpc.ServiceDesc for PeersV1 service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PeersV1_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pb.gubernator.PeersV1", - HandlerType: (*PeersV1Server)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPeerRateLimits", - Handler: _PeersV1_GetPeerRateLimits_Handler, - }, - { - MethodName: "UpdatePeerGlobals", - Handler: _PeersV1_UpdatePeerGlobals_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "peers.proto", -} diff --git a/python/gubernator/__init__.py b/python/gubernator/__init__.py deleted file mode 100644 index b90c1a3..0000000 --- a/python/gubernator/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This code is py3.7 and py2.7 compatible - -import gubernator.ratelimit_pb2_grpc as pb_grpc -from datetime import datetime - -import time -import grpc - -MILLISECOND = 1 -SECOND = MILLISECOND * 1000 -MINUTE = SECOND * 60 - - -def sleep_until_reset(reset_time): - now = datetime.now() - time.sleep((reset_time - now).seconds) - - -def V1Client(endpoint='127.0.0.1:9090'): - channel = grpc.insecure_channel(endpoint) - return pb_grpc.RateLimitServiceV1Stub(channel) diff --git a/python/gubernator/gubernator_pb2.py b/python/gubernator/gubernator_pb2.py index 5926757..6fe6165 100644 --- a/python/gubernator/gubernator_pb2.py +++ b/python/gubernator/gubernator_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: gubernator.proto -# Protobuf Python Version: 5.26.0 +# Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -12,47 +12,40 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10gubernator.proto\x12\rpb.gubernator\x1a\x1cgoogle/api/annotations.proto\"K\n\x10GetRateLimitsReq\x12\x37\n\x08requests\x18\x01 \x03(\x0b\x32\x1b.pb.gubernator.RateLimitReqR\x08requests\"O\n\x11GetRateLimitsResp\x12:\n\tresponses\x18\x01 \x03(\x0b\x32\x1c.pb.gubernator.RateLimitRespR\tresponses\"\xc1\x03\n\x0cRateLimitReq\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n\nunique_key\x18\x02 \x01(\tR\tuniqueKey\x12\x12\n\x04hits\x18\x03 \x01(\x03R\x04hits\x12\x14\n\x05limit\x18\x04 \x01(\x03R\x05limit\x12\x1a\n\x08\x64uration\x18\x05 \x01(\x03R\x08\x64uration\x12\x36\n\talgorithm\x18\x06 \x01(\x0e\x32\x18.pb.gubernator.AlgorithmR\talgorithm\x12\x33\n\x08\x62\x65havior\x18\x07 \x01(\x0e\x32\x17.pb.gubernator.BehaviorR\x08\x62\x65havior\x12\x14\n\x05\x62urst\x18\x08 \x01(\x03R\x05\x62urst\x12\x45\n\x08metadata\x18\t \x03(\x0b\x32).pb.gubernator.RateLimitReq.MetadataEntryR\x08metadata\x12\"\n\ncreated_at\x18\n \x01(\x03H\x00R\tcreatedAt\x88\x01\x01\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\r\n\x0b_created_at\"\xac\x02\n\rRateLimitResp\x12-\n\x06status\x18\x01 \x01(\x0e\x32\x15.pb.gubernator.StatusR\x06status\x12\x14\n\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n\tremaining\x18\x03 \x01(\x03R\tremaining\x12\x1d\n\nreset_time\x18\x04 \x01(\x03R\tresetTime\x12\x14\n\x05\x65rror\x18\x05 \x01(\tR\x05\x65rror\x12\x46\n\x08metadata\x18\x06 \x03(\x0b\x32*.pb.gubernator.RateLimitResp.MetadataEntryR\x08metadata\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 
\x01(\tR\x05value:\x02\x38\x01\"\x10\n\x0eHealthCheckReq\"b\n\x0fHealthCheckResp\x12\x16\n\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x1d\n\npeer_count\x18\x03 \x01(\x05R\tpeerCount*/\n\tAlgorithm\x12\x10\n\x0cTOKEN_BUCKET\x10\x00\x12\x10\n\x0cLEAKY_BUCKET\x10\x01*\x8d\x01\n\x08\x42\x65havior\x12\x0c\n\x08\x42\x41TCHING\x10\x00\x12\x0f\n\x0bNO_BATCHING\x10\x01\x12\n\n\x06GLOBAL\x10\x02\x12\x19\n\x15\x44URATION_IS_GREGORIAN\x10\x04\x12\x13\n\x0fRESET_REMAINING\x10\x08\x12\x10\n\x0cMULTI_REGION\x10\x10\x12\x14\n\x10\x44RAIN_OVER_LIMIT\x10 *)\n\x06Status\x12\x0f\n\x0bUNDER_LIMIT\x10\x00\x12\x0e\n\nOVER_LIMIT\x10\x01\x32\xdd\x01\n\x02V1\x12p\n\rGetRateLimits\x12\x1f.pb.gubernator.GetRateLimitsReq\x1a .pb.gubernator.GetRateLimitsResp\"\x1c\x82\xd3\xe4\x93\x02\x16\"\x11/v1/GetRateLimits:\x01*\x12\x65\n\x0bHealthCheck\x12\x1d.pb.gubernator.HealthCheckReq\x1a\x1e.pb.gubernator.HealthCheckResp\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/HealthCheckB(Z#github.com/gubernator-io/gubernator\x80\x01\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10gubernator.proto\x12\rgubernator.v3\"U\n\x16\x43heckRateLimitsRequest\x12;\n\x08requests\x18\x01 \x03(\x0b\x32\x1f.gubernator.v3.RateLimitRequestR\x08requests\"Y\n\x17\x43heckRateLimitsResponse\x12>\n\tresponses\x18\x01 \x03(\x0b\x32 .gubernator.v3.RateLimitResponseR\tresponses\"\xca\x03\n\x10RateLimitRequest\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1e\n\nunique_key\x18\x02 \x01(\tR\nunique_key\x12\x12\n\x04hits\x18\x03 \x01(\x03R\x04hits\x12\x14\n\x05limit\x18\x04 \x01(\x03R\x05limit\x12\x1a\n\x08\x64uration\x18\x05 \x01(\x03R\x08\x64uration\x12\x36\n\talgorithm\x18\x06 \x01(\x0e\x32\x18.gubernator.v3.AlgorithmR\talgorithm\x12\x33\n\x08\x62\x65havior\x18\x07 \x01(\x0e\x32\x17.gubernator.v3.BehaviorR\x08\x62\x65havior\x12\x14\n\x05\x62urst\x18\x08 \x01(\x03R\x05\x62urst\x12I\n\x08metadata\x18\t \x03(\x0b\x32-.gubernator.v3.RateLimitRequest.MetadataEntryR\x08metadata\x12\"\n\ncreated_at\x18\n \x01(\x03H\x00R\tcreatedAt\x88\x01\x01\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\r\n\x0b_created_at\"\xb5\x02\n\x11RateLimitResponse\x12-\n\x06status\x18\x01 \x01(\x0e\x32\x15.gubernator.v3.StatusR\x06status\x12\x14\n\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n\tremaining\x18\x03 \x01(\x03R\tremaining\x12\x1e\n\nreset_time\x18\x04 \x01(\x03R\nreset_time\x12\x14\n\x05\x65rror\x18\x05 \x01(\tR\x05\x65rror\x12J\n\x08metadata\x18\x06 \x03(\x0b\x32..gubernator.v3.RateLimitResponse.MetadataEntryR\x08metadata\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x14\n\x12HealthCheckRequest\"g\n\x13HealthCheckResponse\x12\x16\n\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x1e\n\npeer_count\x18\x03 \x01(\x05R\npeer_count*/\n\tAlgorithm\x12\x10\n\x0cTOKEN_BUCKET\x10\x00\x12\x10\n\x0cLEAKY_BUCKET\x10\x01*\x8d\x01\n\x08\x42\x65havior\x12\x0c\n\x08\x42\x41TCHING\x10\x00\x12\x0f\n\x0bNO_BATCHING\x10\x01\x12\n\n\x06GLOBAL\x10\x02\x12\x19\n\x15\x44URATION_IS_GREGORIAN\x10\x04\x12\x13\n\x0fRESET_REMAINING\x10\x08\x12\x10\n\x0cMULTI_REGION\x10\x10\x12\x14\n\x10\x44RAIN_OVER_LIMIT\x10 *)\n\x06Status\x12\x0f\n\x0bUNDER_LIMIT\x10\x00\x12\x0e\n\nOVER_LIMIT\x10\x01\x42%Z#github.com/gubernator-io/gubernatorb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'gubernator_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'Z#github.com/gubernator-io/gubernator\200\001\001' - _globals['_RATELIMITREQ_METADATAENTRY']._loaded_options = None - _globals['_RATELIMITREQ_METADATAENTRY']._serialized_options = b'8\001' - _globals['_RATELIMITRESP_METADATAENTRY']._loaded_options = None - _globals['_RATELIMITRESP_METADATAENTRY']._serialized_options = b'8\001' - _globals['_V1'].methods_by_name['GetRateLimits']._loaded_options = None - _globals['_V1'].methods_by_name['GetRateLimits']._serialized_options = b'\202\323\344\223\002\026\"\021/v1/GetRateLimits:\001*' - _globals['_V1'].methods_by_name['HealthCheck']._loaded_options = None - _globals['_V1'].methods_by_name['HealthCheck']._serialized_options = b'\202\323\344\223\002\021\022\017/v1/HealthCheck' - _globals['_ALGORITHM']._serialized_start=1096 - _globals['_ALGORITHM']._serialized_end=1143 - _globals['_BEHAVIOR']._serialized_start=1146 - _globals['_BEHAVIOR']._serialized_end=1287 - _globals['_STATUS']._serialized_start=1289 - _globals['_STATUS']._serialized_end=1330 - _globals['_GETRATELIMITSREQ']._serialized_start=65 - _globals['_GETRATELIMITSREQ']._serialized_end=140 - _globals['_GETRATELIMITSRESP']._serialized_start=142 - _globals['_GETRATELIMITSRESP']._serialized_end=221 - _globals['_RATELIMITREQ']._serialized_start=224 - _globals['_RATELIMITREQ']._serialized_end=673 - _globals['_RATELIMITREQ_METADATAENTRY']._serialized_start=599 - _globals['_RATELIMITREQ_METADATAENTRY']._serialized_end=658 - _globals['_RATELIMITRESP']._serialized_start=676 - _globals['_RATELIMITRESP']._serialized_end=976 - _globals['_RATELIMITRESP_METADATAENTRY']._serialized_start=599 - _globals['_RATELIMITRESP_METADATAENTRY']._serialized_end=658 - _globals['_HEALTHCHECKREQ']._serialized_start=978 - _globals['_HEALTHCHECKREQ']._serialized_end=994 - _globals['_HEALTHCHECKRESP']._serialized_start=996 - _globals['_HEALTHCHECKRESP']._serialized_end=1094 - _globals['_V1']._serialized_start=1333 - _globals['_V1']._serialized_end=1554 + _globals['DESCRIPTOR']._serialized_options = b'Z#github.com/gubernator-io/gubernator' + _globals['_RATELIMITREQUEST_METADATAENTRY']._loaded_options = None + _globals['_RATELIMITREQUEST_METADATAENTRY']._serialized_options = b'8\001' + _globals['_RATELIMITRESPONSE_METADATAENTRY']._loaded_options = None + _globals['_RATELIMITRESPONSE_METADATAENTRY']._serialized_options = b'8\001' + _globals['_ALGORITHM']._serialized_start=1113 + _globals['_ALGORITHM']._serialized_end=1160 + _globals['_BEHAVIOR']._serialized_start=1163 + _globals['_BEHAVIOR']._serialized_end=1304 + _globals['_STATUS']._serialized_start=1306 + _globals['_STATUS']._serialized_end=1347 + _globals['_CHECKRATELIMITSREQUEST']._serialized_start=35 + _globals['_CHECKRATELIMITSREQUEST']._serialized_end=120 + _globals['_CHECKRATELIMITSRESPONSE']._serialized_start=122 + _globals['_CHECKRATELIMITSRESPONSE']._serialized_end=211 + _globals['_RATELIMITREQUEST']._serialized_start=214 + _globals['_RATELIMITREQUEST']._serialized_end=672 + _globals['_RATELIMITREQUEST_METADATAENTRY']._serialized_start=598 + _globals['_RATELIMITREQUEST_METADATAENTRY']._serialized_end=657 + _globals['_RATELIMITRESPONSE']._serialized_start=675 + _globals['_RATELIMITRESPONSE']._serialized_end=984 + _globals['_RATELIMITRESPONSE_METADATAENTRY']._serialized_start=598 + _globals['_RATELIMITRESPONSE_METADATAENTRY']._serialized_end=657 + 
_globals['_HEALTHCHECKREQUEST']._serialized_start=986 + _globals['_HEALTHCHECKREQUEST']._serialized_end=1006 + _globals['_HEALTHCHECKRESPONSE']._serialized_start=1008 + _globals['_HEALTHCHECKRESPONSE']._serialized_end=1111 # @@protoc_insertion_point(module_scope) diff --git a/python/gubernator/gubernator_pb2_grpc.py b/python/gubernator/gubernator_pb2_grpc.py deleted file mode 100644 index 02dd779..0000000 --- a/python/gubernator/gubernator_pb2_grpc.py +++ /dev/null @@ -1,102 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import gubernator_pb2 as gubernator__pb2 - - -class V1Stub(object): - """Missing associated documentation comment in .proto file.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.GetRateLimits = channel.unary_unary( - '/pb.gubernator.V1/GetRateLimits', - request_serializer=gubernator__pb2.GetRateLimitsReq.SerializeToString, - response_deserializer=gubernator__pb2.GetRateLimitsResp.FromString, - ) - self.HealthCheck = channel.unary_unary( - '/pb.gubernator.V1/HealthCheck', - request_serializer=gubernator__pb2.HealthCheckReq.SerializeToString, - response_deserializer=gubernator__pb2.HealthCheckResp.FromString, - ) - - -class V1Servicer(object): - """Missing associated documentation comment in .proto file.""" - - def GetRateLimits(self, request, context): - """Given a list of rate limit requests, return the rate limits of each. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def HealthCheck(self, request, context): - """This method is for round trip benchmarking and can be used by - the client to determine connectivity to the server - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_V1Servicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetRateLimits': grpc.unary_unary_rpc_method_handler( - servicer.GetRateLimits, - request_deserializer=gubernator__pb2.GetRateLimitsReq.FromString, - response_serializer=gubernator__pb2.GetRateLimitsResp.SerializeToString, - ), - 'HealthCheck': grpc.unary_unary_rpc_method_handler( - servicer.HealthCheck, - request_deserializer=gubernator__pb2.HealthCheckReq.FromString, - response_serializer=gubernator__pb2.HealthCheckResp.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'pb.gubernator.V1', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. 
-class V1(object): - """Missing associated documentation comment in .proto file.""" - - @staticmethod - def GetRateLimits(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.V1/GetRateLimits', - gubernator__pb2.GetRateLimitsReq.SerializeToString, - gubernator__pb2.GetRateLimitsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def HealthCheck(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.V1/HealthCheck', - gubernator__pb2.HealthCheckReq.SerializeToString, - gubernator__pb2.HealthCheckResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/gubernator/peer_pb2.py b/python/gubernator/peer_pb2.py new file mode 100644 index 0000000..6d5ef2b --- /dev/null +++ b/python/gubernator/peer_pb2.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: peer.proto +# Protobuf Python Version: 5.26.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +import gubernator_pb2 as gubernator__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\npeer.proto\x12\rgubernator.v3\x1a\x10gubernator.proto\"M\n\x0e\x46orwardRequest\x12;\n\x08requests\x18\x01 \x03(\x0b\x32\x1f.gubernator.v3.RateLimitRequestR\x08requests\"T\n\x0f\x46orwardResponse\x12\x41\n\x0brate_limits\x18\x01 \x03(\x0b\x32 .gubernator.v3.RateLimitResponseR\nrateLimits\"I\n\rUpdateRequest\x12\x38\n\x07globals\x18\x01 \x03(\x0b\x32\x1e.gubernator.v3.UpdateRateLimitR\x07globals\"\xce\x01\n\x0fUpdateRateLimit\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x36\n\x05state\x18\x02 \x01(\x0b\x32 .gubernator.v3.RateLimitResponseR\x05state\x12\x36\n\talgorithm\x18\x03 \x01(\x0e\x32\x18.gubernator.v3.AlgorithmR\talgorithm\x12\x1a\n\x08\x64uration\x18\x04 \x01(\x03R\x08\x64uration\x12\x1d\n\ncreated_at\x18\x05 \x01(\x03R\tcreatedAtB%Z#github.com/gubernator-io/gubernatorb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'peer_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z#github.com/gubernator-io/gubernator' + _globals['_FORWARDREQUEST']._serialized_start=47 + _globals['_FORWARDREQUEST']._serialized_end=124 + _globals['_FORWARDRESPONSE']._serialized_start=126 + _globals['_FORWARDRESPONSE']._serialized_end=210 + _globals['_UPDATEREQUEST']._serialized_start=212 + _globals['_UPDATEREQUEST']._serialized_end=285 + _globals['_UPDATERATELIMIT']._serialized_start=288 + _globals['_UPDATERATELIMIT']._serialized_end=494 +# @@protoc_insertion_point(module_scope) diff --git a/python/gubernator/peers_pb2.py 
b/python/gubernator/peers_pb2.py deleted file mode 100644 index 6fe5197..0000000 --- a/python/gubernator/peers_pb2.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: peers.proto -# Protobuf Python Version: 5.26.0 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -import gubernator_pb2 as gubernator__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0bpeers.proto\x12\rpb.gubernator\x1a\x10gubernator.proto\"O\n\x14GetPeerRateLimitsReq\x12\x37\n\x08requests\x18\x01 \x03(\x0b\x32\x1b.pb.gubernator.RateLimitReqR\x08requests\"V\n\x15GetPeerRateLimitsResp\x12=\n\x0brate_limits\x18\x01 \x03(\x0b\x32\x1c.pb.gubernator.RateLimitRespR\nrateLimits\"Q\n\x14UpdatePeerGlobalsReq\x12\x39\n\x07globals\x18\x01 \x03(\x0b\x32\x1f.pb.gubernator.UpdatePeerGlobalR\x07globals\"\xcd\x01\n\x10UpdatePeerGlobal\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x34\n\x06status\x18\x02 \x01(\x0b\x32\x1c.pb.gubernator.RateLimitRespR\x06status\x12\x36\n\talgorithm\x18\x03 \x01(\x0e\x32\x18.pb.gubernator.AlgorithmR\talgorithm\x12\x1a\n\x08\x64uration\x18\x04 \x01(\x03R\x08\x64uration\x12\x1d\n\ncreated_at\x18\x05 \x01(\x03R\tcreatedAt\"\x17\n\x15UpdatePeerGlobalsResp2\xcd\x01\n\x07PeersV1\x12`\n\x11GetPeerRateLimits\x12#.pb.gubernator.GetPeerRateLimitsReq\x1a$.pb.gubernator.GetPeerRateLimitsResp\"\x00\x12`\n\x11UpdatePeerGlobals\x12#.pb.gubernator.UpdatePeerGlobalsReq\x1a$.pb.gubernator.UpdatePeerGlobalsResp\"\x00\x42(Z#github.com/gubernator-io/gubernator\x80\x01\x01\x62\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'peers_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'Z#github.com/gubernator-io/gubernator\200\001\001' - _globals['_GETPEERRATELIMITSREQ']._serialized_start=48 - _globals['_GETPEERRATELIMITSREQ']._serialized_end=127 - _globals['_GETPEERRATELIMITSRESP']._serialized_start=129 - _globals['_GETPEERRATELIMITSRESP']._serialized_end=215 - _globals['_UPDATEPEERGLOBALSREQ']._serialized_start=217 - _globals['_UPDATEPEERGLOBALSREQ']._serialized_end=298 - _globals['_UPDATEPEERGLOBAL']._serialized_start=301 - _globals['_UPDATEPEERGLOBAL']._serialized_end=506 - _globals['_UPDATEPEERGLOBALSRESP']._serialized_start=508 - _globals['_UPDATEPEERGLOBALSRESP']._serialized_end=531 - _globals['_PEERSV1']._serialized_start=534 - _globals['_PEERSV1']._serialized_end=739 -# @@protoc_insertion_point(module_scope) diff --git a/python/gubernator/peers_pb2_grpc.py b/python/gubernator/peers_pb2_grpc.py deleted file mode 100644 index 9ebb860..0000000 --- a/python/gubernator/peers_pb2_grpc.py +++ /dev/null @@ -1,104 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import peers_pb2 as peers__pb2 - - -class PeersV1Stub(object): - """NOTE: For use by gubernator peers only - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetPeerRateLimits = channel.unary_unary( - '/pb.gubernator.PeersV1/GetPeerRateLimits', - request_serializer=peers__pb2.GetPeerRateLimitsReq.SerializeToString, - response_deserializer=peers__pb2.GetPeerRateLimitsResp.FromString, - ) - self.UpdatePeerGlobals = channel.unary_unary( - '/pb.gubernator.PeersV1/UpdatePeerGlobals', - request_serializer=peers__pb2.UpdatePeerGlobalsReq.SerializeToString, - response_deserializer=peers__pb2.UpdatePeerGlobalsResp.FromString, - ) - - -class PeersV1Servicer(object): - """NOTE: For use by gubernator peers only - """ - - def GetPeerRateLimits(self, request, context): - """Used by peers to relay batches of requests to an owner peer - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def UpdatePeerGlobals(self, request, context): - """Used by owner peers to send global rate limit updates to non-owner peers - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_PeersV1Servicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetPeerRateLimits': grpc.unary_unary_rpc_method_handler( - servicer.GetPeerRateLimits, - request_deserializer=peers__pb2.GetPeerRateLimitsReq.FromString, - response_serializer=peers__pb2.GetPeerRateLimitsResp.SerializeToString, - ), - 'UpdatePeerGlobals': grpc.unary_unary_rpc_method_handler( - servicer.UpdatePeerGlobals, - request_deserializer=peers__pb2.UpdatePeerGlobalsReq.FromString, - response_serializer=peers__pb2.UpdatePeerGlobalsResp.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'pb.gubernator.PeersV1', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. 
-class PeersV1(object): - """NOTE: For use by gubernator peers only - """ - - @staticmethod - def GetPeerRateLimits(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.PeersV1/GetPeerRateLimits', - peers__pb2.GetPeerRateLimitsReq.SerializeToString, - peers__pb2.GetPeerRateLimitsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def UpdatePeerGlobals(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.PeersV1/UpdatePeerGlobals', - peers__pb2.UpdatePeerGlobalsReq.SerializeToString, - peers__pb2.UpdatePeerGlobalsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/requirements-py2.txt b/python/requirements-py2.txt deleted file mode 100644 index bd761a7..0000000 --- a/python/requirements-py2.txt +++ /dev/null @@ -1,15 +0,0 @@ -atomicwrites==1.3.0 -attrs==18.2.0 -enum34==1.1.6 -funcsigs==1.0.2 -futures==3.2.0 -googleapis-common-protos==1.5.8 -grpcio==1.53.2 -more-itertools==5.0.0 -pathlib2==2.3.3 -pluggy==0.8.1 -protobuf==3.18.3 -py==1.10.0 -pytest==7.2.0 -scandir==1.9.0 -six==1.12.0 diff --git a/python/requirements-py3.txt b/python/requirements-py3.txt deleted file mode 100644 index 00d54ad..0000000 --- a/python/requirements-py3.txt +++ /dev/null @@ -1,11 +0,0 @@ -atomicwrites==1.3.0 -attrs==18.2.0 -googleapis-common-protos==1.5.8 -grpcio==1.53.2 -grpcio-tools==1.19.0 -more-itertools==6.0.0 -pluggy==0.8.1 -protobuf==3.18.3 -py==1.10.0 -pytest==7.2.0 -six==1.12.0 diff --git a/python/setup.py b/python/setup.py deleted file mode 100755 index 6657f3f..0000000 --- a/python/setup.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2018-2022 Mailgun Technologies Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -try: # for pip >= 10 - from pip._internal.req import parse_requirements -except ImportError: # for pip <= 9.0.3 - from pip.req import parse_requirements -from setuptools import setup, find_packages -import platform - -with open('version', 'r') as version_file: - version = version_file.readline().strip() - -if platform.python_version_tuple()[0] == '2': - reqs = parse_requirements('requirements-py2.txt', session='') -else: - reqs = parse_requirements('requirements-py3.txt', session='') - -requirements = [str(r.req) for r in reqs] - -setup( - name='gubernator', - version='0.1.0', - description="Python client for gubernator", - author="Derrick J. 
Wippler", - author_email='thrawn01@gmail.com', - url='https://github.com/gubernator-io/gubernator', - package_dir={'': '.'}, - packages=find_packages('.', exclude=['tests']), - install_requires=requirements, - license="Apache Software License 2.0", - python_requires='>=2.7', - classifiers=[ - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - ], -) diff --git a/python/tests/__init__.py b/python/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/python/tests/test_client.py b/python/tests/test_client.py deleted file mode 100644 index 28efabf..0000000 --- a/python/tests/test_client.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2018-2022 Mailgun Technologies Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from gubernator import ratelimit_pb2 as pb - -import pytest -import subprocess -import os -import gubernator - - -@pytest.fixture(scope='module') -def cluster(): - args = ["/bin/sh", "-c", - "go run ./cmd/gubernator-cluster/main.go"] - - os.chdir("golang") - proc = subprocess.Popen(args, stdout=subprocess.PIPE) - os.chdir("..") - - while True: - line = proc.stdout.readline() - if b'Ready' in line: - break - yield proc - proc.kill() - - -def test_health_check(cluster): - client = gubernator.V1Client() - resp = client.health_check() - print("Health:", resp) - - -def test_get_rate_limit(cluster): - req = pb.Requests() - rate_limit = req.requests.add() - - rate_limit.algorithm = pb.TOKEN_BUCKET - rate_limit.duration = gubernator.SECOND * 2 - rate_limit.limit = 10 - rate_limit.namespace = 'test-ns' - rate_limit.unique_key = 'domain-id-0001' - rate_limit.hits = 1 - - client = gubernator.V1Client() - resp = client.GetRateLimits(req, timeout=0.5) - print("RateLimit: {}".format(resp)) diff --git a/region_picker.go b/region_picker.go index 4bef59d..6bd8d0b 100644 --- a/region_picker.go +++ b/region_picker.go @@ -17,11 +17,11 @@ limitations under the License. 
 package gubernator
 
 type RegionPeerPicker interface {
-	GetClients(string) ([]*PeerClient, error)
-	GetByPeerInfo(PeerInfo) *PeerClient
+	GetClients(string) ([]*Peer, error)
+	GetByPeerInfo(PeerInfo) *Peer
 	Pickers() map[string]PeerPicker
-	Peers() []*PeerClient
-	Add(*PeerClient)
+	Peers() []*Peer
+	Add(*Peer)
 	New() RegionPeerPicker
 }
 
@@ -32,14 +32,14 @@ type RegionPicker struct {
 	// A map of all the pickers by region
 	regions map[string]PeerPicker
 	// The implementation of picker we will use for each region
-	reqQueue chan *RateLimitReq
+	reqQueue chan *RateLimitRequest
 }
 
 func NewRegionPicker(fn HashString64) *RegionPicker {
 	rp := &RegionPicker{
 		regions: make(map[string]PeerPicker),
-		reqQueue: make(chan *RateLimitReq),
-		ReplicatedConsistentHash: NewReplicatedConsistentHash(fn, defaultReplicas),
+		reqQueue: make(chan *RateLimitRequest),
+		ReplicatedConsistentHash: NewReplicatedConsistentHash(fn, DefaultReplicas),
 	}
 	return rp
 }
@@ -48,14 +48,14 @@ func (rp *RegionPicker) New() RegionPeerPicker {
 	hash := rp.ReplicatedConsistentHash.New().(*ReplicatedConsistentHash)
 	return &RegionPicker{
 		regions: make(map[string]PeerPicker),
-		reqQueue: make(chan *RateLimitReq),
+		reqQueue: make(chan *RateLimitRequest),
 		ReplicatedConsistentHash: hash,
 	}
 }
 
-// GetClients returns all the PeerClients that match this key in all regions
-func (rp *RegionPicker) GetClients(key string) ([]*PeerClient, error) {
-	result := make([]*PeerClient, len(rp.regions))
+// GetClients returns all the Peers that match this key in all regions
+func (rp *RegionPicker) GetClients(key string) ([]*Peer, error) {
+	result := make([]*Peer, len(rp.regions))
 	var i int
 	for _, picker := range rp.regions {
 		peer, err := picker.Get(key)
@@ -69,7 +69,7 @@ func (rp *RegionPicker) GetClients(key string) ([]*PeerClient, error) {
 }
 
-// GetByPeerInfo returns the first PeerClient the PeerInfo.HasKey() matches
-func (rp *RegionPicker) GetByPeerInfo(info PeerInfo) *PeerClient {
+// GetByPeerInfo returns the first Peer whose PeerInfo.HasKey() matches
+func (rp *RegionPicker) GetByPeerInfo(info PeerInfo) *Peer {
 	for _, picker := range rp.regions {
 		if client := picker.GetByPeerInfo(info); client != nil {
 			return client
@@ -83,8 +83,8 @@ func (rp *RegionPicker) Pickers() map[string]PeerPicker {
 	return rp.regions
 }
 
-func (rp *RegionPicker) Peers() []*PeerClient {
-	var peers []*PeerClient
+func (rp *RegionPicker) Peers() []*Peer {
+	var peers []*Peer
 	for _, picker := range rp.regions {
 		peers = append(peers, picker.Peers()...)
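Aside: the `PeerClient` to `Peer` rename above, together with the move from `GRPCAddress` to `HTTPAddress`, changes how callers build and query a picker. The minimal sketch below is assembled from the updated `replicated_hash_test.go` later in this patch; the exported `Conf` field, the `guber` import alias, and the sample hostnames are taken from those tests and are illustrative, not authoritative.

```go
package main

import (
	"fmt"

	guber "github.com/gubernator-io/gubernator/v3"
)

func main() {
	// Passing nil selects the default fnv1 hash function, as the updated tests do.
	hash := guber.NewReplicatedConsistentHash(nil, guber.DefaultReplicas)

	// Peers are now identified by their HTTP address instead of a GRPC address.
	for _, h := range []string{"a.svc.local", "b.svc.local", "c.svc.local"} {
		hash.Add(&guber.Peer{Conf: guber.PeerConfig{Info: guber.PeerInfo{HTTPAddress: h}}})
	}

	// Get returns the peer that owns this key on the hash ring.
	peer, err := hash.Get("account:1234")
	if err != nil {
		panic(err)
	}
	fmt.Println(peer.Info().HTTPAddress)
}
```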
@@ -93,7 +93,7 @@ func (rp *RegionPicker) Peers() []*PeerClient {
 	return peers
 }
 
-func (rp *RegionPicker) Add(peer *PeerClient) {
+func (rp *RegionPicker) Add(peer *Peer) {
 	picker, ok := rp.regions[peer.Info().DataCenter]
 	if !ok {
 		picker = rp.ReplicatedConsistentHash.New()
diff --git a/replicated_hash.go b/replicated_hash.go
index c53504e..c5c4586 100644
--- a/replicated_hash.go
+++ b/replicated_hash.go
@@ -26,29 +26,38 @@ import (
 	"github.com/segmentio/fasthash/fnv1"
 )
 
-const defaultReplicas = 512
+type PeerPicker interface {
+	GetByPeerInfo(PeerInfo) *Peer
+	Peers() []*Peer
+	Get(string) (*Peer, error)
+	New() PeerPicker
+	Add(*Peer)
+}
+
+// DefaultReplicas is the number of replicas the hashmap will create by default
+const DefaultReplicas = 512
 
 type HashString64 func(data string) uint64
 
 var defaultHashString64 HashString64 = fnv1.HashString64
 
-// Implements PeerPicker
+// ReplicatedConsistentHash implements PeerPicker
 type ReplicatedConsistentHash struct {
 	hashFunc HashString64
 	peerKeys []peerInfo
-	peers map[string]*PeerClient
+	peers map[string]*Peer
 	replicas int
 }
 
 type peerInfo struct {
 	hash uint64
-	peer *PeerClient
+	peer *Peer
 }
 
 func NewReplicatedConsistentHash(fn HashString64, replicas int) *ReplicatedConsistentHash {
 	ch := &ReplicatedConsistentHash{
 		hashFunc: fn,
-		peers: make(map[string]*PeerClient),
+		peers: make(map[string]*Peer),
 		replicas: replicas,
 	}
 
@@ -61,24 +70,24 @@ func NewReplicatedConsistentHash(fn HashString64, replicas int) *ReplicatedConsi
 func (ch *ReplicatedConsistentHash) New() PeerPicker {
 	return &ReplicatedConsistentHash{
 		hashFunc: ch.hashFunc,
-		peers: make(map[string]*PeerClient),
+		peers: make(map[string]*Peer),
 		replicas: ch.replicas,
 	}
 }
 
-func (ch *ReplicatedConsistentHash) Peers() []*PeerClient {
-	var results []*PeerClient
+func (ch *ReplicatedConsistentHash) Peers() []*Peer {
+	var results []*Peer
 	for _, v := range ch.peers {
 		results = append(results, v)
 	}
 	return results
 }
 
-// Adds a peer to the hash
-func (ch *ReplicatedConsistentHash) Add(peer *PeerClient) {
-	ch.peers[peer.Info().GRPCAddress] = peer
+// Add adds a peer to the hash
+func (ch *ReplicatedConsistentHash) Add(peer *Peer) {
+	ch.peers[peer.Info().HTTPAddress] = peer
 
-	key := fmt.Sprintf("%x", md5.Sum([]byte(peer.Info().GRPCAddress)))
+	key := fmt.Sprintf("%x", md5.Sum([]byte(peer.Info().HTTPAddress)))
 	for i := 0; i < ch.replicas; i++ {
 		hash := ch.hashFunc(strconv.Itoa(i) + key)
 		ch.peerKeys = append(ch.peerKeys, peerInfo{
@@ -90,18 +99,18 @@ func (ch *ReplicatedConsistentHash) Add(peer *PeerClient) {
 	sort.Slice(ch.peerKeys, func(i, j int) bool { return ch.peerKeys[i].hash < ch.peerKeys[j].hash })
 }
 
-// Returns number of peers in the picker
+// Size returns the number of peers in the picker
 func (ch *ReplicatedConsistentHash) Size() int {
 	return len(ch.peers)
 }
 
-// Returns the peer by hostname
-func (ch *ReplicatedConsistentHash) GetByPeerInfo(peer PeerInfo) *PeerClient {
-	return ch.peers[peer.GRPCAddress]
+// GetByPeerInfo returns the peer by hostname
+func (ch *ReplicatedConsistentHash) GetByPeerInfo(peer PeerInfo) *Peer {
+	return ch.peers[peer.HTTPAddress]
}
 
-// Given a key, return the peer that key is assigned too
-func (ch *ReplicatedConsistentHash) Get(key string) (*PeerClient, error) {
+// Get returns the peer that key is assigned to
+func (ch *ReplicatedConsistentHash) Get(key string) (*Peer, error) {
 	if ch.Size() == 0 {
 		return nil, errors.New("unable to pick a peer; pool is empty")
 	}
diff --git a/replicated_hash_test.go b/replicated_hash_test.go
index 699808b..7cd9c84 100644
---
a/replicated_hash_test.go +++ b/replicated_hash_test.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package gubernator +package gubernator_test import ( "net" "testing" + guber "github.com/gubernator-io/gubernator/v3" "github.com/segmentio/fasthash/fnv1" "github.com/segmentio/fasthash/fnv1a" "github.com/stretchr/testify/assert" @@ -29,27 +30,27 @@ func TestReplicatedConsistentHash(t *testing.T) { hosts := []string{"a.svc.local", "b.svc.local", "c.svc.local"} t.Run("Size", func(t *testing.T) { - hash := NewReplicatedConsistentHash(nil, defaultReplicas) + hash := guber.NewReplicatedConsistentHash(nil, guber.DefaultReplicas) for _, h := range hosts { - hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) + hash.Add(&guber.Peer{Conf: guber.PeerConfig{Info: guber.PeerInfo{HTTPAddress: h}}}) } assert.Equal(t, len(hosts), hash.Size()) }) t.Run("Host", func(t *testing.T) { - hash := NewReplicatedConsistentHash(nil, defaultReplicas) - hostMap := map[string]*PeerClient{} + hash := guber.NewReplicatedConsistentHash(nil, guber.DefaultReplicas) + hostMap := make(map[string]*guber.Peer) for _, h := range hosts { - peer := &PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}} + peer := &guber.Peer{Conf: guber.PeerConfig{Info: guber.PeerInfo{HTTPAddress: h}}} hash.Add(peer) hostMap[h] = peer } for host, peer := range hostMap { - assert.Equal(t, peer, hash.GetByPeerInfo(PeerInfo{GRPCAddress: host})) + assert.Equal(t, peer, hash.GetByPeerInfo(guber.PeerInfo{HTTPAddress: host})) } }) @@ -62,7 +63,7 @@ func TestReplicatedConsistentHash(t *testing.T) { for _, tc := range []struct { name string - inHashFunc HashString64 + inHashFunc guber.HashString64 outDistribution map[string]int }{{ name: "default", @@ -83,17 +84,17 @@ func TestReplicatedConsistentHash(t *testing.T) { }, }} { t.Run(tc.name, func(t *testing.T) { - hash := NewReplicatedConsistentHash(tc.inHashFunc, defaultReplicas) + hash := guber.NewReplicatedConsistentHash(tc.inHashFunc, guber.DefaultReplicas) distribution := make(map[string]int) for _, h := range hosts { - hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) + hash.Add(&guber.Peer{Conf: guber.PeerConfig{Info: guber.PeerInfo{HTTPAddress: h}}}) distribution[h] = 0 } for i := range strings { peer, _ := hash.Get(strings[i]) - distribution[peer.Info().GRPCAddress]++ + distribution[peer.Info().HTTPAddress]++ } assert.Equal(t, tc.outDistribution, distribution) }) @@ -103,7 +104,7 @@ func TestReplicatedConsistentHash(t *testing.T) { } func BenchmarkReplicatedConsistantHash(b *testing.B) { - hashFuncs := map[string]HashString64{ + hashFuncs := map[string]guber.HashString64{ "fasthash/fnv1a": fnv1a.HashString64, "fasthash/fnv1": fnv1.HashString64, } @@ -115,10 +116,10 @@ func BenchmarkReplicatedConsistantHash(b *testing.B) { ips[i] = net.IPv4(byte(i>>24), byte(i>>16), byte(i>>8), byte(i)).String() } - hash := NewReplicatedConsistentHash(hashFunc, defaultReplicas) + hash := guber.NewReplicatedConsistentHash(hashFunc, guber.DefaultReplicas) hosts := []string{"a.svc.local", "b.svc.local", "c.svc.local"} for _, h := range hosts { - hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) + hash.Add(&guber.Peer{Conf: guber.PeerConfig{Info: guber.PeerInfo{HTTPAddress: h}}}) } b.ResetTimer() diff --git a/staticbuilder.go b/staticbuilder.go deleted file mode 100644 index 9bbd832..0000000 --- a/staticbuilder.go +++ /dev/null @@ -1,45 +0,0 @@ -package gubernator - -import 
( - "strings" - - "google.golang.org/grpc/resolver" -) - -type staticBuilder struct{} - -var _ resolver.Builder = (*staticBuilder)(nil) - -func (sb *staticBuilder) Scheme() string { - return "static" -} - -func (sb *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - var resolverAddrs []resolver.Address - for _, address := range strings.Split(target.Endpoint(), ",") { - resolverAddrs = append(resolverAddrs, resolver.Address{ - Addr: address, - ServerName: address, - }) - } - if err := cc.UpdateState(resolver.State{Addresses: resolverAddrs}); err != nil { - return nil, err - } - return &staticResolver{cc: cc}, nil -} - -// NewStaticBuilder returns a builder which returns a staticResolver that tells GRPC -// to connect a specific peer in the cluster. -func NewStaticBuilder() resolver.Builder { - return &staticBuilder{} -} - -type staticResolver struct { - cc resolver.ClientConn -} - -func (sr *staticResolver) ResolveNow(_ resolver.ResolveNowOptions) {} - -func (sr *staticResolver) Close() {} - -var _ resolver.Resolver = (*staticResolver)(nil) diff --git a/store.go b/store.go index 1c23461..9feea2b 100644 --- a/store.go +++ b/store.go @@ -50,13 +50,13 @@ type Store interface { // Called by gubernator *after* a rate limit item is updated. It's up to the store to // decide if this rate limit item should be persisted in the store. It's up to the // store to expire old rate limit items. The CacheItem represents the current state of - // the rate limit item *after* the RateLimitReq has been applied. - OnChange(ctx context.Context, r *RateLimitReq, item *CacheItem) + // the rate limit item *after* the RateLimitRequest has been applied. + OnChange(ctx context.Context, r *RateLimitRequest, item *CacheItem) // Called by gubernator when a rate limit is missing from the cache. It's up to the store // to decide if this request is fulfilled. Should return true if the request is fulfilled // and false if the request is not fulfilled or doesn't exist in the store. - Get(ctx context.Context, r *RateLimitReq) (*CacheItem, bool) + Get(ctx context.Context, r *RateLimitRequest) (*CacheItem, bool) // Called by gubernator when an existing rate limit should be removed from the store. 
-	// NOTE: This is NOT called when an rate limit expires from the cache, store implementors
+	// NOTE: This is NOT called when a rate limit expires from the cache, store implementors
@@ -95,12 +95,12 @@ type MockStore struct {
 
 var _ Store = &MockStore{}
 
-func (ms *MockStore) OnChange(ctx context.Context, r *RateLimitReq, item *CacheItem) {
+func (ms *MockStore) OnChange(ctx context.Context, r *RateLimitRequest, item *CacheItem) {
 	ms.Called["OnChange()"] += 1
 	ms.CacheItems[item.Key] = item
 }
 
-func (ms *MockStore) Get(ctx context.Context, r *RateLimitReq) (*CacheItem, bool) {
+func (ms *MockStore) Get(ctx context.Context, r *RateLimitRequest) (*CacheItem, bool) {
 	ms.Called["Get()"] += 1
 	item, ok := ms.CacheItems[r.HashKey()]
 	return item, ok
diff --git a/store_test.go b/store_test.go
index e7c58f6..6fa3921 100644
--- a/store_test.go
+++ b/store_test.go
@@ -18,81 +18,42 @@ package gubernator_test
 
 import (
 	"context"
-	"fmt"
-	"net"
 	"testing"
 
-	"github.com/gubernator-io/gubernator/v2"
+	"github.com/gubernator-io/gubernator/v3"
 	"github.com/mailgun/holster/v4/clock"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	"google.golang.org/grpc"
 )
 
-type v1Server struct {
-	conf gubernator.Config
-	listener net.Listener
-	srv *gubernator.V1Instance
-}
-
-func (s *v1Server) Close() error {
-	s.conf.GRPCServers[0].GracefulStop()
-	return s.srv.Close()
-}
-
-// Start a single instance of V1Server with the provided config and listening address.
-func newV1Server(t *testing.T, address string, conf gubernator.Config) *v1Server {
-	t.Helper()
-	conf.GRPCServers = append(conf.GRPCServers, grpc.NewServer())
-
-	srv, err := gubernator.NewV1Instance(conf)
-	require.NoError(t, err)
-
-	listener, err := net.Listen("tcp", address)
-	require.NoError(t, err)
-
-	go func() {
-		if err := conf.GRPCServers[0].Serve(listener); err != nil {
-			fmt.Printf("while serving: %s\n", err)
-		}
-	}()
-
-	srv.SetPeers([]gubernator.PeerInfo{{GRPCAddress: listener.Addr().String(), IsOwner: true}})
-
-	ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10)
-
-	err = gubernator.WaitForConnect(ctx, []string{listener.Addr().String()})
-	require.NoError(t, err)
-	cancel()
-
-	return &v1Server{
-		conf: conf,
-		listener: listener,
-		srv: srv,
-	}
-}
-
 func TestLoader(t *testing.T) {
 	loader := gubernator.NewMockLoader()
 
-	srv := newV1Server(t, "localhost:0", gubernator.Config{
+	d, err := gubernator.SpawnDaemon(context.Background(), gubernator.DaemonConfig{
+		HTTPListenAddress: "localhost:0",
 		Behaviors: gubernator.BehaviorConfig{
+			// Suitable for testing but not production
 			GlobalSyncWait: clock.Millisecond * 50, // Suitable for testing but not production
 			GlobalTimeout: clock.Second,
 		},
 		Loader: loader,
 	})
+	require.NoError(t, err)
+	conf := d.Config()
+	d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}})
+
 	// loader.Load() should have been called for gubernator startup
 	assert.Equal(t, 1, loader.Called["Load()"])
 	assert.Equal(t, 0, loader.Called["Save()"])
 
-	client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil)
-	assert.Nil(t, err)
+	client, err := gubernator.NewClient(gubernator.WithNoTLS(d.Listener.Addr().String()))
+	require.NoError(t, err)
 
-	resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{
-		Requests: []*gubernator.RateLimitReq{
+	var resp gubernator.CheckRateLimitsResponse
+	err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{
+		Requests: []*gubernator.RateLimitRequest{
 			{
 				Name: "test_over_limit",
 				UniqueKey: "account:1234",
@@
-102,14 +63,12 @@ func TestLoader(t *testing.T) { Hits: 1, }, }, - }) - require.Nil(t, err) - require.NotNil(t, resp) + }, &resp) + require.NoError(t, err) require.Equal(t, 1, len(resp.Responses)) require.Equal(t, "", resp.Responses[0].Error) - err = srv.Close() - require.NoError(t, err, "Error in srv.Close") + d.Close(context.Background()) // Loader.Save() should have been called during gubernator shutdown assert.Equal(t, 1, loader.Called["Load()"]) @@ -126,31 +85,34 @@ func TestLoader(t *testing.T) { func TestStore(t *testing.T) { ctx := context.Background() - setup := func() (*MockStore2, *v1Server, gubernator.V1Client) { + setup := func() (*MockStore2, *gubernator.Daemon, gubernator.Client) { store := &MockStore2{} - srv := newV1Server(t, "localhost:0", gubernator.Config{ + d, err := gubernator.SpawnDaemon(context.Background(), gubernator.DaemonConfig{ + HTTPListenAddress: "localhost:0", Behaviors: gubernator.BehaviorConfig{ - GlobalSyncWait: clock.Millisecond * 50, // Suitable for testing but not production + GlobalSyncWait: clock.Millisecond * 50, GlobalTimeout: clock.Second, }, Store: store, }) + require.NoError(t, err) + conf := d.Config() + d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}}) - client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil) + client, err := gubernator.NewClient(gubernator.WithNoTLS(d.Listener.Addr().String())) require.NoError(t, err) - return store, srv, client + return store, d, client } - tearDown := func(srv *v1Server) { - err := srv.Close() - require.NoError(t, err) + tearDown := func(d *gubernator.Daemon) { + d.Close(context.Background()) } // Create a mock argument matcher for a request by name/key. - matchReq := func(req *gubernator.RateLimitReq) interface{} { - return mock.MatchedBy(func(req2 *gubernator.RateLimitReq) bool { + matchReq := func(req *gubernator.RateLimitRequest) interface{} { + return mock.MatchedBy(func(req2 *gubernator.RateLimitRequest) bool { return req2.Name == req.Name && req2.UniqueKey == req.UniqueKey }) @@ -158,7 +120,7 @@ func TestStore(t *testing.T) { // Create a mock argument matcher for CacheItem input. // Verify item matches expected algorithm, limit, and duration. - matchItem := func(req *gubernator.RateLimitReq) interface{} { + matchItem := func(req *gubernator.RateLimitRequest) interface{} { switch req.Algorithm { case gubernator.Algorithm_TOKEN_BUCKET: return mock.MatchedBy(func(item *gubernator.CacheItem) bool { @@ -193,7 +155,7 @@ func TestStore(t *testing.T) { } // Create a bucket item matching the request. - createBucketItem := func(req *gubernator.RateLimitReq) interface{} { + createBucketItem := func(req *gubernator.RateLimitRequest) interface{} { switch req.Algorithm { case gubernator.Algorithm_TOKEN_BUCKET: return &gubernator.TokenBucketItem{ @@ -230,7 +192,7 @@ func TestStore(t *testing.T) { store, srv, client := setup() defer tearDown(srv) - req := &gubernator.RateLimitReq{ + req := &gubernator.RateLimitRequest{ Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: testCase.Algorithm, @@ -244,12 +206,13 @@ func TestStore(t *testing.T) { store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once() // Call code.
- resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) @@ -259,12 +222,13 @@ func TestStore(t *testing.T) { store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once() // Call code. - resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) @@ -275,7 +239,7 @@ func TestStore(t *testing.T) { store, srv, client := setup() defer tearDown(srv) - req := &gubernator.RateLimitReq{ + req := &gubernator.RateLimitRequest{ Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: testCase.Algorithm, @@ -298,12 +262,13 @@ func TestStore(t *testing.T) { store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once() // Call code. - resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) @@ -314,7 +279,7 @@ func TestStore(t *testing.T) { store, srv, client := setup() defer tearDown(srv) - req := &gubernator.RateLimitReq{ + req := &gubernator.RateLimitRequest{ Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: testCase.Algorithm, @@ -338,12 +303,13 @@ func TestStore(t *testing.T) { store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once() // Call code. 
- resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) @@ -360,7 +326,7 @@ func TestStore(t *testing.T) { oldDuration := int64(5000) newDuration := int64(8000) - req := &gubernator.RateLimitReq{ + req := &gubernator.RateLimitRequest{ Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: testCase.Algorithm, @@ -427,12 +393,13 @@ func TestStore(t *testing.T) { Once() // Call code. - resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) @@ -447,7 +414,7 @@ func TestStore(t *testing.T) { oldDuration := int64(500000) newDuration := int64(8000) - req := &gubernator.RateLimitReq{ + req := &gubernator.RateLimitRequest{ Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: testCase.Algorithm, @@ -517,12 +484,13 @@ func TestStore(t *testing.T) { Once() // Call code. 
- resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{req}, - }) + var resp gubernator.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{req}, + }, &resp) require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.Responses, 1) + assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, req.Limit, resp.Responses[0].Limit) assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status) store.AssertExpectations(t) diff --git a/tls_test.go b/tls_test.go index f6b56ba..cb78ba3 100644 --- a/tls_test.go +++ b/tls_test.go @@ -25,7 +25,7 @@ import ( "strings" "testing" - "github.com/gubernator-io/gubernator/v2" + "github.com/gubernator-io/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -39,18 +39,19 @@ func spawnDaemon(t *testing.T, conf gubernator.DaemonConfig) *gubernator.Daemon d, err := gubernator.SpawnDaemon(ctx, conf) cancel() require.NoError(t, err) - d.SetPeers([]gubernator.PeerInfo{{GRPCAddress: conf.GRPCListenAddress, IsOwner: true}}) + d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}}) return d } func makeRequest(t *testing.T, conf gubernator.DaemonConfig) error { t.Helper() - client, err := gubernator.DialV1Server(conf.GRPCListenAddress, conf.TLS.ClientTLS) + client, err := gubernator.NewClient(gubernator.WithTLS(conf.ClientTLS(), conf.HTTPListenAddress)) require.NoError(t, err) - resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{ + var resp gubernator.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{ { Name: "test_tls", UniqueKey: "account:995", @@ -60,7 +61,7 @@ func makeRequest(t *testing.T, conf gubernator.DaemonConfig) error { Hits: 1, }, }, - }) + }, &resp) if err != nil { return err @@ -120,18 +121,18 @@ func TestSetupTLS(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { conf := gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9695", HTTPListenAddress: "127.0.0.1:9685", TLS: tt.tls, } d := spawnDaemon(t, conf) - client, err := gubernator.DialV1Server(conf.GRPCListenAddress, tt.tls.ClientTLS) + client, err := gubernator.NewClient(gubernator.WithTLS(conf.ClientTLS(), conf.HTTPListenAddress)) require.NoError(t, err) - resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{ + var resp gubernator.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{ { Name: "test_tls", UniqueKey: "account:995", @@ -141,21 +142,20 @@ func TestSetupTLS(t *testing.T) { Hits: 1, }, }, - }) + }, &resp) require.NoError(t, err) rl := resp.Responses[0] assert.Equal(t, "", rl.Error) assert.Equal(t, gubernator.Status_UNDER_LIMIT, rl.Status) assert.Equal(t, int64(99), rl.Remaining) - d.Close() + d.Close(context.Background()) }) } } func TestSetupTLSSkipVerify(t *testing.T) { conf := gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9695", HTTPListenAddress: "127.0.0.1:9685", TLS: &gubernator.TLSConfig{ CaFile: "contrib/certs/ca.cert", @@ -165,7 +165,7 @@ func TestSetupTLSSkipVerify(t *testing.T) { } d := spawnDaemon(t, conf) - defer 
d.Close() + defer d.Close(context.Background()) tls := &gubernator.TLSConfig{ AutoTLS: true, @@ -190,13 +190,12 @@ func TestSetupTLSClientAuth(t *testing.T) { } conf := gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9695", HTTPListenAddress: "127.0.0.1:9685", TLS: &serverTLS, } d := spawnDaemon(t, conf) - defer d.Close() + defer d.Close(context.Background()) // Given generated client certs tls := &gubernator.TLSConfig{ @@ -211,7 +210,8 @@ func TestSetupTLSClientAuth(t *testing.T) { // Should not be allowed without a cert signed by the client CA err = makeRequest(t, conf) require.Error(t, err) - assert.Contains(t, err.Error(), "code = Unavailable desc") + // The error differs depending on the Go version + //assert.Contains(t, err.Error(), "tls: certificate required") // Given the client auth certs tls = &gubernator.TLSConfig{ @@ -238,27 +238,23 @@ func TestTLSClusterWithClientAuthentication(t *testing.T) { } d1 := spawnDaemon(t, gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9695", HTTPListenAddress: "127.0.0.1:9685", TLS: &serverTLS, }) - defer d1.Close() + defer d1.Close(context.Background()) d2 := spawnDaemon(t, gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9696", HTTPListenAddress: "127.0.0.1:9686", TLS: &serverTLS, }) - defer d2.Close() + defer d2.Close(context.Background()) peers := []gubernator.PeerInfo{ { - GRPCAddress: d1.GRPCListeners[0].Addr().String(), - HTTPAddress: d1.HTTPListener.Addr().String(), + HTTPAddress: d1.Listener.Addr().String(), }, { - GRPCAddress: d2.GRPCListeners[0].Addr().String(), - HTTPAddress: d2.HTTPListener.Addr().String(), + HTTPAddress: d2.Listener.Addr().String(), }, } d1.SetPeers(peers) @@ -281,13 +277,12 @@ func TestTLSClusterWithClientAuthentication(t *testing.T) { b, err := io.ReadAll(resp.Body) require.NoError(t, err) - // Should have called GetPeerRateLimits on d2 - assert.Contains(t, string(b), `{method="/pb.gubernator.PeersV1/GetPeerRateLimits"} 1`) + // Should have called /v1/peer.forward on d2 + assert.Contains(t, string(b), `{path="`+gubernator.RPCPeerForward+`"} 1`) } func TestHTTPSClientAuth(t *testing.T) { conf := gubernator.DaemonConfig{ - GRPCListenAddress: "127.0.0.1:9695", HTTPListenAddress: "127.0.0.1:9685", HTTPStatusListenAddress: "127.0.0.1:9686", TLS: &gubernator.TLSConfig{ @@ -299,7 +294,7 @@ func TestHTTPSClientAuth(t *testing.T) { } d := spawnDaemon(t, conf) - defer d.Close() + defer d.Close(context.Background()) clientWithCert := &http.Client{ Transport: &http.Transport{ @@ -315,9 +310,9 @@ func TestHTTPSClientAuth(t *testing.T) { }, } - reqCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/v1/HealthCheck", conf.HTTPListenAddress), nil) + reqCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/healthz", conf.HTTPListenAddress), nil) require.NoError(t, err) - reqNoClientCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/v1/HealthCheck", conf.HTTPStatusListenAddress), nil) + reqNoClientCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/healthz", conf.HTTPStatusListenAddress), nil) require.NoError(t, err) // Test that a client without a cert can access /healthz at the status address resp, err := clientWithoutCert.Do(reqNoClientCertRequired) require.NoError(t, err) defer resp.Body.Close() b, err := io.ReadAll(resp.Body) require.NoError(t, err) - assert.Equal(t, `{"status":"healthy","message":"","peer_count":1}`, strings.ReplaceAll(string(b), " ", "")) + assert.Equal(t, `{"status":"healthy","peerCount":1}`,
strings.ReplaceAll(string(b), " ", "")) // Verify we get an error when we try to access existing HTTPListenAddress without cert - //nolint:bodyclose // Expect error, no body to close. - _, err = clientWithoutCert.Do(reqCertRequired) + _, err = clientWithoutCert.Do(reqCertRequired) //nolint:all require.Error(t, err) + // The error message differs depending on the Go version + //assert.Contains(t, err.Error(), "remote error: tls: certificate required") - // Check that with a valid client cert we can access /v1/HealthCheck at existing HTTPListenAddress - resp2, err := clientWithCert.Do(reqCertRequired) + // Check that with a valid client cert we can access /healthz at the existing HTTPListenAddress + resp3, err := clientWithCert.Do(reqCertRequired) require.NoError(t, err) - defer resp2.Body.Close() - b, err = io.ReadAll(resp2.Body) + defer resp3.Body.Close() + b, err = io.ReadAll(resp3.Body) require.NoError(t, err) - assert.Equal(t, `{"status":"healthy","message":"","peer_count":1}`, strings.ReplaceAll(string(b), " ", "")) + assert.Equal(t, `{"status":"healthy","peerCount":1}`, strings.ReplaceAll(string(b), " ", "")) } diff --git a/workers.go b/workers.go index 34d99d1..cd929c1 100644 --- a/workers.go +++ b/workers.go @@ -61,14 +61,14 @@ type WorkerPool struct { } type Worker struct { - name string - conf *Config - cache Cache - getRateLimitRequest chan request - storeRequest chan workerStoreRequest - loadRequest chan workerLoadRequest - addCacheItemRequest chan workerAddCacheItemRequest - getCacheItemRequest chan workerGetCacheItemRequest + name string + conf *Config + cache Cache + getRateLimitRequest chan request + storeRequest chan workerStoreRequest + loadRequest chan workerLoadRequest + addCacheItemRequest chan workerAddCacheItemRequest + getCacheItemRequest chan workerGetCacheItemRequest } type workerHasher interface { @@ -162,13 +162,13 @@ func (p *WorkerPool) Close() error { // Create a new pool worker instance. func (p *WorkerPool) newWorker() *Worker { worker := &Worker{ - conf: p.conf, - cache: p.conf.CacheFactory(p.workerCacheSize), - getRateLimitRequest: make(chan request), - storeRequest: make(chan workerStoreRequest), - loadRequest: make(chan workerLoadRequest), - addCacheItemRequest: make(chan workerAddCacheItemRequest), - getCacheItemRequest: make(chan workerGetCacheItemRequest), + conf: p.conf, + cache: p.conf.CacheFactory(p.workerCacheSize), + getRateLimitRequest: make(chan request), + storeRequest: make(chan workerStoreRequest), + loadRequest: make(chan workerLoadRequest), + addCacheItemRequest: make(chan workerAddCacheItemRequest), + getCacheItemRequest: make(chan workerGetCacheItemRequest), } workerNumber := atomic.AddInt64(&workerCounter, 1) - 1 worker.name = strconv.FormatInt(workerNumber, 10) @@ -191,7 +191,7 @@ func (p *WorkerPool) dispatch(worker *Worker) { for { // Dispatch requests from each channel. select { - case req, ok := <-worker.getRateLimitRequest: + case req, ok := <-worker.getRateLimitRequest: if !ok { // Channel closed. Unexpected, but should be handled. logrus.Error("workerPool worker stopped because channel closed") @@ -258,7 +258,7 @@ func (p *WorkerPool) dispatch(worker *Worker) { } // GetRateLimit sends a GetRateLimit request to the worker pool.
-func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq, reqState RateLimitReqState) (*RateLimitResp, error) { +func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitRequest, reqState RateLimitRequestState) (*RateLimitResponse, error) { // Delegate request to assigned channel based on request key. worker := p.getWorker(rlRequest.HashKey()) queueGauge := metricWorkerQueue.WithLabelValues("GetRateLimit", worker.name) @@ -273,7 +273,7 @@ func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq, // Send request. select { - case worker.getRateLimitRequest <- handlerRequest: + case worker.getRateLimitRequest <- handlerRequest: // Successfully sent request. case <-ctx.Done(): return nil, ctx.Err() @@ -290,9 +290,9 @@ func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq, } // Handle request received by worker. -func (worker *Worker) handleGetRateLimit(ctx context.Context, req *RateLimitReq, reqState RateLimitReqState, cache Cache) (*RateLimitResp, error) { +func (worker *Worker) handleGetRateLimit(ctx context.Context, req *RateLimitRequest, reqState RateLimitRequestState, cache Cache) (*RateLimitResponse, error) { defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Worker.handleGetRateLimit")).ObserveDuration() - var rlResponse *RateLimitResp + var rlResponse *RateLimitResponse var err error switch req.Algorithm { diff --git a/workers_test.go b/workers_test.go index 4e77960..b3c6b9f 100644 --- a/workers_test.go +++ b/workers_test.go @@ -22,7 +22,7 @@ import ( "sort" "testing" - guber "github.com/gubernator-io/gubernator/v2" + guber "github.com/gubernator-io/gubernator/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require"
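
For store implementors migrating to v3, the Store interface change above is a mechanical rename (RateLimitReq becomes RateLimitRequest), but every method must be touched. A minimal sketch of a conforming in-memory store follows. Only OnChange and Get appear in this diff; the Remove signature and the CacheItem/HashKey shapes are assumed to carry over from v2 unchanged.

```go
package mystore

import (
	"context"
	"sync"

	"github.com/gubernator-io/gubernator/v3"
)

// MemStore is a minimal, illustrative Store backed by a map. It persists
// every item on OnChange and serves cache misses from Get.
type MemStore struct {
	mu    sync.Mutex
	items map[string]*gubernator.CacheItem
}

func NewMemStore() *MemStore {
	return &MemStore{items: make(map[string]*gubernator.CacheItem)}
}

// OnChange is called after gubernator applies a RateLimitRequest to an item.
func (s *MemStore) OnChange(ctx context.Context, r *gubernator.RateLimitRequest, item *gubernator.CacheItem) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items[item.Key] = item
}

// Get is called on a cache miss; the bool reports whether the store fulfilled it.
func (s *MemStore) Get(ctx context.Context, r *gubernator.RateLimitRequest) (*gubernator.CacheItem, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	item, ok := s.items[r.HashKey()]
	return item, ok
}

// Remove is assumed unchanged from v2: drop the item by key.
func (s *MemStore) Remove(ctx context.Context, key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.items, key)
}
```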
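The test migrations above capture the client-side API change end to end: DialV1Server and GetRateLimits are replaced by NewClient with a WithNoTLS/WithTLS option and CheckRateLimits, which fills a caller-supplied response and reports per-request failures in-band. A minimal sketch of the new call pattern, using only calls exercised by these tests; the limit name and the gubernator.Second duration constant are assumptions carried over from v2.

```go
package main

import (
	"context"
	"errors"

	"github.com/gubernator-io/gubernator/v3"
)

// checkOne performs a single v3 rate limit check against addr (plain HTTP).
func checkOne(ctx context.Context, addr string) error {
	client, err := gubernator.NewClient(gubernator.WithNoTLS(addr))
	if err != nil {
		return err
	}

	// v3 fills a caller-supplied response instead of returning one.
	var resp gubernator.CheckRateLimitsResponse
	err = client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
		Requests: []*gubernator.RateLimitRequest{{
			Name:      "requests_per_sec", // hypothetical limit name
			UniqueKey: "account:1234",
			Algorithm: gubernator.Algorithm_TOKEN_BUCKET,
			Duration:  gubernator.Second, // assumed v2 duration constant
			Limit:     100,
			Hits:      1,
		}},
	}, &resp)
	if err != nil {
		return err
	}

	// Per-request errors arrive in-band, as the tests above assert.
	if e := resp.Responses[0].Error; e != "" {
		return errors.New(e)
	}
	return nil
}
```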
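WorkerPool.GetRateLimit above shows the pool's dispatch pattern: hash the request key to pick a worker, then select on the worker channel and ctx.Done() for both the send and the reply, so the same key always serializes through the same goroutine. A standalone sketch of that shape follows; it is not gubernator's code, just an illustration of the pattern.

```go
package main

import (
	"context"
	"fmt"
	"hash/fnv"
)

// request carries a key to a worker plus a channel for the reply,
// mirroring the send-then-wait shape of WorkerPool.GetRateLimit.
type request struct {
	key   string
	reply chan string
}

type pool struct{ workers []chan request }

func newPool(n int) *pool {
	p := &pool{workers: make([]chan request, n)}
	for i := range p.workers {
		ch := make(chan request)
		p.workers[i] = ch
		go func(id int, ch chan request) {
			for req := range ch {
				req.reply <- fmt.Sprintf("worker %d handled %s", id, req.key)
			}
		}(i, ch)
	}
	return p
}

// getWorker hashes the key so the same key always lands on the same worker.
func (p *pool) getWorker(key string) chan request {
	h := fnv.New32a()
	_, _ = h.Write([]byte(key))
	return p.workers[h.Sum32()%uint32(len(p.workers))]
}

// dispatch sends the request and waits for the reply, honoring ctx at both steps.
func (p *pool) dispatch(ctx context.Context, key string) (string, error) {
	req := request{key: key, reply: make(chan string, 1)}
	select {
	case p.getWorker(key) <- req:
	case <-ctx.Done():
		return "", ctx.Err()
	}
	select {
	case out := <-req.reply:
		return out, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	p := newPool(4)
	out, err := p.dispatch(context.Background(), "account:1234")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```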