Commit

Local rate limit - add rate_limited_as_resource_exhausted flag (envoyproxy#29279)

* Local rate limit - add rate_limited_as_resource_exhausted flag to change grpc status code

Signed-off-by: Pawan Kumar <[email protected]>

Signed-off-by: Pawan Bishnoi <[email protected]>

* fix minor renaming error

Signed-off-by: Pawan Bishnoi <[email protected]>

* fix precheck error

Signed-off-by: Pawan Bishnoi <[email protected]>

* add test

Signed-off-by: Pawan Bishnoi <[email protected]>

* fix format and typo

Signed-off-by: Pawan Bishnoi <[email protected]>

* review comments

Signed-off-by: Pawan Bishnoi <[email protected]>

---------

Signed-off-by: Pawan Bishnoi <[email protected]>
Pawan-Bishnoi authored Oct 25, 2023
1 parent b9e4260 commit 88a80e6
Showing 5 changed files with 73 additions and 13 deletions.
@@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.
// [#extension: envoy.filters.http.local_ratelimit]

// [#next-free-field: 15]
// [#next-free-field: 16]
message LocalRateLimit {
// The human readable prefix to use when emitting stats.
string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
@@ -125,4 +125,9 @@ message LocalRateLimit {
// no matching descriptor. If set to true, default token bucket will always
// be consumed. Default is true.
google.protobuf.BoolValue always_consume_default_token_bucket = 14;

// Specifies whether a ``RESOURCE_EXHAUSTED`` gRPC code must be returned instead
// of the default ``UNAVAILABLE`` gRPC code for a rate limited gRPC call. The
// HTTP code will be 200 for a gRPC response.
bool rate_limited_as_resource_exhausted = 15;
}
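
For reference, the new field lives alongside the filter's existing options on the typed config. A minimal sketch of an HTTP filter entry that enables it (the stat prefix, token bucket sizes, and enablement fractions are illustrative placeholders, not taken from this change):

http_filters:
- name: envoy.filters.http.local_ratelimit
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit
    stat_prefix: http_local_rate_limiter
    token_bucket:
      max_tokens: 100
      tokens_per_fill: 100
      fill_interval: 1s
    filter_enabled:
      default_value:
        numerator: 100
        denominator: HUNDRED
    filter_enforced:
      default_value:
        numerator: 100
        denominator: HUNDRED
    # Return RESOURCE_EXHAUSTED instead of the default UNAVAILABLE for rate-limited gRPC calls.
    rate_limited_as_resource_exhausted: true
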
5 changes: 5 additions & 0 deletions changelogs/current.yaml
@@ -5,6 +5,11 @@ behavior_changes:

minor_behavior_changes:
# *Changes that may cause incompatibilities for some users, but should not for most*
- area: local_rate_limit
change: |
Added new configuration field :ref:`rate_limited_as_resource_exhausted
<envoy_v3_api_field_extensions.filters.http.local_ratelimit.v3.LocalRateLimit.rate_limited_as_resource_exhausted>`
to allow the gRPC status returned on rate-limited responses to be RESOURCE_EXHAUSTED instead of the default UNAVAILABLE.
bug_fixes:
# *Changes expected to improve the state of the world and are unlikely to have negative effects*
@@ -59,7 +59,11 @@ FilterConfig::FilterConfig(
has_descriptors_(!config.descriptors().empty()),
enable_x_rate_limit_headers_(config.enable_x_ratelimit_headers() ==
envoy::extensions::common::ratelimit::v3::DRAFT_VERSION_03),
vh_rate_limits_(config.vh_rate_limits()) {
vh_rate_limits_(config.vh_rate_limits()),
rate_limited_grpc_status_(
config.rate_limited_as_resource_exhausted()
? absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted)
: absl::nullopt) {
// Note: no token bucket is fine for the global config, which would be the case for enabling
// the filter globally but disabled and then applying limits at the virtual host or
// route level. At the virtual or route level, it makes no sense to have no token
@@ -147,7 +151,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers,
[this, config](Http::HeaderMap& headers) {
config->responseHeadersParser().evaluateHeaders(headers, decoder_callbacks_->streamInfo());
},
absl::nullopt, "local_rate_limited");
config->rateLimitedGrpcStatus(), "local_rate_limited");
decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited);

return Http::FilterHeadersStatus::StopIteration;
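
To illustrate what the wired-through status changes (a sketch of the standard gRPC-over-HTTP mapping, not output captured from this change): a rate-limited gRPC call with the flag enabled receives a local reply roughly of the form

:status: 200
content-type: application/grpc
grpc-status: 8          # RESOURCE_EXHAUSTED
grpc-message: local_rate_limited

whereas with the flag left false the grpc-status remains the default 14 (UNAVAILABLE), and plain HTTP requests continue to receive 429 Too Many Requests.
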
@@ -111,6 +111,9 @@ class FilterConfig : public Router::RouteSpecificFilterConfig {
return vh_rate_limits_;
}
bool consumeDefaultTokenBucket() const { return always_consume_default_token_bucket_; }
const absl::optional<Grpc::Status::GrpcStatus> rateLimitedGrpcStatus() const {
return rate_limited_grpc_status_;
}

private:
friend class FilterTest;
@@ -147,6 +150,7 @@ class FilterConfig : public Router::RouteSpecificFilterConfig {
const bool has_descriptors_;
const bool enable_x_rate_limit_headers_;
const envoy::extensions::common::ratelimit::v3::VhRateLimitsOptions vh_rate_limits_;
const absl::optional<Grpc::Status::GrpcStatus> rate_limited_grpc_status_;
};

using FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;
62 changes: 52 additions & 10 deletions test/extensions/filters/http/local_ratelimit/filter_test.cc
@@ -17,6 +17,7 @@ namespace LocalRateLimitFilter {

static const std::string config_yaml = R"(
stat_prefix: test
rate_limited_as_resource_exhausted: {}
token_bucket:
max_tokens: {}
tokens_per_fill: 1
@@ -105,25 +106,25 @@ class FilterTest : public testing::Test {
};

TEST_F(FilterTest, Runtime) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "\"OFF\""), false, false);
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "\"OFF\""), false, false);
EXPECT_EQ(&runtime_, &(config_->runtime()));
}

TEST_F(FilterTest, ToErrorCode) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "\"OFF\""), false, false);
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "\"OFF\""), false, false);
EXPECT_EQ(Http::Code::BadRequest, toErrorCode(400));
}

TEST_F(FilterTest, Disabled) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "\"OFF\""), false, false);
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "\"OFF\""), false, false);
auto headers = Http::TestRequestHeaderMapImpl();
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));
EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enabled"));
EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enforced"));
}

TEST_F(FilterTest, RequestOk) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "\"OFF\""));
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "\"OFF\""));
auto headers = Http::TestRequestHeaderMapImpl();
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_2_->decodeHeaders(headers, false));
@@ -134,7 +135,7 @@ TEST_F(FilterTest, RequestOk) {
}

TEST_F(FilterTest, RequestOkPerConnection) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "true", "\"OFF\""));
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "true", "\"OFF\""));
auto headers = Http::TestRequestHeaderMapImpl();
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false));
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_2_->decodeHeaders(headers, false));
@@ -145,7 +146,7 @@ TEST_F(FilterTest, RequestOkPerConnection) {
}

TEST_F(FilterTest, RequestRateLimited) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "\"OFF\""));
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "\"OFF\""));

EXPECT_CALL(decoder_callbacks_2_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _))
.WillOnce(Invoke([](Http::Code code, absl::string_view body,
@@ -165,7 +166,6 @@ TEST_F(FilterTest, RequestRateLimited) {
EXPECT_EQ("123", response_headers.get(Http::LowerCaseString("test-resp-req-id"))[0]
->value()
.getStringView());

EXPECT_EQ(grpc_status, absl::nullopt);
EXPECT_EQ(details, "local_rate_limited");
}));
@@ -186,14 +186,56 @@
EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.rate_limited"));
}

TEST_F(FilterTest, RequestRateLimitedResourceExhausted) {
setup(fmt::format(fmt::runtime(config_yaml), "true", "1", "false", "\"OFF\""));

EXPECT_CALL(decoder_callbacks_2_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _))
.WillOnce(Invoke([](Http::Code code, absl::string_view body,
std::function<void(Http::ResponseHeaderMap & headers)> modify_headers,
const absl::optional<Grpc::Status::GrpcStatus> grpc_status,
absl::string_view details) {
EXPECT_EQ(Http::Code::TooManyRequests, code);
EXPECT_EQ("local_rate_limited", body);

Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
modify_headers(response_headers);
EXPECT_EQ("true", response_headers.get(Http::LowerCaseString("x-test-rate-limit"))[0]
->value()
.getStringView());
// Make sure that generated local reply headers contain a value dynamically
// generated by header formatter REQ(test-req-id)
EXPECT_EQ("123", response_headers.get(Http::LowerCaseString("test-resp-req-id"))[0]
->value()
.getStringView());
EXPECT_EQ(grpc_status,
absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted));
EXPECT_EQ(details, "local_rate_limited");
}));

// Add a custom header to the request.
// Locally generated reply is configured to refer to this value.
Http::TestRequestHeaderMapImpl request_headers{{"test-req-id", "123"}};
NiceMock<StreamInfo::MockStreamInfo> stream_info;

EXPECT_CALL(decoder_callbacks_2_, streamInfo).WillRepeatedly(testing::ReturnRef(stream_info));
EXPECT_CALL(stream_info, getRequestHeaders).WillRepeatedly(Return(&request_headers));
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false));
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,
filter_2_->decodeHeaders(request_headers, false));
EXPECT_EQ(2U, findCounter("test.http_local_rate_limit.enabled"));
EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.enforced"));
EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.ok"));
EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.rate_limited"));
}

/*
This test sets 'local_rate_limit_per_downstream_connection' to true. Doing this enables per-connection
rate limiting and even though 'max_tokens' is set to 1, it allows 2 requests to go through
- one on each connection. This is in contrast to the 'RequestOk' test above where only 1 request is
allowed (across the process) for the same configuration.
*/
TEST_F(FilterTest, RequestRateLimitedPerConnection) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "true", "\"OFF\""));
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "true", "\"OFF\""));

EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _))
.WillOnce(Invoke([](Http::Code code, absl::string_view body,
@@ -230,7 +272,7 @@ TEST_F(FilterTest, RequestRateLimitedPerConnection) {
}

TEST_F(FilterTest, RequestRateLimitedButNotEnforced) {
setup(fmt::format(fmt::runtime(config_yaml), "0", "false", "\"OFF\""), true, false);
setup(fmt::format(fmt::runtime(config_yaml), "false", "0", "false", "\"OFF\""), true, false);

EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _)).Times(0);

@@ -246,7 +288,7 @@ TEST_F(FilterTest, RequestRateLimitedButNotEnforced) {
}

TEST_F(FilterTest, RequestRateLimitedXRateLimitHeaders) {
setup(fmt::format(fmt::runtime(config_yaml), "1", "false", "DRAFT_VERSION_03"));
setup(fmt::format(fmt::runtime(config_yaml), "false", "1", "false", "DRAFT_VERSION_03"));

auto request_headers = Http::TestRequestHeaderMapImpl();
auto response_headers = Http::TestResponseHeaderMapImpl();
