feat(pyroscope/scrape): add support for configuring CPU profile's duration scraped by `pyroscope.scrape`.

Signed-off-by: hainenber <[email protected]>
hainenber committed Apr 5, 2024
1 parent 2e95228 commit f966670
Showing 6 changed files with 109 additions and 18 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -55,6 +55,8 @@ Main (unreleased)
- Add automatic conversion for `legacy_positions_file` in component `loki.source.file`. (@mattdurham)

- Propagate request metadata for `faro.receiver` to downstream components. (@hainenber)

- Add support for configuring CPU profile's duration scraped by `pyroscope.scrape`. (@hainenber)

### Features

10 changes: 7 additions & 3 deletions docs/sources/flow/reference/components/pyroscope.scrape.md
@@ -76,6 +76,7 @@ Name | Type | Description
`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no
`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"15s"` | no
`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. Must be larger than `scrape_interval`. | `"18s"` | no
`profiling_duration` | `duration` | The duration for which a delta profile is scraped. Must be larger than 1 second. | `"14s"` | no
`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | `"http"` | no
`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
`bearer_token` | `secret` | Bearer token to authenticate with. | | no
@@ -387,7 +388,8 @@ Name | Type | Description | Default | Required
`delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no

When the `delta` argument is `true`, a `seconds` query parameter is
automatically added to requests. The `seconds` used will be equal to `scrape_interval - 1`.
automatically added to requests. The `seconds` used will default to `scrape_interval - 1`,
or to `profiling_duration` if it is specified.

### clustering block

@@ -421,8 +423,10 @@ When the `delta` argument is `false`, the [pprof][] HTTP query will be instantaneous.
When the `delta` argument is `true`:
* The [pprof][] HTTP query will run for a certain amount of time.
* A `seconds` parameter is automatically added to the HTTP request.
* The `seconds` used will be equal to `scrape_interval - 1`.
For example, if `scrape_interval` is `"15s"`, `seconds` will be 14 seconds.
* The `seconds` used will default to `scrape_interval - 1`, or to `profiling_duration` if it is specified (see the sketch after this list).
For example, if `scrape_interval` is `"15s"`, `seconds` will be 14 seconds.
If `profiling_duration` is set to `"16s"`, `seconds` will be 16 seconds regardless of `scrape_interval`.
If the HTTP endpoint is `/debug/pprof/profile`, the HTTP query becomes `/debug/pprof/profile?seconds=14`.
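
As a rough standalone sketch of the arithmetic above (an editorial illustration, not part of this commit; the helper `secondsParam` and the `durationSpecified` flag are made up for the example):

```go
package main

import (
	"fmt"
	"time"
)

// secondsParam mirrors the documented rule: the `seconds` query parameter
// defaults to scrape_interval - 1s, and an explicitly configured
// profiling_duration takes precedence over that default.
func secondsParam(scrapeInterval, profilingDuration time.Duration, durationSpecified bool) int {
	if durationSpecified {
		return int(profilingDuration / time.Second)
	}
	return int(scrapeInterval/time.Second) - 1
}

func main() {
	fmt.Println(secondsParam(15*time.Second, 0, false))             // 14: scrape_interval - 1
	fmt.Println(secondsParam(15*time.Second, 16*time.Second, true)) // 16: profiling_duration wins
}
```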

## Exported fields
36 changes: 22 additions & 14 deletions internal/component/pyroscope/scrape/scrape.go
@@ -21,15 +21,17 @@ import (
)

const (
pprofMemory string = "memory"
pprofBlock string = "block"
pprofGoroutine string = "goroutine"
pprofMutex string = "mutex"
pprofProcessCPU string = "process_cpu"
pprofFgprof string = "fgprof"
pprofGoDeltaProfMemory string = "godeltaprof_memory"
pprofGoDeltaProfBlock string = "godeltaprof_block"
pprofGoDeltaProfMutex string = "godeltaprof_mutex"
pprofMemory string = "memory"
pprofBlock string = "block"
pprofGoroutine string = "goroutine"
pprofMutex string = "mutex"
pprofProcessCPU string = "process_cpu"
pprofFgprof string = "fgprof"
pprofGoDeltaProfMemory string = "godeltaprof_memory"
pprofGoDeltaProfBlock string = "godeltaprof_block"
pprofGoDeltaProfMutex string = "godeltaprof_mutex"
defaultScrapeInterval time.Duration = 15 * time.Second
defaultProfilingDuration time.Duration = 14 * time.Second
)

func init() {
@@ -60,6 +62,8 @@ type Arguments struct {
ScrapeTimeout time.Duration `river:"scrape_timeout,attr,optional"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `river:"scheme,attr,optional"`
// The duration for which a delta profile is scraped.
ProfilingDuration time.Duration `river:"profiling_duration,attr,optional"`

// todo(ctovena): add support for limits.
// // An uncompressed response body larger than this many bytes will cause the
@@ -192,11 +196,12 @@ var DefaultArguments = NewDefaultArguments()
// NewDefaultArguments create the default settings for a scrape job.
func NewDefaultArguments() Arguments {
return Arguments{
Scheme: "http",
HTTPClientConfig: component_config.DefaultHTTPClientConfig,
ScrapeInterval: 15 * time.Second,
ScrapeTimeout: 10 * time.Second,
ProfilingConfig: DefaultProfilingConfig,
Scheme: "http",
HTTPClientConfig: component_config.DefaultHTTPClientConfig,
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: 18 * time.Second,
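// defaultProfilingDuration (14s) matches scrape_interval - 1 for the default 15s interval, so delta scrapes behave as before unless profiling_duration is set explicitly.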
ProfilingDuration: defaultProfilingDuration,
ProfilingConfig: DefaultProfilingConfig,
}
}

@@ -215,6 +220,9 @@ func (arg *Arguments) Validate() error {
// ProfilingTarget.Delta is true the ScrapeInterval - 1s is propagated in
// the `seconds` parameter and it must be >= 1.
for _, target := range arg.ProfilingConfig.AllTargets() {
if target.Enabled && target.Delta && arg.ProfilingDuration.Seconds() <= 1 {
return fmt.Errorf("profiling_duration must be larger than 1 second when using delta profiling")
}
if target.Enabled && target.Delta && arg.ScrapeInterval.Seconds() < 2 {
return fmt.Errorf("scrape_interval must be at least 2 seconds when using delta profiling")
}
10 changes: 10 additions & 0 deletions internal/component/pyroscope/scrape/scrape_test.go
@@ -151,6 +151,16 @@ func TestUnmarshalConfig(t *testing.T) {
`,
expectedErr: "scrape_interval must be at least 2 seconds when using delta profiling",
},
"invalid cpu profiling_duration": {
in: `
targets = []
forward_to = null
scrape_timeout = "1s"
scrape_interval = "10s"
profiling_duration = "1s"
`,
expectedErr: "profiling_duration must be larger than 1 second when using delta profiling",
},
"allow short scrape_intervals without delta": {
in: `
targets = []
6 changes: 5 additions & 1 deletion internal/component/pyroscope/scrape/target.go
@@ -406,7 +406,11 @@ func targetsFromGroup(group *targetgroup.Group, cfg Arguments, targetTypes map[s
}

if pcfg, found := targetTypes[profType]; found && pcfg.Delta {
params.Add("seconds", strconv.Itoa(int((cfg.ScrapeInterval)/time.Second)-1))
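// Default the delta window to scrape_interval - 1s; a profiling_duration other than its default overrides it.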
seconds := (cfg.ScrapeInterval)/time.Second - 1
if cfg.ProfilingDuration != defaultProfilingDuration {
seconds = (cfg.ProfilingDuration) / time.Second
}
params.Add("seconds", strconv.Itoa(int(seconds)))
}
targets = append(targets, NewTarget(lbls, origLabels, params))
}
63 changes: 63 additions & 0 deletions internal/component/pyroscope/scrape/target_test.go
@@ -4,6 +4,7 @@ import (
"net/url"
"sort"
"testing"
"time"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -236,3 +237,65 @@ func Test_targetsFromGroup(t *testing.T) {
require.Equal(t, expected, active)
require.Empty(t, dropped)
}

func Test_targetsFromGroup_withSpecifiedProfilingDuration(t *testing.T) {
args := NewDefaultArguments()
args.ProfilingConfig.Block.Enabled = false
args.ProfilingConfig.Goroutine.Enabled = false
args.ProfilingConfig.Mutex.Enabled = false
args.ProfilingDuration = 20 * time.Second

active, dropped, err := targetsFromGroup(&targetgroup.Group{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
Labels: model.LabelSet{
"foo": "bar",
},
}, args, args.ProfilingConfig.AllTargets())
expected := []*Target{
// unspecified
NewTarget(
labels.FromMap(map[string]string{
model.AddressLabel: "localhost:9090",
serviceNameLabel: "unspecified",
model.MetricNameLabel: pprofMemory,
ProfilePath: "/debug/pprof/allocs",
model.SchemeLabel: "http",
"foo": "bar",
"instance": "localhost:9090",
}),
labels.FromMap(map[string]string{
model.AddressLabel: "localhost:9090",
model.MetricNameLabel: pprofMemory,
ProfilePath: "/debug/pprof/allocs",
model.SchemeLabel: "http",
"foo": "bar",
}),
url.Values{}),
NewTarget(
labels.FromMap(map[string]string{
model.AddressLabel: "localhost:9090",
serviceNameLabel: "unspecified",
model.MetricNameLabel: pprofProcessCPU,
ProfilePath: "/debug/pprof/profile",
model.SchemeLabel: "http",
"foo": "bar",
"instance": "localhost:9090",
}),
labels.FromMap(map[string]string{
model.AddressLabel: "localhost:9090",
model.MetricNameLabel: pprofProcessCPU,
ProfilePath: "/debug/pprof/profile",
model.SchemeLabel: "http",
"foo": "bar",
}),
url.Values{"seconds": []string{"20"}}),
}

require.NoError(t, err)
sort.Sort(Targets(active))
sort.Sort(Targets(expected))
require.Equal(t, expected, active)
require.Empty(t, dropped)
}
