Merge branch 'main' into 3420
iblancasa authored Nov 5, 2024
2 parents c0b52cd + 05228b9 commit b1ecbb8
Showing 113 changed files with 6,134 additions and 194 deletions.
16 changes: 16 additions & 0 deletions .chloggen/2779-kubeletstatsreiver-inject-en-vars.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: collector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Inject the K8S_NODE_NAME environment variable for the Kubelet Stats Receiver.

# One or more tracking issues related to the change
issues: [2779]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
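
For context, a minimal sketch (not part of this commit) of how the injected variable is typically consumed by the Kubelet Stats Receiver; the endpoint and the other receiver settings shown here are illustrative assumptions rather than values taken from this change:

```yaml
receivers:
  kubeletstats:
    collection_interval: 20s
    auth_type: serviceAccount
    # K8S_NODE_NAME is injected by the operator so the receiver can reach the local kubelet.
    endpoint: "https://${env:K8S_NODE_NAME}:10250"
```
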
16 changes: 16 additions & 0 deletions .chloggen/2947-updating-ds-sf-depl-mutation.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: collector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Fix mutation of deployments, statefulsets, and daemonsets allowing to remove fields on update"

# One or more tracking issues related to the change
issues: [2947]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
16 changes: 16 additions & 0 deletions .chloggen/3332-musl-python-autoinstrumentation.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: auto-instrumentation

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: add config for installing musl-based auto-instrumentation for Python

# One or more tracking issues related to the change
issues: [2264]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
16 changes: 16 additions & 0 deletions .chloggen/scrape-config-probe.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: enables support for pulling ScrapeConfig and Probe CRDs in the target allocator

# One or more tracking issues related to the change
issues: [1842]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
3 changes: 3 additions & 0 deletions README.md
@@ -292,9 +292,12 @@ instrumentation.opentelemetry.io/inject-nodejs: "true"
```

Python:
Python auto-instrumentation also honors an annotation that permits it to run on images that use a C library other than glibc (such as musl).

```bash
instrumentation.opentelemetry.io/inject-python: "true"
instrumentation.opentelemetry.io/otel-python-platform: "glibc" # for Linux glibc based images, this is the default value and can be omitted
instrumentation.opentelemetry.io/otel-python-platform: "musl" # for Linux musl based images
```
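
For illustration only (not part of this diff), a hypothetical pod template showing where these annotations would be placed; the Deployment name, labels, and image are placeholders:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: python-musl-app           # placeholder
spec:
  selector:
    matchLabels:
      app: python-musl-app
  template:
    metadata:
      labels:
        app: python-musl-app
      annotations:
        instrumentation.opentelemetry.io/inject-python: "true"
        instrumentation.opentelemetry.io/otel-python-platform: "musl"
    spec:
      containers:
        - name: app
          image: registry.example.com/python-musl-app:latest   # placeholder
```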

.NET:
40 changes: 40 additions & 0 deletions apis/v1beta1/config.go
@@ -226,6 +226,42 @@ func (c *Config) getPortsForComponentKinds(logger logr.Logger, componentKinds ..
return ports, nil
}

// getEnvironmentVariablesForComponentKinds gets the environment variables for the given ComponentKind(s).
func (c *Config) getEnvironmentVariablesForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]corev1.EnvVar, error) {
var envVars []corev1.EnvVar = []corev1.EnvVar{}
enabledComponents := c.GetEnabledComponents()
for _, componentKind := range componentKinds {
var retriever components.ParserRetriever
var cfg AnyConfig

switch componentKind {
case KindReceiver:
retriever = receivers.ReceiverFor
cfg = c.Receivers
case KindExporter:
continue
case KindProcessor:
continue
case KindExtension:
continue
}
for componentName := range enabledComponents[componentKind] {
parser := retriever(componentName)
if parsedEnvVars, err := parser.GetEnvironmentVariables(logger, cfg.Object[componentName]); err != nil {
return nil, err
} else {
envVars = append(envVars, parsedEnvVars...)
}
}
}

sort.Slice(envVars, func(i, j int) bool {
return envVars[i].Name < envVars[j].Name
})

return envVars, nil
}

// applyDefaultForComponentKinds applies defaults to the endpoints for the given ComponentKind(s).
func (c *Config) applyDefaultForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) error {
if err := c.Service.ApplyDefaults(); err != nil {
@@ -286,6 +322,10 @@ func (c *Config) GetAllPorts(logger logr.Logger) ([]corev1.ServicePort, error) {
return c.getPortsForComponentKinds(logger, KindReceiver, KindExporter)
}

func (c *Config) GetEnvironmentVariables(logger logr.Logger) ([]corev1.EnvVar, error) {
return c.getEnvironmentVariablesForComponentKinds(logger, KindReceiver)
}

func (c *Config) GetAllRbacRules(logger logr.Logger) ([]rbacv1.PolicyRule, error) {
return c.getRbacRulesForComponentKinds(logger, KindReceiver, KindExporter, KindProcessor)
}
60 changes: 60 additions & 0 deletions apis/v1beta1/config_test.go
@@ -423,6 +423,66 @@ func TestConfig_GetEnabledComponents(t *testing.T) {
}
}

func TestConfig_getEnvironmentVariablesForComponentKinds(t *testing.T) {
tests := []struct {
name string
config *Config
componentKinds []ComponentKind
envVarsLen int
}{
{
name: "no env vars",
config: &Config{
Receivers: AnyConfig{
Object: map[string]interface{}{
"myreceiver": map[string]interface{}{
"env": "test",
},
},
},
Service: Service{
Pipelines: map[string]*Pipeline{
"test": {
Receivers: []string{"myreceiver"},
},
},
},
},
componentKinds: []ComponentKind{KindReceiver},
envVarsLen: 0,
},
{
name: "kubeletstats env vars",
config: &Config{
Receivers: AnyConfig{
Object: map[string]interface{}{
"kubeletstats": map[string]interface{}{},
},
},
Service: Service{
Pipelines: map[string]*Pipeline{
"test": {
Receivers: []string{"kubeletstats"},
},
},
},
},
componentKinds: []ComponentKind{KindReceiver},
envVarsLen: 1,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger := logr.Discard()
envVars, err := tt.config.GetEnvironmentVariables(logger)

assert.NoError(t, err)
assert.Len(t, envVars, tt.envVarsLen)
})
}
}

func TestConfig_GetReceiverPorts(t *testing.T) {
tests := []struct {
name string
175 changes: 175 additions & 0 deletions cmd/otel-allocator/benchmark_test.go
@@ -0,0 +1,175 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
"context"
"fmt"
"os"
"testing"

gokitlog "github.com/go-kit/log"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/relabel"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log"

"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation"
"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/prehook"
"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/server"
"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target"
)

// BenchmarkProcessTargets benchmarks the whole target allocation pipeline. It starts with data the Prometheus
// discovery manager would normally output, and pushes it all the way into the allocator. It notably does *not* check
// the HTTP server afterward. Test data is chosen to be reasonably representative of what the Prometheus service discovery
// outputs in the real world.
func BenchmarkProcessTargets(b *testing.B) {
numTargets := 10000
targetsPerGroup := 5
groupsPerJob := 20
tsets := prepareBenchmarkData(numTargets, targetsPerGroup, groupsPerJob)

b.ResetTimer()
for _, strategy := range allocation.GetRegisteredAllocatorNames() {
b.Run(strategy, func(b *testing.B) {
targetDiscoverer, allocator := createTestDiscoverer(strategy, map[string][]*relabel.Config{})
for i := 0; i < b.N; i++ {
targetDiscoverer.ProcessTargets(tsets, allocator.SetTargets)
}
})
}
}

// BenchmarkProcessTargetsWithRelabelConfig is BenchmarkProcessTargets with a relabel config set. The relabel config
// does not actually modify any records, but does force the prehook to perform any necessary conversions along the way.
func BenchmarkProcessTargetsWithRelabelConfig(b *testing.B) {
numTargets := 10000
targetsPerGroup := 5
groupsPerJob := 20
tsets := prepareBenchmarkData(numTargets, targetsPerGroup, groupsPerJob)
prehookConfig := make(map[string][]*relabel.Config, len(tsets))
for jobName := range tsets {
prehookConfig[jobName] = []*relabel.Config{
{
Action: "keep",
Regex: relabel.MustNewRegexp(".*"),
},
}
}

b.ResetTimer()
for _, strategy := range allocation.GetRegisteredAllocatorNames() {
b.Run(strategy, func(b *testing.B) {
targetDiscoverer, allocator := createTestDiscoverer(strategy, prehookConfig)
for i := 0; i < b.N; i++ {
targetDiscoverer.ProcessTargets(tsets, allocator.SetTargets)
}
})
}
}

func prepareBenchmarkData(numTargets, targetsPerGroup, groupsPerJob int) map[string][]*targetgroup.Group {
numGroups := numTargets / targetsPerGroup
numJobs := numGroups / groupsPerJob
jobNamePrefix := "test-"
groupLabels := model.LabelSet{
"__meta_kubernetes_pod_controller_name": "example",
"__meta_kubernetes_pod_ip": "10.244.0.251",
"__meta_kubernetes_pod_uid": "676ebee7-14f8-481e-a937-d2affaec4105",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_service_annotation_kubectl_kubernetes_io_last_applied_configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"example\"},\"name\":\"example-svc\",\"namespace\":\"example\"},\"spec\":{\"clusterIP\":\"None\",\"ports\":[{\"name\":\"http-example\",\"port\":9006,\"targetPort\":9006}],\"selector\":{\"app\":\"example\"},\"type\":\"ClusterIP\"}}\n",
"__meta_kubernetes_endpointslice_labelpresent_app": "true",
"__meta_kubernetes_endpointslice_name": "example-svc-qgwxf",
"__address__": "10.244.0.251:9006",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_pod_labelpresent_pod_template_hash": "true",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "example-svc",
"__meta_kubernetes_endpointslice_labelpresent_service_kubernetes_io_headless": "true",
"__meta_kubernetes_pod_label_pod_template_hash": "6b549885f8",
"__meta_kubernetes_endpointslice_address_target_name": "example-6b549885f8-7tbcw",
"__meta_kubernetes_pod_labelpresent_app": "true",
"somelabel": "somevalue",
}
exampleTarget := model.LabelSet{
"__meta_kubernetes_endpointslice_port": "9006",
"__meta_kubernetes_service_label_app": "example",
"__meta_kubernetes_endpointslice_port_name": "http-example",
"__meta_kubernetes_pod_ready": "true",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_label_endpointslice_kubernetes_io_managed_by": "endpointslice-controller.k8s.io",
"__meta_kubernetes_endpointslice_labelpresent_endpointslice_kubernetes_io_managed_by": "true",
"__meta_kubernetes_endpointslice_label_app": "example",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_pod_phase": "Running",
"__meta_kubernetes_pod_controller_kind": "ReplicaSet",
"__meta_kubernetes_service_annotationpresent_kubectl_kubernetes_io_last_applied_configuration": "true",
"__meta_kubernetes_service_labelpresent_app": "true",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_endpoints_kubernetes_io_last_change_trigger_time": "2023-09-27T16:01:29Z",
"__meta_kubernetes_pod_name": "example-6b549885f8-7tbcw",
"__meta_kubernetes_service_name": "example-svc",
"__meta_kubernetes_namespace": "example",
"__meta_kubernetes_endpointslice_annotationpresent_endpoints_kubernetes_io_last_change_trigger_time": "true",
"__meta_kubernetes_pod_node_name": "kind-control-plane",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_pod_host_ip": "172.18.0.2",
"__meta_kubernetes_endpointslice_label_service_kubernetes_io_headless": "",
"__meta_kubernetes_pod_label_app": "example",
}
targets := []model.LabelSet{}
for i := 0; i < numTargets; i++ {
targets = append(targets, exampleTarget.Clone())
}
groups := make([]*targetgroup.Group, numGroups)
for i := 0; i < numGroups; i++ {
groupTargets := targets[(i * targetsPerGroup):(i*targetsPerGroup + targetsPerGroup)]
groups[i] = &targetgroup.Group{
Labels: groupLabels,
Targets: groupTargets,
}
}
tsets := make(map[string][]*targetgroup.Group, numJobs)
for i := 0; i < numJobs; i++ {
jobGroups := groups[(i * groupsPerJob):(i*groupsPerJob + groupsPerJob)]
jobName := fmt.Sprintf("%s%d", jobNamePrefix, i)
tsets[jobName] = jobGroups
}
return tsets
}

func createTestDiscoverer(allocationStrategy string, prehookConfig map[string][]*relabel.Config) (*target.Discoverer, allocation.Allocator) {
ctx := context.Background()
logger := ctrl.Log.WithName(fmt.Sprintf("bench-%s", allocationStrategy))
ctrl.SetLogger(logr.New(log.NullLogSink{}))
allocatorPrehook := prehook.New("relabel-config", logger)
allocatorPrehook.SetConfig(prehookConfig)
allocator, err := allocation.New(allocationStrategy, logger, allocation.WithFilter(allocatorPrehook))
srv := server.NewServer(logger, allocator, "localhost:0")
if err != nil {
setupLog.Error(err, "Unable to initialize allocation strategy")
os.Exit(1)
}
registry := prometheus.NewRegistry()
sdMetrics, _ := discovery.CreateAndRegisterSDMetrics(registry)
discoveryManager := discovery.NewManager(ctx, gokitlog.NewNopLogger(), registry, sdMetrics)
targetDiscoverer := target.NewDiscoverer(logger, discoveryManager, allocatorPrehook, srv)
return targetDiscoverer, allocator
}
6 changes: 5 additions & 1 deletion cmd/otel-allocator/config/config.go
@@ -61,9 +61,13 @@ type Config struct {
type PrometheusCRConfig struct {
Enabled bool `yaml:"enabled,omitempty"`
PodMonitorSelector *metav1.LabelSelector `yaml:"pod_monitor_selector,omitempty"`
PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
ServiceMonitorSelector *metav1.LabelSelector `yaml:"service_monitor_selector,omitempty"`
ServiceMonitorNamespaceSelector *metav1.LabelSelector `yaml:"service_monitor_namespace_selector,omitempty"`
PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
ScrapeConfigSelector *metav1.LabelSelector `yaml:"scrape_config_selector,omitempty"`
ScrapeConfigNamespaceSelector *metav1.LabelSelector `yaml:"scrape_config_namespace_selector,omitempty"`
ProbeSelector *metav1.LabelSelector `yaml:"probe_selector,omitempty"`
ProbeNamespaceSelector *metav1.LabelSelector `yaml:"probe_namespace_selector,omitempty"`
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
}

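
For reference, a hedged sketch (not part of this commit) of how the new selectors might appear in the target allocator's own configuration file; the `prometheus_cr` parent key and the empty (match-all) selectors are assumptions inferred from the yaml tags above:

```yaml
prometheus_cr:
  enabled: true
  scrape_interval: 30s
  service_monitor_selector: {}   # empty selector matches all ServiceMonitors
  pod_monitor_selector: {}       # empty selector matches all PodMonitors
  scrape_config_selector: {}     # newly supported: ScrapeConfig CRs
  probe_selector: {}             # newly supported: Probe CRs
```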