diff --git a/.github/workflows/build-fb-image.yaml b/.github/workflows/build-fb-image.yaml index 84a2aeb81..7248a3078 100644 --- a/.github/workflows/build-fb-image.yaml +++ b/.github/workflows/build-fb-image.yaml @@ -13,8 +13,8 @@ on: - "pkg/filenotify/**" env: - FB_IMG: 'kubesphere/fluent-bit:v2.1.3' - FB_IMG_DEBUG: 'kubesphere/fluent-bit:v2.1.3-debug' + FB_IMG: 'kubesphere/fluent-bit:v2.1.4' + FB_IMG_DEBUG: 'kubesphere/fluent-bit:v2.1.4-debug' jobs: build: diff --git a/.github/workflows/lint-test.yaml b/.github/workflows/lint-test.yaml index 9665ac9d5..aaff307d6 100644 --- a/.github/workflows/lint-test.yaml +++ b/.github/workflows/lint-test.yaml @@ -38,7 +38,7 @@ jobs: run: ct lint --chart-dirs charts/fluent-operator --config charts/ct.yaml - name: Create kind cluster - uses: helm/kind-action@v1.5.0 + uses: helm/kind-action@v1.7.0 if: steps.list-changed.outputs.changed == 'true' - name: Run chart-testing (install) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19b920301..bb7802903 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,49 @@ +## 2.3.0 / 2023-06-05 +### Features +- Feat: Adding influxdb plugin (#690) +- Feat: Add EnvVars support to FluentD (#697) +- Feat: Add Pod Annotations support to FluentD (#698) +- Feat: Fluent operator & fluentbit: Added tolerations, nodeSelector + more (#704) +- Feat: Add fluentbit.affinity configuration (#726) +- Feat: Allow setting fluentd PodSecurityContext (#744) +- Feat: Fluentd in_tail plugin (#753) +- Feat: Add missing fluentd buffer options (#757) +- Feat: Add AWS Kinesis Data Streams output plugin for Fluent Bit (#768) +- Feat: Add global log_level support for fluentd (#770) +- Feat: Add scheduler support for fluentbit & fluentd (#771) + +### ENHANCEMENT +- EnvVars support in fluentbit helm template (#706) +- Add uri field for each telemetry type in opentelemetry plugin, remove old uri field (#708) +- Adjust fluentd watcher dependabot (#716) +- remove the deprecated -i flag in go build (#720) +- Adjust fluentd 
arm64 image build timeout (#721) +- Adjust edge metrics collection config (#736) +- Add some fluentbit helm opts (#743) +- Align CRDs and Operator with the fluentbit loki output (#756) +- Fluent-bit upgrade to v2.1.4 (#767) +- build(deps): Bump k8s.io/apimachinery from 0.26.3 to 0.27.1 (#701) +- build(deps): Bump helm/chart-testing-action from 2.1.0 to 2.4.0 (#710) +- build(deps): Bump k8s.io/klog/v2 from 2.90.1 to 2.100.1 (#712) +- build(deps): Bump golang from 1.20.3-alpine3.17 to 1.20.4-alpine3.17 in /cmd/fluent-manager (#713) +- build(deps): Bump golang from 1.20.3-alpine3.16 to 1.20.4-alpine3.16 in /cmd/fluent-watcher/fluentbit (#714) +- build(deps): Bump golang from 1.20.2 to 1.20.4 in /docs/best-practice/forwarding-logs-via-http (#715) +- build(deps): Bump golang from 1.19.2-alpine3.16 to 1.20.4-alpine3.16 in /cmd/fluent-watcher/fluentd (#717) +- build(deps): Bump arm64v8/ruby from 3.1-slim-bullseye to 3.2-slim-bullseye in /cmd/fluent-watcher/fluentd (#718) +- build(deps): Bump alpine from 3.16 to 3.17 in /cmd/fluent-watcher/fluentd (#719) +- build(deps): Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#748) +- build(deps): Bump k8s.io/apimachinery from 0.27.1 to 0.27.2 (#751) +- build(deps): Bump helm/kind-action from 1.5.0 to 1.7.0 (#765) + +### BUGFIX +- Fix: Fix missing log level (#691) +- Fix: Fix rewrite_tag match rule and trim start of string pattern (#692) +- Fix(docs): Update cluster outputs docs link (#724) +- Fix: dereference pointers in parser filter plugin for fluentd (#745) +- Fix: fluentbit namespace-logging: only generate rewrite tag config once (#746) +- Fix: minor typo fix for firehose (#764) +- Fix: fix typo for estimate_current_event in fluentd (#769) + ## 2.2.0 / 2023-04-07 ### Features - Feat: Adding Fluentd cloudwatch plugin (#586) diff --git a/Makefile b/Makefile index bbf952afd..3cd740757 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION?=$(shell cat VERSION | tr -d " \t\n\r") # Image URL to use all building/pushing 
image targets -FB_IMG ?= kubesphere/fluent-bit:v2.1.3 -FB_IMG_DEBUG ?= kubesphere/fluent-bit:v2.1.3-debug +FB_IMG ?= kubesphere/fluent-bit:v2.1.4 +FB_IMG_DEBUG ?= kubesphere/fluent-bit:v2.1.4-debug FD_IMG ?= kubesphere/fluentd:v1.15.3 FO_IMG ?= kubesphere/fluent-operator:$(VERSION) FD_IMG_BASE ?= kubesphere/fluentd:v1.15.3-arm64-base diff --git a/RELEASE.md b/RELEASE.md index 7b8781788..9443adbab 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -35,6 +35,7 @@ This page describes the release process and the currently planned schedule for u | v2.0.1 | 2023-02-08 | Elon Cheng (GitHub: @wenchajun) | | v2.1.0 | 2023-03-13 | Elon Cheng (GitHub: @wenchajun) | | v2.2.0 | 2023-04-07 | Elon Cheng (GitHub: @wenchajun) | +| v2.3.0 | 2023-06-05 | Elon Cheng (GitHub: @wenchajun) | # How to cut a new release diff --git a/apis/fluentbit/v1alpha2/clusteroutput_types.go b/apis/fluentbit/v1alpha2/clusteroutput_types.go index 1d2e30d81..b1e78f448 100644 --- a/apis/fluentbit/v1alpha2/clusteroutput_types.go +++ b/apis/fluentbit/v1alpha2/clusteroutput_types.go @@ -81,7 +81,9 @@ type OutputSpec struct { // DataDog defines DataDog Output configuration. DataDog *output.DataDog `json:"datadog,omitempty"` // Firehose defines Firehose Output configuration. - Fireose *output.Firehose `json:"firehose,omitempty"` + Firehose *output.Firehose `json:"firehose,omitempty"` + // Kinesis defines Kinesis Output configuration. + Kinesis *output.Kinesis `json:"kinesis,omitempty"` // Stackdriver defines Stackdriver Output Configuration Stackdriver *output.Stackdriver `json:"stackdriver,omitempty"` // Splunk defines Splunk Output Configuration @@ -92,6 +94,8 @@ type OutputSpec struct { OpenTelemetry *output.OpenTelemetry `json:"opentelemetry,omitempty"` // PrometheusRemoteWrite_types defines Prometheus Remote Write configuration. PrometheusRemoteWrite *output.PrometheusRemoteWrite `json:"prometheusRemoteWrite,omitempty"` + // S3 defines S3 Output configuration. 
+ S3 *output.S3 `json:"s3,omitempty"` // CustomPlugin defines Custom Output configuration. CustomPlugin *custom.CustomPlugin `json:"customPlugin,omitempty"` diff --git a/apis/fluentbit/v1alpha2/collector_types.go b/apis/fluentbit/v1alpha2/collector_types.go index 79c1c30d7..d3c112289 100644 --- a/apis/fluentbit/v1alpha2/collector_types.go +++ b/apis/fluentbit/v1alpha2/collector_types.go @@ -77,6 +77,8 @@ type CollectorSpec struct { Ports []corev1.ContainerPort `json:"ports,omitempty"` // Service represents configurations on the fluent-bit service. Service CollectorService `json:"service,omitempty"` + // SchedulerName represents the desired scheduler for the Fluentbit collector pods + SchedulerName string `json:"schedulerName,omitempty"` } // CollectorService defines the service of the FluentBit diff --git a/apis/fluentbit/v1alpha2/fluentbit_types.go b/apis/fluentbit/v1alpha2/fluentbit_types.go index 9f0991347..02f6cd473 100644 --- a/apis/fluentbit/v1alpha2/fluentbit_types.go +++ b/apis/fluentbit/v1alpha2/fluentbit_types.go @@ -75,6 +75,8 @@ type FluentBitSpec struct { Labels map[string]string `json:"labels,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + // ContainerSecurityContext holds container-level security attributes. + ContainerSecurityContext *corev1.SecurityContext `json:"containerSecurityContext,omitempty"` // Host networking is requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. HostNetwork bool `json:"hostNetwork,omitempty"` // EnvVars represent environment variables that can be passed to fluentbit pods. @@ -96,6 +98,8 @@ type FluentBitSpec struct { MetricsPort int32 `json:"metricsPort,omitempty"` // Service represents configurations on the fluent-bit service. 
Service FluentBitService `json:"service,omitempty"` + // SchedulerName represents the desired scheduler for fluent-bit pods. + SchedulerName string `json:"schedulerName,omitempty"` } // FluentBitService defines the service of the FluentBit diff --git a/apis/fluentbit/v1alpha2/plugins/custom/custom_types.go b/apis/fluentbit/v1alpha2/plugins/custom/custom_types.go index fc993933a..ab3dbb8b0 100644 --- a/apis/fluentbit/v1alpha2/plugins/custom/custom_types.go +++ b/apis/fluentbit/v1alpha2/plugins/custom/custom_types.go @@ -13,7 +13,7 @@ import ( // +kubebuilder:object:generate:=true // CustomPlugin is used to support filter plugins that are not implemented yet.
-// **For example usage, refer to https://github.com/jjsiv/fluent-operator/blob/master/docs/best-practice/custom-plugin.md** +// **For example usage, refer to https://github.com/fluent/fluent-operator/blob/master/docs/best-practice/custom-plugin.md** type CustomPlugin struct { Config string `json:"config,omitempty"` } diff --git a/apis/fluentbit/v1alpha2/plugins/output/kinesis_types.go b/apis/fluentbit/v1alpha2/plugins/output/kinesis_types.go new file mode 100644 index 000000000..64b61c560 --- /dev/null +++ b/apis/fluentbit/v1alpha2/plugins/output/kinesis_types.go @@ -0,0 +1,78 @@ +package output + +import ( + "fmt" + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins" + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins/params" +) + +// +kubebuilder:object:generate:=true + +// The Kinesis output plugin, allows to ingest your records into AWS Kinesis.
+// It uses the new high performance and highly efficient kinesis plugin called kinesis_streams instead of the older Golang Fluent Bit plugin released in 2019. +// https://docs.fluentbit.io/manual/pipeline/outputs/kinesis
+// https://github.com/aws/amazon-kinesis-streams-for-fluent-bit
+type Kinesis struct { + // The AWS region. + Region string `json:"region"` + // The name of the Kinesis Streams Delivery stream that you want log records sent to. + Stream string `json:"stream"` + // Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be added to records sent to Kinesis. + TimeKey string `json:"timeKey,omitempty"` + // strftime compliant format string for the timestamp; for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond precision with '%3N' and supports nanosecond precision with '%9N' and '%L'; for example, adding '%3N' to support millisecond '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + TimeKeyFormat string `json:"timeKeyFormat,omitempty"` + // By default, the whole log record will be sent to Kinesis. If you specify a key name with this option, then only the value of that key will be sent to Kinesis. For example, if you are using the Fluentd Docker log driver, you can specify log_key log and only the log message will be sent to Kinesis. + LogKey string `json:"logKey,omitempty"` + // ARN of an IAM role to assume (for cross account access). + RoleARN string `json:"roleARN,omitempty"` + // Specify a custom endpoint for the Kinesis API. + Endpoint string `json:"endpoint,omitempty"` + // Custom endpoint for the STS API. + STSEndpoint string `json:"stsEndpoint,omitempty"` + // Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit retry mechanism with backoff. Instead, it enables an immediate retry with no delay for networking errors, which may help improve throughput when there are transient/random networking issues. This option defaults to true. + AutoRetryRequests *bool `json:"autoRetryRequests,omitempty"` + // Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID. 
+ ExternalID string `json:"externalID,omitempty"` +} + +// Name implement Section() method +func (*Kinesis) Name() string { + return "kinesis_streams" +} + +// Params implement Section() method +func (k *Kinesis) Params(_ plugins.SecretLoader) (*params.KVs, error) { + kvs := params.NewKVs() + if k.Region != "" { + kvs.Insert("region", k.Region) + } + if k.Stream != "" { + kvs.Insert("stream", k.Stream) + } + if k.TimeKey != "" { + kvs.Insert("time_key", k.TimeKey) + } + if k.TimeKeyFormat != "" { + kvs.Insert("time_key_format", k.TimeKeyFormat) + } + if k.LogKey != "" { + kvs.Insert("log_key", k.LogKey) + } + if k.RoleARN != "" { + kvs.Insert("role_arn", k.RoleARN) + } + if k.Endpoint != "" { + kvs.Insert("endpoint", k.Endpoint) + } + if k.STSEndpoint != "" { + kvs.Insert("sts_endpoint", k.STSEndpoint) + } + if k.AutoRetryRequests != nil { + kvs.Insert("auto_retry_requests", fmt.Sprint(*k.AutoRetryRequests)) + } + if k.ExternalID != "" { + kvs.Insert("external_id", k.ExternalID) + } + + return kvs, nil +} diff --git a/apis/fluentbit/v1alpha2/plugins/output/kinesis_types_test.go b/apis/fluentbit/v1alpha2/plugins/output/kinesis_types_test.go new file mode 100644 index 000000000..65db782a9 --- /dev/null +++ b/apis/fluentbit/v1alpha2/plugins/output/kinesis_types_test.go @@ -0,0 +1,43 @@ +package output + +import ( + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins" + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins/params" + "github.com/onsi/gomega" + "testing" +) + +func TestOutput_Kinesis_Params(t *testing.T) { + g := gomega.NewWithT(t) + + sl := plugins.NewSecretLoader(nil, "test namespace") + + ki := Kinesis{ + Region: "us-east-1", + Stream: "test_stream", + TimeKey: "test_time_key", + TimeKeyFormat: "%Y-%m-%dT%H:%M:%S.%3N", + LogKey: "test_time_key", + RoleARN: "arn:aws:iam:test", + Endpoint: "test_endpoint", + STSEndpoint: "test_sts_endpoint", + AutoRetryRequests: ptrBool(true), + ExternalID: "test_external_id", + } + + 
expected := params.NewKVs() + expected.Insert("region", "us-east-1") + expected.Insert("stream", "test_stream") + expected.Insert("time_key", "test_time_key") + expected.Insert("time_key_format", "%Y-%m-%dT%H:%M:%S.%3N") + expected.Insert("log_key", "test_time_key") + expected.Insert("role_arn", "arn:aws:iam:test") + expected.Insert("endpoint", "test_endpoint") + expected.Insert("sts_endpoint", "test_sts_endpoint") + expected.Insert("auto_retry_requests", "true") + expected.Insert("external_id", "test_external_id") + + kvs, err := ki.Params(sl) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(kvs).To(gomega.Equal(expected)) +} diff --git a/apis/fluentbit/v1alpha2/plugins/output/loki_types.go b/apis/fluentbit/v1alpha2/plugins/output/loki_types.go index 2c104de68..2e95eae09 100644 --- a/apis/fluentbit/v1alpha2/plugins/output/loki_types.go +++ b/apis/fluentbit/v1alpha2/plugins/output/loki_types.go @@ -48,10 +48,10 @@ type Loki struct { // If set to true, it will add all Kubernetes labels to the Stream labels. // +kubebuilder:validation:Enum:=on;off AutoKubernetesLabels string `json:"autoKubernetesLabels,omitempty"` - // Specify the name of the key from the original record that contains the Tenant ID. + // Specify the name of the key from the original record that contains the Tenant ID. // The value of the key is set as X-Scope-OrgID of HTTP header. It is useful to set Tenant ID dynamically. 
- TenantIDKey string `json:"tenantIDKey,omitempty"` - *plugins.TLS `json:"tls,omitempty"` + TenantIDKey string `json:"tenantIDKey,omitempty"` + *plugins.TLS `json:"tls,omitempty"` } // implement Section() method diff --git a/apis/fluentbit/v1alpha2/plugins/output/s3_types.go b/apis/fluentbit/v1alpha2/plugins/output/s3_types.go new file mode 100644 index 000000000..063499b30 --- /dev/null +++ b/apis/fluentbit/v1alpha2/plugins/output/s3_types.go @@ -0,0 +1,157 @@ +package output + +import ( + "fmt" + + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins" + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins/params" +) + +// +kubebuilder:object:generate:=true + +// The S3 output plugin, allows to flush your records into an S3 bucket (object storage).
+// **For full documentation, refer to https://docs.fluentbit.io/manual/pipeline/outputs/s3** +type S3 struct { + // The AWS region of your S3 bucket + Region string `json:"Region"` + // S3 Bucket name + Bucket string `json:"Bucket"` + // Specify the name of the time key in the output record. To disable the time key just set the value to false. + JsonDateKey string `json:"JsonDateKey,omitempty"` + // Specify the format of the date. Supported formats are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681) + JsonDateFormat string `json:"JsonDateFormat,omitempty"` + // Specifies the size of files in S3. Minimum size is 1M. With use_put_object On the maximum size is 1G. With multipart upload mode, the maximum size is 50G. + TotalFileSize string `json:"TotalFileSize,omitempty"` + // The size of each 'part' for multipart uploads. Max: 50M + UploadChunkSize string `json:"UploadChunkSize,omitempty"` + // Whenever this amount of time has elapsed, Fluent Bit will complete an upload and create a new file in S3. For example, set this value to 60m and you will get a new file every hour. + UploadTimeout string `json:"UploadTimeout,omitempty"` + // Directory to locally buffer data before sending. + StoreDir string `json:"StoreDir,omitempty"` + // The size of the limitation for disk usage in S3. + StoreDirLimitSize string `json:"StoreDirLimitSize,omitempty"` + // Format string for keys in S3. + S3KeyFormat string `json:"S3KeyFormat,omitempty"` + // A series of characters which will be used to split the tag into 'parts' for use with the s3_key_format option. + S3KeyFormatTagDelimiters string `json:"S3KeyFormatTagDelimiters,omitempty"` + // Disables behavior where UUID string is automatically appended to end of S3 key name when $UUID is not provided in s3_key_format. $UUID, time formatters, $TAG, and other dynamic key formatters all work as expected while this feature is set to true. 
+ StaticFilePath *bool `json:"StaticFilePath,omitempty"` + // Use the S3 PutObject API, instead of the multipart upload API. + UsePutObject *bool `json:"UsePutObject,omitempty"` + // ARN of an IAM role to assume + RoleArn string `json:"RoleArn,omitempty"` + // Custom endpoint for the S3 API. + Endpoint string `json:"Endpoint,omitempty"` + // Custom endpoint for the STS API. + StsEndpoint string `json:"StsEndpoint,omitempty"` + // Predefined Canned ACL Policy for S3 objects. + CannedAcl string `json:"CannedAcl,omitempty"` + // Compression type for S3 objects. + Compression string `json:"Compression,omitempty"` + // A standard MIME type for the S3 object; this will be set as the Content-Type HTTP header. + ContentType string `json:"ContentType,omitempty"` + // Send the Content-MD5 header with PutObject and UploadPart requests, as is required when Object Lock is enabled. + SendContentMd5 *bool `json:"SendContentMd5,omitempty"` + // Immediately retry failed requests to AWS services once. + AutoRetryRequests *bool `json:"AutoRetryRequests,omitempty"` + // By default, the whole log record will be sent to S3. If you specify a key name with this option, then only the value of that key will be sent to S3. + LogKey string `json:"LogKey,omitempty"` + // Normally, when an upload request fails, there is a high chance for the last received chunk to be swapped with a later chunk, resulting in data shuffling. This feature prevents this shuffling by using a queue logic for uploads. + PreserveDataOrdering *bool `json:"PreserveDataOrdering,omitempty"` + // Specify the storage class for S3 objects. If this option is not specified, objects will be stored with the default 'STANDARD' storage class. + StorageClass string `json:"StorageClass,omitempty"` + // Integer value to set the maximum number of retries allowed. + RetryLimit *int32 `json:"RetryLimit,omitempty"` + // Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID. 
+ ExternalId string `json:"ExternalId,omitempty"` +} + +// Name implement Section() method +func (_ *S3) Name() string { + return "s3" +} + +func (o *S3) Params(sl plugins.SecretLoader) (*params.KVs, error) { + kvs := params.NewKVs() + // S3 Validation + + if o.Region != "" { + kvs.Insert("region", o.Region) + } + if o.Bucket != "" { + kvs.Insert("bucket", o.Bucket) + } + if o.JsonDateKey != "" { + kvs.Insert("json_date_key", o.JsonDateKey) + } + if o.JsonDateFormat != "" { + kvs.Insert("json_date_format", o.JsonDateFormat) + } + if o.TotalFileSize != "" { + kvs.Insert("total_file_size", o.TotalFileSize) + } + if o.UploadChunkSize != "" { + kvs.Insert("upload_chunk_size", o.UploadChunkSize) + } + if o.UploadTimeout != "" { + kvs.Insert("upload_timeout", o.UploadTimeout) + } + if o.StoreDir != "" { + kvs.Insert("store_dir", o.StoreDir) + } + if o.StoreDirLimitSize != "" { + kvs.Insert("store_dir_limit_size", o.StoreDirLimitSize) + } + if o.S3KeyFormat != "" { + kvs.Insert("s3_key_format", o.S3KeyFormat) + } + if o.S3KeyFormatTagDelimiters != "" { + kvs.Insert("s3_key_format_tag_delimiters", o.S3KeyFormatTagDelimiters) + } + if o.StaticFilePath != nil { + kvs.Insert("static_file_path", fmt.Sprint(*o.StaticFilePath)) + } + if o.UsePutObject != nil { + kvs.Insert("use_put_object", fmt.Sprint(*o.UsePutObject)) + } + if o.RoleArn != "" { + kvs.Insert("role_arn", o.RoleArn) + } + if o.Endpoint != "" { + kvs.Insert("endpoint", o.Endpoint) + } + if o.StsEndpoint != "" { + kvs.Insert("sts_endpoint", o.StsEndpoint) + } + if o.CannedAcl != "" { + kvs.Insert("canned_acl", o.CannedAcl) + } + if o.Compression != "" { + kvs.Insert("compression", o.Compression) + } + if o.ContentType != "" { + kvs.Insert("content_type", o.ContentType) + } + if o.SendContentMd5 != nil { + kvs.Insert("send_content_md5", fmt.Sprint(*o.SendContentMd5)) + } + if o.AutoRetryRequests != nil { + kvs.Insert("auto_retry_requests", fmt.Sprint(*o.AutoRetryRequests)) + } + if o.LogKey != "" { + 
kvs.Insert("log_key", o.LogKey) + } + if o.PreserveDataOrdering != nil { + kvs.Insert("preserve_data_ordering", fmt.Sprint(*o.PreserveDataOrdering)) + } + if o.StorageClass != "" { + kvs.Insert("storage_class", o.StorageClass) + } + if o.RetryLimit != nil { + kvs.Insert("retry_limit", fmt.Sprint(*o.RetryLimit)) + } + if o.ExternalId != "" { + kvs.Insert("external_id", o.ExternalId) + } + return kvs, nil +} diff --git a/apis/fluentbit/v1alpha2/plugins/output/s3_types_test.go b/apis/fluentbit/v1alpha2/plugins/output/s3_types_test.go new file mode 100644 index 000000000..39b622d7d --- /dev/null +++ b/apis/fluentbit/v1alpha2/plugins/output/s3_types_test.go @@ -0,0 +1,77 @@ +package output + +import ( + "testing" + + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins" + "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2/plugins/params" + . "github.com/onsi/gomega" +) + +func TestOutput_S3_Params(t *testing.T) { + g := NewGomegaWithT(t) + + sl := plugins.NewSecretLoader(nil, "test namespace") + + s3 := S3{ + Region: "us-east-1", + Bucket: "fluentbit", + JsonDateKey: "2018-05-30T09:39:52.000681Z", + JsonDateFormat: "iso8601", + TotalFileSize: "100M", + UploadChunkSize: "50M", + UploadTimeout: "10m", + StoreDir: "/tmp/fluent-bit/s3", + StoreDirLimitSize: "0", + S3KeyFormat: "/fluent-bit-logs/$TAG/%Y/%m/%d/%H/%M/%S", + S3KeyFormatTagDelimiters: ".", + StaticFilePath: ptrAny(false), + UsePutObject: ptrAny(false), + RoleArn: "role", + Endpoint: "endpoint", + StsEndpoint: "sts_endpoint", + CannedAcl: "canned_acl", + Compression: "gzip", + ContentType: "text/plain", + SendContentMd5: ptrAny(false), + AutoRetryRequests: ptrAny(true), + LogKey: "log_key", + PreserveDataOrdering: ptrAny(true), + StorageClass: "storage_class", + RetryLimit: ptrAny(int32(1)), + ExternalId: "external_id", + } + + expected := params.NewKVs() + expected.Insert("region", "us-east-1") + expected.Insert("bucket", "fluentbit") + expected.Insert("json_date_key", 
"2018-05-30T09:39:52.000681Z") + expected.Insert("json_date_format", "iso8601") + expected.Insert("total_file_size", "100M") + expected.Insert("upload_chunk_size", "50M") + expected.Insert("upload_timeout", "10m") + expected.Insert("store_dir", "/tmp/fluent-bit/s3") + expected.Insert("store_dir_limit_size", "0") + expected.Insert("s3_key_format", "/fluent-bit-logs/$TAG/%Y/%m/%d/%H/%M/%S") + expected.Insert("s3_key_format_tag_delimiters", ".") + expected.Insert("static_file_path", "false") + expected.Insert("use_put_object", "false") + expected.Insert("role_arn", "role") + expected.Insert("endpoint", "endpoint") + expected.Insert("sts_endpoint", "sts_endpoint") + expected.Insert("canned_acl", "canned_acl") + expected.Insert("compression", "gzip") + expected.Insert("content_type", "text/plain") + expected.Insert("send_content_md5", "false") + expected.Insert("auto_retry_requests", "true") + expected.Insert("log_key", "log_key") + expected.Insert("preserve_data_ordering", "true") + expected.Insert("storage_class", "storage_class") + expected.Insert("retry_limit", "1") + expected.Insert("external_id", "external_id") + + kvs, err := s3.Params(sl) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(kvs).To(Equal(expected)) + +} diff --git a/apis/fluentbit/v1alpha2/plugins/output/zz_generated.deepcopy.go b/apis/fluentbit/v1alpha2/plugins/output/zz_generated.deepcopy.go index 195e25e95..574f4bf02 100644 --- a/apis/fluentbit/v1alpha2/plugins/output/zz_generated.deepcopy.go +++ b/apis/fluentbit/v1alpha2/plugins/output/zz_generated.deepcopy.go @@ -479,6 +479,26 @@ func (in *Kafka) DeepCopy() *Kafka { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Kinesis) DeepCopyInto(out *Kinesis) { + *out = *in + if in.AutoRetryRequests != nil { + in, out := &in.AutoRetryRequests, &out.AutoRetryRequests + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kinesis. +func (in *Kinesis) DeepCopy() *Kinesis { + if in == nil { + return nil + } + out := new(Kinesis) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Loki) DeepCopyInto(out *Loki) { *out = *in @@ -747,6 +767,51 @@ func (in *PrometheusRemoteWrite) DeepCopy() *PrometheusRemoteWrite { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3) DeepCopyInto(out *S3) { + *out = *in + if in.StaticFilePath != nil { + in, out := &in.StaticFilePath, &out.StaticFilePath + *out = new(bool) + **out = **in + } + if in.UsePutObject != nil { + in, out := &in.UsePutObject, &out.UsePutObject + *out = new(bool) + **out = **in + } + if in.SendContentMd5 != nil { + in, out := &in.SendContentMd5, &out.SendContentMd5 + *out = new(bool) + **out = **in + } + if in.AutoRetryRequests != nil { + in, out := &in.AutoRetryRequests, &out.AutoRetryRequests + *out = new(bool) + **out = **in + } + if in.PreserveDataOrdering != nil { + in, out := &in.PreserveDataOrdering, &out.PreserveDataOrdering + *out = new(bool) + **out = **in + } + if in.RetryLimit != nil { + in, out := &in.RetryLimit, &out.RetryLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3. +func (in *S3) DeepCopy() *S3 { + if in == nil { + return nil + } + out := new(S3) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Splunk) DeepCopyInto(out *Splunk) { *out = *in diff --git a/apis/fluentbit/v1alpha2/zz_generated.deepcopy.go b/apis/fluentbit/v1alpha2/zz_generated.deepcopy.go index e037d0469..4ba987718 100644 --- a/apis/fluentbit/v1alpha2/zz_generated.deepcopy.go +++ b/apis/fluentbit/v1alpha2/zz_generated.deepcopy.go @@ -961,6 +961,11 @@ func (in *FluentBitSpec) DeepCopyInto(out *FluentBitSpec) { *out = new(v1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.ContainerSecurityContext != nil { + in, out := &in.ContainerSecurityContext, &out.ContainerSecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } if in.EnvVars != nil { in, out := &in.EnvVars, &out.EnvVars *out = make([]v1.EnvVar, len(*in)) @@ -1293,11 +1298,16 @@ func (in *OutputSpec) DeepCopyInto(out *OutputSpec) { *out = new(output.DataDog) (*in).DeepCopyInto(*out) } - if in.Fireose != nil { - in, out := &in.Fireose, &out.Fireose + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose *out = new(output.Firehose) (*in).DeepCopyInto(*out) } + if in.Kinesis != nil { + in, out := &in.Kinesis, &out.Kinesis + *out = new(output.Kinesis) + (*in).DeepCopyInto(*out) + } if in.Stackdriver != nil { in, out := &in.Stackdriver, &out.Stackdriver *out = new(output.Stackdriver) @@ -1323,6 +1333,11 @@ func (in *OutputSpec) DeepCopyInto(out *OutputSpec) { *out = new(output.PrometheusRemoteWrite) (*in).DeepCopyInto(*out) } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(output.S3) + (*in).DeepCopyInto(*out) + } if in.CustomPlugin != nil { in, out := &in.CustomPlugin, &out.CustomPlugin *out = new(custom.CustomPlugin) diff --git a/apis/fluentd/v1alpha1/fluentd_types.go b/apis/fluentd/v1alpha1/fluentd_types.go index 70fd984a3..7697d9fbf 100644 --- a/apis/fluentd/v1alpha1/fluentd_types.go +++ b/apis/fluentd/v1alpha1/fluentd_types.go @@ -36,12 +36,21 @@ const ( type FluentdSpec struct { // Fluentd global inputs. 
GlobalInputs []input.Input `json:"globalInputs,omitempty"` + // Select cluster filter plugins used to filter for the default cluster output + DefaultFilterSelector *metav1.LabelSelector `json:"defaultFilterSelector,omitempty"` + // Select cluster output plugins used to send all logs that did not match any route to the matching outputs + DefaultOutputSelector *metav1.LabelSelector `json:"defaultOutputSelector,omitempty"` // By default will build the related service according to the globalinputs definition. DisableService bool `json:"disableService,omitempty"` // Numbers of the Fluentd instance Replicas *int32 `json:"replicas,omitempty"` // Numbers of the workers in Fluentd instance Workers *int32 `json:"workers,omitempty"` + // Global logging verbosity + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum:=fatal;error;warn;info;debug;trace + // +kubebuilder:default:=info + LogLevel string `json:"logLevel,omitempty"` // Fluentd image. Image string `json:"image,omitempty"` // Fluentd Watcher command line arguments. @@ -86,6 +95,10 @@ type FluentdSpec struct { VolumeClaimTemplates []corev1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` // Service represents configurations on the fluentd service. Service FluentDService `json:"service,omitempty"` + // PodSecurityContext represents the security context for the fluentd pods. + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + // SchedulerName represents the desired scheduler for fluentd pods. 
+ SchedulerName string `json:"schedulerName,omitempty"` } // FluentDService the service of the FluentD diff --git a/apis/fluentd/v1alpha1/helper.go b/apis/fluentd/v1alpha1/helper.go index 4a5ad0e29..a9701f4cf 100644 --- a/apis/fluentd/v1alpha1/helper.go +++ b/apis/fluentd/v1alpha1/helper.go @@ -98,6 +98,7 @@ func (pgr *PluginResources) BuildCfgRouter(cfg Renderer) (*fluentdRouter.Route, return nil, err } + // Insert the route to the MainRouter pgr.MainRouterPlugins.InsertChilds(routePluginStore) return cfgRoute, nil @@ -105,8 +106,12 @@ func (pgr *PluginResources) BuildCfgRouter(cfg Renderer) (*fluentdRouter.Route, // PatchAndFilterClusterLevelResources will combine and patch all the cluster CRs that the fluentdconfig selected, // convert the related filter/output pluginstores to the global pluginresources. -func (pgr *PluginResources) PatchAndFilterClusterLevelResources(sl plugins.SecretLoader, cfgId string, - clusterfilters []ClusterFilter, clusteroutputs []ClusterOutput) (*CfgResources, []string) { +func (pgr *PluginResources) PatchAndFilterClusterLevelResources( + sl plugins.SecretLoader, + cfgId string, + clusterfilters []ClusterFilter, + clusteroutputs []ClusterOutput, +) (*CfgResources, []string) { // To store all filters/outputs plugins that this cfg selected cfgResources := NewCfgResources() @@ -135,8 +140,12 @@ func (pgr *PluginResources) PatchAndFilterClusterLevelResources(sl plugins.Secre // PatchAndFilterNamespacedLevelResources will combine and patch all the cluster CRs that the fluentdconfig selected, // convert the related filter/output pluginstores to the global pluginresources. 
-func (pgr *PluginResources) PatchAndFilterNamespacedLevelResources(sl plugins.SecretLoader, cfgId string, - filters []Filter, outputs []Output) (*CfgResources, []string) { +func (pgr *PluginResources) PatchAndFilterNamespacedLevelResources( + sl plugins.SecretLoader, + cfgId string, + filters []Filter, + outputs []Output, +) (*CfgResources, []string) { // To store all filters/outputs plugins that this cfg selected cfgResources := NewCfgResources() @@ -163,8 +172,11 @@ func (pgr *PluginResources) PatchAndFilterNamespacedLevelResources(sl plugins.Se return cfgResources, errs } -func (r *CfgResources) filterForFilters(cfgId, namespace, name, crdtype string, - sl plugins.SecretLoader, filters []filter.Filter) error { +func (r *CfgResources) filterForFilters( + cfgId, namespace, name, crdtype string, + sl plugins.SecretLoader, + filters []filter.Filter, +) error { for n, filter := range filters { filterId := fmt.Sprintf("%s::%s::%s::%s-%d", cfgId, namespace, crdtype, name, n) filter.FilterCommon.Id = &filterId @@ -189,8 +201,11 @@ func (r *CfgResources) filterForFilters(cfgId, namespace, name, crdtype string, return nil } -func (r *CfgResources) filterForOutputs(cfgId, namespace, name, crdtype string, - sl plugins.SecretLoader, outputs []output.Output) error { +func (r *CfgResources) filterForOutputs( + cfgId, namespace, name, crdtype string, + sl plugins.SecretLoader, + outputs []output.Output, +) error { for n, output := range outputs { outputId := fmt.Sprintf("%s::%s::%s::%s-%d", cfgId, namespace, crdtype, name, n) output.OutputCommon.Id = &outputId @@ -218,7 +233,7 @@ func (r *CfgResources) filterForOutputs(cfgId, namespace, name, crdtype string, // convert the cfg plugins to a label plugin, appends to the global label plugins func (pgr *PluginResources) WithCfgResources(cfgRouteLabel string, r *CfgResources) error { if len(r.FilterPlugins) == 0 && len(r.OutputPlugins) == 0 { - return errors.New("no filter plugins or output plugins matched") + return errors.New("no 
filter plugins and no output plugins matched") } cfgLabelPlugin := params.NewPluginStore("label") diff --git a/apis/fluentd/v1alpha1/plugins/common/buffer_types.go b/apis/fluentd/v1alpha1/plugins/common/buffer_types.go index 48b719113..8b4f59338 100644 --- a/apis/fluentd/v1alpha1/plugins/common/buffer_types.go +++ b/apis/fluentd/v1alpha1/plugins/common/buffer_types.go @@ -261,6 +261,14 @@ func (b *Buffer) Params(_ plugins.SecretLoader) (*params.PluginStore, error) { ps.InsertPairs("retry_type", fmt.Sprint(*b.RetryType)) } + if b.RetryMaxTimes != nil { + ps.InsertPairs("retry_max_times", fmt.Sprint(*b.RetryMaxTimes)) + } + + if b.RetryForever != nil { + ps.InsertPairs("retry_forever", fmt.Sprint(*b.RetryForever)) + } + if b.RetryWait != nil { ps.InsertPairs("retry_wait", fmt.Sprint(*b.RetryWait)) } @@ -286,7 +294,7 @@ func (b *Buffer) Params(_ plugins.SecretLoader) (*params.PluginStore, error) { ps.InsertPairs("time_type", fmt.Sprint(*b.Time.TimeType)) } if b.Time.TimeFormat != nil { - ps.InsertPairs("time_type", fmt.Sprint(*b.Time.TimeFormat)) + ps.InsertPairs("time_format", fmt.Sprint(*b.Time.TimeFormat)) } if b.Time.Localtime != nil { ps.InsertPairs("localtime", fmt.Sprint(*b.Time.Localtime)) diff --git a/apis/fluentd/v1alpha1/plugins/common/common_types.go b/apis/fluentd/v1alpha1/plugins/common/common_types.go index 1e0d873ad..0c1056256 100644 --- a/apis/fluentd/v1alpha1/plugins/common/common_types.go +++ b/apis/fluentd/v1alpha1/plugins/common/common_types.go @@ -20,10 +20,10 @@ type CommonFields struct { // Time defines the common parameters for the time plugin type Time struct { - // parses/formats value according to this type, default is *string - // +kubebuilder:validation:Enum:=float;unixtime;*string;mixed + // parses/formats value according to this type, default is string + // +kubebuilder:validation:Enum:=float;unixtime;string;mixed TimeType *string `json:"timeType,omitempty"` - // Process value according to the specified format. 
This is available only when time_type is *string + // Process value according to the specified format. This is available only when time_type is string TimeFormat *string `json:"timeFormat,omitempty"` // If true, uses local time. Localtime *bool `json:"localtime,omitempty"` @@ -199,7 +199,7 @@ func (j *Inject) Params(_ plugins.SecretLoader) (*params.PluginStore, error) { ps.InsertPairs("time_type", fmt.Sprint(*j.TimeType)) } if j.TimeFormat != nil { - ps.InsertPairs("time_type", fmt.Sprint(*j.TimeFormat)) + ps.InsertPairs("time_format", fmt.Sprint(*j.TimeFormat)) } if j.Localtime != nil { ps.InsertPairs("localtime", fmt.Sprint(*j.Localtime)) diff --git a/apis/fluentd/v1alpha1/plugins/common/parse_types.go b/apis/fluentd/v1alpha1/plugins/common/parse_types.go index 83abb6aa7..05e225c2d 100644 --- a/apis/fluentd/v1alpha1/plugins/common/parse_types.go +++ b/apis/fluentd/v1alpha1/plugins/common/parse_types.go @@ -29,7 +29,7 @@ type Parse struct { // Specify time field for event time. If the event doesn't have this field, current time is used. TimeKey *string `json:"timeKey,omitempty"` // If true, use Fluent::Eventnow(current time) as a timestamp when time_key is specified. - EstimateCurentEvent *bool `json:"estimateCurrentEvent,omitempty"` + EstimateCurrentEvent *bool `json:"estimateCurrentEvent,omitempty"` // If true, keep time field in th record. KeepTimeKey *bool `json:"keepTimeKey,omitempty"` // Specify timeout for parse processing. 
@@ -61,8 +61,8 @@ func (p *Parse) Params(_ plugins.SecretLoader) (*params.PluginStore, error) { if p.TimeKey != nil { ps.InsertPairs("time_key", fmt.Sprint(*p.TimeKey)) } - if p.EstimateCurentEvent != nil { - ps.InsertPairs("estimate_curent_event", fmt.Sprint(*p.EstimateCurentEvent)) + if p.EstimateCurrentEvent != nil { + ps.InsertPairs("estimate_current_event", fmt.Sprint(*p.EstimateCurrentEvent)) } if p.KeepTimeKey != nil { ps.InsertPairs("keep_timeout", fmt.Sprint(*p.KeepTimeKey)) @@ -75,7 +75,7 @@ func (p *Parse) Params(_ plugins.SecretLoader) (*params.PluginStore, error) { ps.InsertPairs("time_type", fmt.Sprint(*p.TimeType)) } if p.TimeFormat != nil { - ps.InsertPairs("time_type", fmt.Sprint(*p.TimeFormat)) + ps.InsertPairs("time_format", fmt.Sprint(*p.TimeFormat)) } if p.Localtime != nil { ps.InsertPairs("localtime", fmt.Sprint(*p.Localtime)) diff --git a/apis/fluentd/v1alpha1/plugins/input/tail.go b/apis/fluentd/v1alpha1/plugins/input/tail.go index d3e4bd31b..3e5388018 100644 --- a/apis/fluentd/v1alpha1/plugins/input/tail.go +++ b/apis/fluentd/v1alpha1/plugins/input/tail.go @@ -8,6 +8,7 @@ import ( "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins/common" "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins/params" ) + // The in_tail Input plugin allows Fluentd to read events from the tail of text files. Its behavior is similar to the tail -F command. type Tail struct { // +kubebuilder:validation:Required diff --git a/apis/fluentd/v1alpha1/plugins/output/datadog.go b/apis/fluentd/v1alpha1/plugins/output/datadog.go new file mode 100644 index 000000000..536237539 --- /dev/null +++ b/apis/fluentd/v1alpha1/plugins/output/datadog.go @@ -0,0 +1,53 @@ +package output + +import "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins" + +// Datadog defines the parameters for out_datadog plugin +type Datadog struct { + // This parameter is required in order to authenticate your fluent agent. 
+ ApiKey *plugins.Secret `json:"apiKey,omitempty"` + // Event format, if true, the event is sent in json format. Otherwise, in plain text. + UseJson *bool `json:"useJson,omitempty"` + // Automatically include the Fluentd tag in the record. + IncludeTagKey *bool `json:"includeTagKey,omitempty"` + // Where to store the Fluentd tag. + TagKey *string `json:"tagKey,omitempty"` + // Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. + TimestampKey *string `json:"timestampKey,omitempty"` + // If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. + UseSSL *bool `json:"useSSL,omitempty"` + // Disable SSL validation (useful for proxy forwarding) + NoSSLValidation *bool `json:"noSSLValidation,omitempty"` + // Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=65535 + SSLPort *uint32 `json:"sslPort,omitempty"` + // The number of retries before the output plugin stops. Set to -1 for unlimited retries + MaxRetries *uint32 `json:"maxRetries,omitempty"` + // The maximum time waited between each retry in seconds + MaxBackoff *uint32 `json:"maxBackoff,omitempty"` + // Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516 + UseHTTP *bool `json:"useHTTP,omitempty"` + // Enable log compression for HTTP + UseCompression *bool `json:"useCompression,omitempty"` + // Set the log compression level for HTTP (1 to 9, 9 being the best ratio) + CompressionLevel *uint32 `json:"compressionLevel,omitempty"` + // This tells Datadog what integration it is + DDSource *string `json:"ddSource,omitempty"` + // Multiple value attribute.
Can be used to refine the source attribute + DDSourcecategory *string `json:"ddSourcecategory,omitempty"` + // Custom tags with the following format "key1:value1, key2:value2" + DDTags *string `json:"ddTags,omitempty"` + // Used by Datadog to identify the host submitting the logs. + DDHostname *string `json:"ddHostname,omitempty"` + // Used by Datadog to correlate between logs, traces and metrics. + Service *string `json:"service,omitempty"` + // Proxy port when logs are not directly forwarded to Datadog and ssl is not used + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=65535 + Port *uint32 `json:"port,omitempty"` + // Proxy endpoint when logs are not directly forwarded to Datadog + Host *string `json:"host,omitempty"` + // HTTP proxy, only takes effect if HTTP forwarding is enabled (use_http). Defaults to HTTP_PROXY/http_proxy env vars. + HttpProxy *string `json:"httpProxy,omitempty"` +} diff --git a/apis/fluentd/v1alpha1/plugins/output/types.go b/apis/fluentd/v1alpha1/plugins/output/types.go index a74239b41..140ca1868 100644 --- a/apis/fluentd/v1alpha1/plugins/output/types.go +++ b/apis/fluentd/v1alpha1/plugins/output/types.go @@ -5,12 +5,13 @@ import ( "fmt" "strings" + "strconv" + "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins" "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins/common" "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins/custom" "github.com/fluent/fluent-operator/v2/apis/fluentd/v1alpha1/plugins/params" "github.com/fluent/fluent-operator/v2/pkg/utils" - "strconv" ) // OutputCommon defines the common parameters for output plugin @@ -49,6 +50,8 @@ type Output struct { CustomPlugin *custom.CustomPlugin `json:"customPlugin,omitempty"` // out_cloudwatch plugin CloudWatch *CloudWatch `json:"cloudWatch,omitempty"` + // datadog plugin + Datadog *Datadog `json:"datadog,omitempty"` } // DeepCopyInto implements the DeepCopyInto interface. 
@@ -156,6 +159,11 @@ func (o *Output) Params(loader plugins.SecretLoader) (*params.PluginStore, error ps.InsertType(string(params.CloudWatchOutputType)) return o.cloudWatchPlugin(ps, loader), nil } + + if o.Datadog != nil { + ps.InsertType(string(params.DatadogOutputType)) + return o.datadogPlugin(ps, loader), nil + } return o.customOutput(ps, loader), nil } @@ -773,4 +781,95 @@ func (o *Output) customOutput(parent *params.PluginStore, loader plugins.SecretL return parent } +func (o *Output) datadogPlugin(parent *params.PluginStore, sl plugins.SecretLoader) *params.PluginStore { + if o.Datadog.ApiKey != nil { + apiKey, err := sl.LoadSecret(*o.Datadog.ApiKey) + if err != nil { + return nil + } + parent.InsertPairs("api_key", apiKey) + } + + if o.Datadog.UseJson != nil { + parent.InsertPairs("use_json", fmt.Sprint(*o.Datadog.UseJson)) + } + + if o.Datadog.IncludeTagKey != nil { + parent.InsertPairs("include_tag_key", fmt.Sprint(*o.Datadog.IncludeTagKey)) + } + + if o.Datadog.TagKey != nil { + parent.InsertPairs("tag_key", fmt.Sprint(*o.Datadog.TagKey)) + } + + if o.Datadog.TimestampKey != nil { + parent.InsertPairs("timestamp_key", fmt.Sprint(*o.Datadog.TimestampKey)) + } + + if o.Datadog.UseSSL != nil { + parent.InsertPairs("use_ssl", fmt.Sprint(*o.Datadog.UseSSL)) + } + + if o.Datadog.NoSSLValidation != nil { + parent.InsertPairs("no_ssl_validation", fmt.Sprint(*o.Datadog.NoSSLValidation)) + } + + if o.Datadog.SSLPort != nil { + parent.InsertPairs("ssl_port", fmt.Sprint(*o.Datadog.SSLPort)) + } + + if o.Datadog.MaxRetries != nil { + parent.InsertPairs("max_retries", fmt.Sprint(*o.Datadog.MaxRetries)) + } + + if o.Datadog.MaxBackoff != nil { + parent.InsertPairs("max_backoff", fmt.Sprint(*o.Datadog.MaxBackoff)) + } + + if o.Datadog.UseHTTP != nil { + parent.InsertPairs("use_http", fmt.Sprint(*o.Datadog.UseHTTP)) + } + + if o.Datadog.UseCompression != nil { + parent.InsertPairs("use_compression", fmt.Sprint(*o.Datadog.UseCompression)) + } + + if 
o.Datadog.CompressionLevel != nil { + parent.InsertPairs("compression_level", fmt.Sprint(*o.Datadog.CompressionLevel)) + } + + if o.Datadog.DDSource != nil { + parent.InsertPairs("dd_source", fmt.Sprint(*o.Datadog.DDSource)) + } + + if o.Datadog.DDSourcecategory != nil { + parent.InsertPairs("dd_sourcecategory", fmt.Sprint(*o.Datadog.DDSourcecategory)) + } + + if o.Datadog.DDTags != nil { + parent.InsertPairs("dd_tags", fmt.Sprint(*o.Datadog.DDTags)) + } + + if o.Datadog.DDHostname != nil { + parent.InsertPairs("dd_hostname", fmt.Sprint(*o.Datadog.DDHostname)) + } + + if o.Datadog.Service != nil { + parent.InsertPairs("service", fmt.Sprint(*o.Datadog.Service)) + } + + if o.Datadog.Port != nil { + parent.InsertPairs("port", fmt.Sprint(*o.Datadog.Port)) + } + + if o.Datadog.Host != nil { + parent.InsertPairs("host", fmt.Sprint(*o.Datadog.Host)) + } + + if o.Datadog.HttpProxy != nil { + parent.InsertPairs("http_proxy", fmt.Sprint(*o.Datadog.HttpProxy)) + } + return parent +} + var _ plugins.Plugin = &Output{} diff --git a/apis/fluentd/v1alpha1/plugins/params/const.go b/apis/fluentd/v1alpha1/plugins/params/const.go index d2d7e6bf3..4c49f3cd8 100644 --- a/apis/fluentd/v1alpha1/plugins/params/const.go +++ b/apis/fluentd/v1alpha1/plugins/params/const.go @@ -35,6 +35,7 @@ const ( MatchPlugin PluginName = "match" BufferPlugin PluginName = "buffer" CloudWatchPlugin PluginName = "cloudwatch_logs" + DatadogPlugin PluginName = "datadog" BufferTag string = "tag" LabelTag string = "tag" @@ -66,6 +67,7 @@ const ( S3OutputType OutputType = "s3" LokiOutputType OutputType = "loki" CloudWatchOutputType OutputType = "cloudwatch_logs" + DatadogOutputType OutputType = "datadog" ) var ( diff --git a/apis/fluentd/v1alpha1/plugins/params/model.go b/apis/fluentd/v1alpha1/plugins/params/model.go index 2805aa1bd..99e03bdc2 100644 --- a/apis/fluentd/v1alpha1/plugins/params/model.go +++ b/apis/fluentd/v1alpha1/plugins/params/model.go @@ -34,12 +34,12 @@ func (ps *PluginStore) InsertPairs(key, 
value string) { ps.Store[key] = value } -// The @type parameter specifies the type of the plugin. +// The @type parameter specifies the type of the plugin func (ps *PluginStore) InsertType(value string) { ps.InsertPairs("@type", value) } -// SetIgnorePath will ignore the buffer path. +// SetIgnorePath will ignore the buffer path func (ps *PluginStore) SetIgnorePath() { ps.IgnorePath = true } @@ -58,7 +58,7 @@ func (ps *PluginStore) InsertChilds(childs ...*PluginStore) { } } -// The total hash string for this plugin store. +// The total hash string for this plugin store func (ps *PluginStore) Hash() string { c := NewPluginStore(ps.Name) @@ -78,7 +78,7 @@ func (ps *PluginStore) GetTag() string { return ps.Store["tag"] } -// Returns the @label value string of this plugin store. +// Returns the @label value string of this plugin store func (ps *PluginStore) RouteLabel() string { if ps.Name != "route" { return "" @@ -100,10 +100,10 @@ func (ps *PluginStore) String() string { } var buf bytes.Buffer - // Handles the head section. + // Handles the head directive ps.processHead(&buf) - // The body needs to be indented by two whitespace characters. + // The body needs to be indented by two whitespace characters parentPrefixWhitespaces := ps.PrefixWhitespaces ps.setWhitespaces(parentPrefixWhitespaces + IntervalWhitespaces) ps.processBody(&buf) @@ -115,7 +115,7 @@ func (ps *PluginStore) String() string { } } - // The tail must be indented in the same format as head. 
+ // The tail must be indented in the same format as head directive ps.setWhitespaces(parentPrefixWhitespaces) ps.processTail(&buf) @@ -126,7 +126,7 @@ func (ps *PluginStore) setWhitespaces(curentWhitespaces string) { ps.PrefixWhitespaces = curentWhitespaces } -// processes head, i.e: +// write the head directive to buffer, i.e.: func (ps *PluginStore) processHead(buf *bytes.Buffer) { var head string switch PluginName(ps.Name) { @@ -173,11 +173,12 @@ func (ps *PluginStore) processBody(buf *bytes.Buffer) { buf.WriteString(body) } -// processes the tail +// write the tail directive to the buffer, i.e.: func (ps *PluginStore) processTail(buf *bytes.Buffer) { buf.WriteString(fmt.Sprintf("%s</%s>\n", ps.PrefixWhitespaces, ps.Name)) } +// decide to return the head directive with or without a filter - or func (ps *PluginStore) headFmtSprintf(value string) string { if value != "" { return fmt.Sprintf("%s<%s %s>\n", ps.PrefixWhitespaces, ps.Name, value) } diff --git a/apis/fluentd/v1alpha1/tests/expected/duplicate-removal-cr-specs.cfg b/apis/fluentd/v1alpha1/tests/expected/duplicate-removal-cr-specs.cfg index 87ee162fb..16a9a1552 100644 --- a/apis/fluentd/v1alpha1/tests/expected/duplicate-removal-cr-specs.cfg +++ b/apis/fluentd/v1alpha1/tests/expected/duplicate-removal-cr-specs.cfg @@ -70,7 +70,7 @@ @type regexp expression /^(?[^ ]*) [^ ]* (?[^ ]*) [(? @@ -163,7 +163,7 @@ @type regexp expression /^(?[^ ]*) [^ ]* (?[^ ]*) [(?
diff --git a/apis/fluentd/v1alpha1/tests/expected/fluentd-cluster-cfg-output-datadog.cfg b/apis/fluentd/v1alpha1/tests/expected/fluentd-cluster-cfg-output-datadog.cfg new file mode 100644 index 000000000..2a0a28c9e --- /dev/null +++ b/apis/fluentd/v1alpha1/tests/expected/fluentd-cluster-cfg-output-datadog.cfg @@ -0,0 +1,33 @@ + + @type forward + bind 0.0.0.0 + port 24224 + + + @id main + @type label_router + + @label @a2170d34e9940ec56d328100e375c43e + + namespaces default,kube-system + + + + \ No newline at end of file diff --git a/apis/fluentd/v1alpha1/tests/helper_test.go b/apis/fluentd/v1alpha1/tests/helper_test.go index 502f675e5..d7aab0f5f 100644 --- a/apis/fluentd/v1alpha1/tests/helper_test.go +++ b/apis/fluentd/v1alpha1/tests/helper_test.go @@ -1,10 +1,11 @@ package cfgrender import ( - "github.com/go-logr/logr" "strings" "testing" + "github.com/go-logr/logr" + . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -256,6 +257,32 @@ func Test_ClusterCfgOutput2CloudWatch(t *testing.T) { } } +func Test_ClusterCfgOutput2Datadog(t *testing.T) { + g := NewGomegaWithT(t) + sl := plugins.NewSecretLoader(nil, Fluentd.Namespace, logr.Logger{}) + + psr := fluentdv1alpha1.NewGlobalPluginResources("main") + psr.CombineGlobalInputsPlugins(sl, Fluentd.Spec.GlobalInputs) + + clustercfgRouter, err := psr.BuildCfgRouter(&FluentdClusterFluentdConfig1) + g.Expect(err).NotTo(HaveOccurred()) + clusterFilters := []fluentdv1alpha1.ClusterFilter{FluentdClusterFilter1} + clusterOutputs := []fluentdv1alpha1.ClusterOutput{FluentdClusterOutput2Datadog} + clustercfgResources, _ := psr.PatchAndFilterClusterLevelResources(sl, FluentdClusterFluentdConfig1.GetCfgId(), clusterFilters, clusterOutputs) + err = psr.WithCfgResources(*clustercfgRouter.Label, clustercfgResources) + g.Expect(err).NotTo(HaveOccurred()) + + // we should not see any permutations in serialized config + i := 0 + for i < maxRuntimes { + config, errs := psr.RenderMainConfig(false) + 
g.Expect(errs).NotTo(HaveOccurred()) + g.Expect(strings.TrimSpace(string(getExpectedCfg("./expected/fluentd-cluster-cfg-output-datadog.cfg")))).To(Equal(config)) + + i++ + } +} + func Test_MixedCfgs2OpenSearch(t *testing.T) { g := NewGomegaWithT(t) sl := plugins.NewSecretLoader(nil, Fluentd.Namespace, logr.Logger{}) diff --git a/apis/fluentd/v1alpha1/tests/tools.go b/apis/fluentd/v1alpha1/tests/tools.go index 817a97a26..9b75a5d6e 100644 --- a/apis/fluentd/v1alpha1/tests/tools.go +++ b/apis/fluentd/v1alpha1/tests/tools.go @@ -399,6 +399,22 @@ spec: webIdentityTokenFile: /var/run/secrets/something/token ` + FluentdClusterOutput2Datadog fluentdv1alpha1.ClusterOutput + FluentdClusterOutput2DatadogRaw = ` +apiVersion: fluentd.fluent.io/v1alpha1 +kind: ClusterOutput +metadata: + name: fluentd-output-datadog + labels: + output.fluentd.fluent.io/enabled: "true" +spec: + outputs: + - datadog: + host: http-intake.logs.datadoghq.com + port: 443 + ddSource: kubernetes + ddSourcecategory: kubernetes +` once sync.Once ) @@ -532,6 +548,7 @@ func init() { ParseIntoObject(FluentdOutputUser1Raw, &FluentdOutputUser1) ParseIntoObject(FluentdClusterOutputCustomRaw, &FluentdClusterOutputCustom) ParseIntoObject(FluentdClusterOutput2CloudWatchRaw, &FluentdClusterOutput2CloudWatch) + ParseIntoObject(FluentdClusterOutput2DatadogRaw, &FluentdClusterOutput2Datadog) }, ) } diff --git a/apis/fluentd/v1alpha1/zz_generated.deepcopy.go b/apis/fluentd/v1alpha1/zz_generated.deepcopy.go index 5045ab905..4720d5b6e 100644 --- a/apis/fluentd/v1alpha1/zz_generated.deepcopy.go +++ b/apis/fluentd/v1alpha1/zz_generated.deepcopy.go @@ -694,6 +694,16 @@ func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DefaultFilterSelector != nil { + in, out := &in.DefaultFilterSelector, &out.DefaultFilterSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.DefaultOutputSelector != nil { + in, out := &in.DefaultOutputSelector, 
&out.DefaultOutputSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) @@ -790,6 +800,11 @@ func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) { } } in.Service.DeepCopyInto(&out.Service) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec. diff --git a/apis/generated/clientset/versioned/fake/register.go b/apis/generated/clientset/versioned/fake/register.go index bca854472..733f848f2 100644 --- a/apis/generated/clientset/versioned/fake/register.go +++ b/apis/generated/clientset/versioned/fake/register.go @@ -38,14 +38,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
diff --git a/apis/generated/clientset/versioned/scheme/register.go b/apis/generated/clientset/versioned/scheme/register.go index 3ed90362c..2c72f75c3 100644 --- a/apis/generated/clientset/versioned/scheme/register.go +++ b/apis/generated/clientset/versioned/scheme/register.go @@ -38,14 +38,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/charts/fluent-operator/Chart.yaml b/charts/fluent-operator/Chart.yaml index 05c02ce7b..ec19fcbb8 100644 --- a/charts/fluent-operator/Chart.yaml +++ b/charts/fluent-operator/Chart.yaml @@ -15,12 +15,12 @@ description: A Helm chart for Kubernetes # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.2.1 +version: 2.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. 
They should reflect the version the application is using. -appVersion: 2.2.0 +appVersion: 2.3.0 dependencies: - name: fluent-bit-crds diff --git a/charts/fluent-operator/charts/fluent-bit-crds/Chart.yaml b/charts/fluent-operator/charts/fluent-bit-crds/Chart.yaml index 0bfaec58f..81c3e8b4e 100644 --- a/charts/fluent-operator/charts/fluent-bit-crds/Chart.yaml +++ b/charts/fluent-operator/charts/fluent-bit-crds/Chart.yaml @@ -14,10 +14,10 @@ description: A Helm chart delivering fluenbt-bit controller CRDS # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.1.0 +version: 2.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "2.1.0" +appVersion: "2.3.0" diff --git a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_clusteroutputs.yaml b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_clusteroutputs.yaml index a51b4eea1..6e1f283b2 100644 --- a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_clusteroutputs.yaml +++ b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_clusteroutputs.yaml @@ -1297,6 +1297,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. 
Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. 
Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -2160,6 +2215,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. 
+ type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. 
For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: diff --git a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_collectors.yaml b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_collectors.yaml index be86399d4..a2b9b26e6 100644 --- a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_collectors.yaml +++ b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_collectors.yaml @@ -1390,6 +1390,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for the + Fluentbit collector pods + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: diff --git a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_fluentbits.yaml b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_fluentbits.yaml index 70e5e6da9..f32e7f267 100644 --- a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_fluentbits.yaml +++ b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_fluentbits.yaml @@ -883,6 +883,166 @@ spec: containerLogRealPath: description: Container log path type: string + containerSecurityContext: + description: ContainerSecurityContext holds container-level security + attributes. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. Note that this field cannot be set when spec.os.name + is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. 
Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. 
All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object disableService: description: DisableService tells if the fluentbit service should be deployed. @@ -4302,6 +4462,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluent-bit + pods. + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: diff --git a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_outputs.yaml b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_outputs.yaml index e2914257f..193401433 100644 --- a/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_outputs.yaml +++ b/charts/fluent-operator/charts/fluent-bit-crds/crds/fluentbit.fluent.io_outputs.yaml @@ -1297,6 +1297,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. 
+ type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -2160,6 +2215,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. 
+ properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. 
+ type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. 
+ type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: diff --git a/charts/fluent-operator/charts/fluentd-crds/Chart.yaml b/charts/fluent-operator/charts/fluentd-crds/Chart.yaml index 8252d4081..f5730348f 100644 --- a/charts/fluent-operator/charts/fluentd-crds/Chart.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/Chart.yaml @@ -14,10 +14,10 @@ description: A Helm chart delivering fluentd controller CRDS # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.1.0 +version: 2.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "2.1.0" +appVersion: "2.3.0" diff --git a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusterfilters.yaml b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusterfilters.yaml index 5b99a857c..bb9e30bda 100644 --- a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusterfilters.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusterfilters.yaml @@ -180,7 +180,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -194,11 +194,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -338,7 +338,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -348,11 +348,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -392,7 +392,7 @@ spec: timeFormat: description: Process value according to the specified format. This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -402,11 +402,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusteroutputs.yaml b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusteroutputs.yaml index 933e37e3a..4cb5f060c 100644 --- a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusteroutputs.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_clusteroutputs.yaml @@ -200,7 +200,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -210,11 +210,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -413,6 +413,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -549,7 +672,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -559,11 +682,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1241,7 +1364,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1251,11 +1374,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_filters.yaml b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_filters.yaml index 02345496d..7892d81e4 100644 --- a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_filters.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_filters.yaml @@ -180,7 +180,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -194,11 +194,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -338,7 +338,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -348,11 +348,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -392,7 +392,7 @@ spec: timeFormat: description: Process value according to the specified format. 
This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -402,11 +402,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_fluentds.yaml b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_fluentds.yaml index 5716cdaf8..952415f70 100644 --- a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_fluentds.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_fluentds.yaml @@ -1262,6 +1262,98 @@ spec: type: object type: object type: object + defaultFilterSelector: + description: Select cluster filter plugins used to filter for the + default cluster output + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + defaultOutputSelector: + description: Select cluster output plugins used to send all logs that + did not match a route to the matching outputs + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic disableService: description: By default will build the related service according to the globalinputs definition. @@ -1747,7 +1839,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. 
This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1761,11 +1853,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -1996,7 +2088,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -2010,11 +2102,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -2140,6 +2232,17 @@ spec: type: object x-kubernetes-map-type: atomic type: array + logLevel: + default: info + description: Global logging verbosity + enum: + - fatal + - error + - warn + - info + - debug + - trace + type: string nodeSelector: additionalProperties: type: string @@ -2253,6 +2356,179 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluentd + pods. + type: string + securityContext: + description: PodSecurityContext represents the security context for + the fluentd pods. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. 
The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. 
Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object service: description: Service represents configurations on the fluentd service. properties: diff --git a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_outputs.yaml b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_outputs.yaml index 43350b539..8a2e1cdad 100644 --- a/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_outputs.yaml +++ b/charts/fluent-operator/charts/fluentd-crds/crds/fluentd.fluent.io_outputs.yaml @@ -200,7 +200,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -210,11 +210,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -413,6 +413,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -549,7 +672,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -559,11 +682,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1241,7 +1364,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1251,11 +1374,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/charts/fluent-operator/templates/_helpers.tpl b/charts/fluent-operator/templates/_helpers.tpl index 7805f0ea4..50e5cf2e0 100644 --- a/charts/fluent-operator/templates/_helpers.tpl +++ b/charts/fluent-operator/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "fluentbit-operator.name" -}} +{{- define "fluent-operator.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
If release name contains chart name it will be used as a full name. */}} -{{- define "fluentbit-operator.fullname" -}} +{{- define "fluent-operator.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} @@ -24,19 +24,30 @@ If release name contains chart name it will be used as a full name. {{- end }} {{- end }} +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "fluent-operator.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + {{/* Create chart name and version as used by the chart label. */}} -{{- define "fluentbit-operator.chart" -}} +{{- define "fluent-operator.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "fluentbit-operator.labels" -}} -helm.sh/chart: {{ include "fluentbit-operator.chart" . }} -{{ include "fluentbit-operator.selectorLabels" . }} +{{- define "fluent-operator.labels" -}} +helm.sh/chart: {{ include "fluent-operator.chart" . }} +{{ include "fluent-operator.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -46,17 +57,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "fluentbit-operator.selectorLabels" -}} -app.kubernetes.io/name: {{ include "fluentbit-operator.name" . }} +{{- define "fluent-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "fluent-operator.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} -{{- define "fluentbit-operator.serviceAccountName" -}} +{{- define "fluent-operator.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} -{{- default (include "fluentbit-operator.fullname" .) .Values.serviceAccount.name }} +{{- default (include "fluent-operator.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} diff --git a/charts/fluent-operator/templates/fluent-operator-deployment.yaml b/charts/fluent-operator/templates/fluent-operator-deployment.yaml index bca4c2a83..68eaafbf5 100644 --- a/charts/fluent-operator/templates/fluent-operator-deployment.yaml +++ b/charts/fluent-operator/templates/fluent-operator-deployment.yaml @@ -40,6 +40,8 @@ spec: - '-c' - set -ex; echo CONTAINER_ROOT_DIR=$(docker info -f '{{`{{.DockerRootDir}}`}}' 2> /dev/null) > /fluent-operator/fluent-bit.env + resources: + {{- toYaml .Values.operator.initcontainer.resources | nindent 10 }} volumeMounts: - name: env mountPath: /fluent-operator @@ -58,6 +60,8 @@ spec: - '-c' - set -ex; echo CONTAINER_ROOT_DIR={{ .Values.operator.logPath.containerd }} > /fluent-operator/fluent-bit.env + resources: + {{- toYaml .Values.operator.initcontainer.resources | nindent 10 }} volumeMounts: - name: env mountPath: /fluent-operator @@ -73,6 +77,8 @@ spec: - '-c' - set -ex; echo CONTAINER_ROOT_DIR={{ .Values.operator.logPath.crio }} > /fluent-operator/fluent-bit.env + resources: + {{- toYaml .Values.operator.initcontainer.resources | nindent 10 }} volumeMounts: - name: env mountPath: /fluent-operator @@ -80,6 +86,10 @@ spec: containers: - name: fluent-operator image: {{ .Values.operator.container.repository }}:{{ .Values.operator.container.tag }} + {{- if .Values.operator.securityContext }} + securityContext: + {{ toYaml .Values.operator.securityContext | nindent 10 }} + {{- end }} resources: {{- toYaml 
.Values.operator.resources | nindent 10 }} env: @@ -111,6 +121,6 @@ spec: {{ toYaml .Values.operator.nodeSelector | nindent 8 }} {{- end }} {{- if .Values.operator.podSecurityContext }} - podSecurityContext: + securityContext: {{ toYaml .Values.operator.podSecurityContext | nindent 8 }} {{- end }} diff --git a/charts/fluent-operator/templates/fluentbit-clusterinput-systemd.yaml b/charts/fluent-operator/templates/fluentbit-clusterinput-systemd.yaml index 7d1aa8952..8912f2105 100644 --- a/charts/fluent-operator/templates/fluentbit-clusterinput-systemd.yaml +++ b/charts/fluent-operator/templates/fluentbit-clusterinput-systemd.yaml @@ -14,13 +14,19 @@ spec: path: {{ .Values.fluentbit.input.systemd.path }} db: /fluent-bit/tail/systemd.db dbSync: Normal + stripUnderscores: {{ .Values.fluentbit.input.systemd.stripUnderscores | quote }} systemdFilter: +{{- if .Values.fluentbit.input.systemd.systemdFilter.enable }} - _SYSTEMD_UNIT={{ .Values.containerRuntime }}.service {{- if .Values.fluentbit.input.systemd.includeKubelet }} - _SYSTEMD_UNIT=kubelet.service {{- end }} - storageType: { { .Values.fluentbit.input.systemd.storageType } } - pauseOnChunksOverlimit: { { .Values.fluentbit.input.systemd.pauseOnChunksOverlimit } } + {{- if .Values.fluentbit.input.systemd.systemdFilter.filters }} +{{- toYaml .Values.fluentbit.input.systemd.systemdFilter.filters | nindent 6 }} + {{- end }} +{{- end }} + storageType: {{ .Values.fluentbit.input.systemd.storageType }} + pauseOnChunksOverlimit: {{ .Values.fluentbit.input.systemd.pauseOnChunksOverlimit }} {{- end }} {{- end }} {{- end }} diff --git a/charts/fluent-operator/templates/fluentbit-fluentBit.yaml b/charts/fluent-operator/templates/fluentbit-fluentBit.yaml index b538a90a0..a09c648cd 100644 --- a/charts/fluent-operator/templates/fluentbit-fluentBit.yaml +++ b/charts/fluent-operator/templates/fluentbit-fluentBit.yaml @@ -26,6 +26,10 @@ spec: envVars: {{ toYaml .Values.fluentbit.envVars | indent 4 }} {{- end }} +{{- if 
.Values.fluentbit.securityContext }} + containerSecurityContext: + {{ toYaml .Values.fluentbit.securityContext | nindent 4 }} +{{- end }} {{- with .Values.fluentbit.tolerations }} tolerations: {{ toYaml . | indent 4 }} @@ -40,6 +44,9 @@ spec: {{- with .Values.fluentbit.affinity }} affinity: {{ toYaml . | indent 4 }} + {{- end }} + {{- if .Values.fluentbit.schedulerName }} + schedulerName: {{ .Values.fluentbit.schedulerName }} {{- end }} {{- if .Values.fluentbit.secrets }} secrets: @@ -66,5 +73,9 @@ spec: labels: {{ toYaml .Values.fluentbit.labels | indent 4 }} {{- end }} + {{- if .Values.fluentbit.podSecurityContext }} + securityContext: +{{ toYaml .Values.fluentbit.podSecurityContext | nindent 4 }} + {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/charts/fluent-operator/templates/fluentd-fluentd.yaml b/charts/fluent-operator/templates/fluentd-fluentd.yaml index 6db6b1174..5f5199cb1 100644 --- a/charts/fluent-operator/templates/fluentd-fluentd.yaml +++ b/charts/fluent-operator/templates/fluentd-fluentd.yaml @@ -18,5 +18,11 @@ spec: fluentdCfgSelector: matchLabels: config.fluentd.fluent.io/enabled: "true" + {{- if .Values.fluentd.schedulerName }} + schedulerName: {{ .Values.fluentd.schedulerName }} + {{- end }} + {{- if .Values.fluentd.logLevel }} + logLevel: {{ .Values.fluentd.logLevel }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/fluent-operator/values.yaml b/charts/fluent-operator/values.yaml index d98da471e..7812ecaab 100644 --- a/charts/fluent-operator/values.yaml +++ b/charts/fluent-operator/values.yaml @@ -14,6 +14,14 @@ operator: initcontainer: repository: "docker" tag: "20.10" + + resources: + limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 50m + memory: 64Mi container: repository: "kubesphere/fluent-operator" tag: "latest" @@ -25,6 +33,8 @@ operator: priorityClassName: "" # Pod security context for Fluent Operator. 
Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ podSecurityContext: {} + # Container security context for Fluent Operator container. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} # Fluent Operator resources. Usually user needn't to adjust these. resources: limits: @@ -61,7 +71,7 @@ fluentbit: enable: true image: repository: "kubesphere/fluent-bit" - tag: "v2.1.3" + tag: "v2.1.4" # fluentbit resources. If you do want to specify resources, adjust them as necessary # You can adjust it based on the log volume. resources: @@ -87,6 +97,10 @@ fluentbit: imagePullSecrets: [] # - name: "image-pull-secret" secrets: [] + # Pod security context for Fluent Bit pods. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + podSecurityContext: {} + # Security context for Fluent Bit container. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} # List of volumes that can be mounted by containers belonging to the pod. additionalVolumes: [] # Pod volumes to mount into the container's filesystem. @@ -102,14 +116,15 @@ fluentbit: # nodeSelector configuration for Fluent Bit pods. Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # Node tolerations applied to Fluent Bit pods. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - tolerations: - - operator: Exists + tolerations: + - operator: Exists # Priority Class applied to Fluent Bit pods. 
Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass priorityClassName: "" # Environment variables that can be passed to fluentbit pods envVars: [] # - name: FOO # value: "bar" + schedulerName: "" # Remove the above empty volumes and volumesMounts, and then set additionalVolumes and additionalVolumesMounts as below if you want to collect node exporter metrics # additionalVolumes: @@ -149,10 +164,14 @@ fluentbit: pauseOnChunksOverlimit: off systemd: enable: true + systemdFilter: + enable: true + filters: [] path: "/var/log/journal" includeKubelet: true storageType: memory pauseOnChunksOverlimit: off + stripUnderscores: "off" nodeExporterMetrics: {} # uncomment below nodeExporterMetrics section if you want to collect node exporter metrics # nodeExporterMetrics: @@ -219,9 +238,9 @@ fluentbit: kubeedge: enable: false prometheusRemoteWrite: - # Change the host to the address of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data + # Change the host to the address of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data host: "" - # Change the port to the port of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data + # Change the port to the port of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data port: "" fluentd: @@ -246,6 +265,8 @@ fluentd: requests: cpu: 100m memory: 128Mi + schedulerName: "" + logLevel: "" # Configure the output plugin parameter in Fluentd. # Fluentd is disabled by default, if you enable it make sure to also set up an output to use. 
output: diff --git a/cmd/doc-gen/main.go b/cmd/doc-gen/main.go index 3153ad940..83ae08adf 100644 --- a/cmd/doc-gen/main.go +++ b/cmd/doc-gen/main.go @@ -55,7 +55,7 @@ type DocumentsLocation struct { // Inspired by coreos/prometheus-operator: https://github.com/coreos/prometheus-operator func main() { - var pluginsLoactions = []DocumentsLocation{ + var pluginsLocations = []DocumentsLocation{ { path: fluentbitPluginPath, name: "fluentbit", @@ -65,9 +65,9 @@ func main() { name: "fluentd", }, } - plugins(pluginsLoactions) + plugins(pluginsLocations) - var crdsLoactions = []DocumentsLocation{ + var crdsLocations = []DocumentsLocation{ { path: fluentbitCrdsPath, name: "fluentbit", @@ -77,7 +77,7 @@ func main() { name: "fluentd", }, } - crds(crdsLoactions) + crds(crdsLocations) } func plugins(docsLocations []DocumentsLocation) { @@ -140,8 +140,8 @@ func plugins(docsLocations []DocumentsLocation) { } } -func crds(docsLoactions []DocumentsLocation) { - for _, dl := range docsLoactions { +func crds(docsLocations []DocumentsLocation) { + for _, dl := range docsLocations { var srcs []string err := filepath.Walk(dl.path, func(path string, info os.FileInfo, err error) error { if err != nil { diff --git a/cmd/fluent-watcher/fluentbit/Dockerfile b/cmd/fluent-watcher/fluentbit/Dockerfile index fb65abff8..b87cba03d 100644 --- a/cmd/fluent-watcher/fluentbit/Dockerfile +++ b/cmd/fluent-watcher/fluentbit/Dockerfile @@ -6,7 +6,7 @@ WORKDIR /code RUN echo $(ls -al /code) RUN CGO_ENABLED=0 go build -ldflags '-w -s' -o /fluent-bit/fluent-bit /code/cmd/fluent-watcher/fluentbit/main.go -FROM fluent/fluent-bit:2.1.3 +FROM fluent/fluent-bit:2.1.4 LABEL Description="Fluent Bit docker image" Vendor="Fluent" Version="1.0" COPY conf/fluent-bit.conf conf/parsers.conf /fluent-bit/etc/ diff --git a/cmd/fluent-watcher/fluentbit/Dockerfile.debug b/cmd/fluent-watcher/fluentbit/Dockerfile.debug index acbe5ad66..045f40481 100644 --- a/cmd/fluent-watcher/fluentbit/Dockerfile.debug +++ 
b/cmd/fluent-watcher/fluentbit/Dockerfile.debug @@ -6,7 +6,7 @@ WORKDIR /code RUN echo $(ls -al /code) RUN CGO_ENABLED=0 go build -ldflags '-w -s' -o /fluent-bit/fluent-bit /code/cmd/fluent-watcher/fluentbit/main.go -FROM fluent/fluent-bit:2.1.3-debug +FROM fluent/fluent-bit:2.1.4-debug LABEL Description="Fluent Bit docker image" Vendor="Fluent" Version="1.0" COPY conf/fluent-bit.conf conf/parsers.conf /fluent-bit/etc/ diff --git a/cmd/fluent-watcher/fluentd/Dockerfile.amd64 b/cmd/fluent-watcher/fluentd/Dockerfile.amd64 index 8fa3aa5bd..d66583920 100644 --- a/cmd/fluent-watcher/fluentd/Dockerfile.amd64 +++ b/cmd/fluent-watcher/fluentd/Dockerfile.amd64 @@ -49,6 +49,7 @@ RUN apk update \ fluent-plugin-opensearch \ fluent-plugin-grafana-loki \ fluent-plugin-cloudwatch-logs \ + fluent-plugin-datadog \ && apk del .build-deps \ && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem /usr/lib/ruby/gems/2.*/gems/fluentd-*/test diff --git a/cmd/fluent-watcher/fluentd/Dockerfile.arm64 b/cmd/fluent-watcher/fluentd/Dockerfile.arm64 index 9cf6e7624..c7a4535be 100644 --- a/cmd/fluent-watcher/fluentd/Dockerfile.arm64 +++ b/cmd/fluent-watcher/fluentd/Dockerfile.arm64 @@ -55,6 +55,7 @@ RUN apt-get update \ fluent-plugin-multi-format-parser \ fluent-plugin-aws-elasticsearch-service \ fluent-plugin-opensearch \ + fluent-plugin-datadog \ && echo "plugin installed." 
\ && wget -O /tmp/jemalloc-5.3.0.tar.bz2 https://github.com/jemalloc/jemalloc/releases/download/5.3.0/jemalloc-5.3.0.tar.bz2 \ && cd /tmp && tar -xjf jemalloc-5.3.0.tar.bz2 && cd jemalloc-5.3.0/ \ diff --git a/cmd/fluent-watcher/fluentd/Dockerfile.arm64.base b/cmd/fluent-watcher/fluentd/Dockerfile.arm64.base index 53c801992..41f01e95e 100644 --- a/cmd/fluent-watcher/fluentd/Dockerfile.arm64.base +++ b/cmd/fluent-watcher/fluentd/Dockerfile.arm64.base @@ -45,6 +45,7 @@ RUN apt-get update \ fluent-plugin-multi-format-parser \ fluent-plugin-aws-elasticsearch-service \ fluent-plugin-opensearch \ + fluent-plugin-datadog \ && wget -O /tmp/jemalloc-4.5.0.tar.bz2 https://github.com/jemalloc/jemalloc/releases/download/4.5.0/jemalloc-4.5.0.tar.bz2 \ && cd /tmp && tar -xjf jemalloc-4.5.0.tar.bz2 && cd jemalloc-4.5.0/ \ && ./configure && make \ diff --git a/cmd/fluent-watcher/fluentd/base/Dockerfile b/cmd/fluent-watcher/fluentd/base/Dockerfile index 7c497ff44..3961f71a1 100644 --- a/cmd/fluent-watcher/fluentd/base/Dockerfile +++ b/cmd/fluent-watcher/fluentd/base/Dockerfile @@ -29,6 +29,7 @@ RUN apk update \ && gem install elasticsearch -v 7.13.3 \ && gem install elasticsearch-xpack -v 7.13.3 \ && gem install fluent-plugin-detect-exceptions -v 0.0.14 \ + && gem install fluent-plugin-opensearch -v 1.1.0 \ && gem install \ fluent-plugin-s3 \ fluent-plugin-rewrite-tag-filter \ @@ -40,7 +41,6 @@ RUN apk update \ fluent-plugin-record-modifier \ fluent-plugin-multi-format-parser \ fluent-plugin-aws-elasticsearch-service \ - fluent-plugin-opensearch \ fluent-plugin-grafana-loki \ && apk del .build-deps \ && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem /usr/lib/ruby/gems/3.*/gems/fluentd-*/test diff --git a/cmd/upgrade/upgrade.sh b/cmd/upgrade/upgrade.sh new file mode 100644 index 000000000..76ee81b4c --- /dev/null +++ b/cmd/upgrade/upgrade.sh @@ -0,0 +1,80 @@ +#!/bin/bash +namespace=kubesphere-logging-system +FluentbitOperator="fluentbit-operator" + +function error_exit { 
+ echo "$1" 1>&2 + exit 1 +} + +function migrate(){ +## Converting an existing configuration to a new one +local oldKind=$1 +local newKind=$2 +local list=$(kubectl get $oldKind.logging.kubesphere.io -A -o json) || error_exit "Cannot get resource $oldKind" +local name=($(echo $list | jq -r '.items[].metadata.name | @json')) +local labels=($(echo $list | jq -r '.items[].metadata.labels | @json')) +local spec=($(echo $list | jq -r '.items[].spec | @json')) +local ns=($(echo $list | jq -r '.items[].metadata.namespace | @json')) +local size=${#spec[*]} +echo "Number of original $oldKind configuration files:$size" +for((i=0;i<${size};i++));do +if [[ "${oldKind}" = "FluentBit" ]]; then +cluster_resource_list[i]="{ +\"apiVersion\": \"fluentbit.fluent.io/v1alpha2\", +\"kind\": \"${newKind}\", +\"metadata\": { +\"name\": ${name[i]}, +\"labels\": ${labels[i]}, +\"namespace\": ${ns[i]} +}, +\"spec\": ${spec[i]} +}" +else +cluster_resource_list[i]="{ +\"apiVersion\": \"fluentbit.fluent.io/v1alpha2\", +\"kind\": \"${newKind}\", +\"metadata\": { +\"name\": ${name[i]}, +\"labels\": ${labels[i]} +}, +\"spec\": ${spec[i]} +}" +fi +done + +## Uninstall the fluentbit-operator and the original configuration +for((i=0;i<${size};i++));do +echo "${name[i]}" + temp=$(echo ${name[i]} | sed 's/"//g') + echo "$temp" + kubectl delete $oldKind.logging.kubesphere.io $temp -n ${namespace} +done + +for((i=0;i<${size};i++));do +echo ${cluster_resource_list[i]} | kubectl apply -f - || error_exit "Cannot apply resource $oldKind" +done +} + +migrate "Input" "ClusterInput" +migrate "Parser" "ClusterParser" +migrate "Filter" "ClusterFilter" +migrate "Output" "ClusterOutput" +migrate "FluentbitConfig" "ClusterFluentBitConfig" +migrate "FluentBit" "FluentBit" + +# Determine if Deployment exists +if kubectl get deployment -n $namespace $FluentbitOperator >/dev/null 2>&1; then + # Delete Deployment if it exists + kubectl delete deployment -n $namespace $FluentbitOperator + kubectl delete clusterrolebinding
kubesphere:operator:fluentbit-operator + kubectl delete clusterrole kubesphere:operator:fluentbit-operator + kubectl delete serviceaccount fluentbit-operator -n $namespace + echo "Deployment $FluentbitOperator deleted" +else + # If it does not exist, output the message + echo "Deployment $FluentbitOperator does not exist" +fi + +## Delete the old crd +kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "logging.kubesphere.io" | xargs -I crd_name kubectl delete crd crd_name diff --git a/config/crd/bases/fluentbit.fluent.io_clusteroutputs.yaml b/config/crd/bases/fluentbit.fluent.io_clusteroutputs.yaml index a51b4eea1..6e1f283b2 100644 --- a/config/crd/bases/fluentbit.fluent.io_clusteroutputs.yaml +++ b/config/crd/bases/fluentbit.fluent.io_clusteroutputs.yaml @@ -1297,6 +1297,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. 
+ type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -2160,6 +2215,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. 
+ type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. 
If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: diff --git a/config/crd/bases/fluentbit.fluent.io_collectors.yaml b/config/crd/bases/fluentbit.fluent.io_collectors.yaml index be86399d4..a2b9b26e6 100644 --- a/config/crd/bases/fluentbit.fluent.io_collectors.yaml +++ b/config/crd/bases/fluentbit.fluent.io_collectors.yaml @@ -1390,6 +1390,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for the + Fluentbit collector pods + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. 
items: diff --git a/config/crd/bases/fluentbit.fluent.io_fluentbits.yaml b/config/crd/bases/fluentbit.fluent.io_fluentbits.yaml index 70e5e6da9..f32e7f267 100644 --- a/config/crd/bases/fluentbit.fluent.io_fluentbits.yaml +++ b/config/crd/bases/fluentbit.fluent.io_fluentbits.yaml @@ -883,6 +883,166 @@ spec: containerLogRealPath: description: Container log path type: string + containerSecurityContext: + description: ContainerSecurityContext holds container-level security + attributes. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. Note that this field cannot be set when spec.os.name + is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. 
+ This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object disableService: description: DisableService tells if the fluentbit service should be deployed. @@ -4302,6 +4462,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluent-bit + pods. + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: diff --git a/config/crd/bases/fluentbit.fluent.io_outputs.yaml b/config/crd/bases/fluentbit.fluent.io_outputs.yaml index e2914257f..193401433 100644 --- a/config/crd/bases/fluentbit.fluent.io_outputs.yaml +++ b/config/crd/bases/fluentbit.fluent.io_outputs.yaml @@ -1297,6 +1297,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. 
+ properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. 
+ type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -2160,6 +2215,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. 
This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. 
+ Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: diff --git a/config/crd/bases/fluentd.fluent.io_clusterfilters.yaml b/config/crd/bases/fluentd.fluent.io_clusterfilters.yaml index 5b99a857c..bb9e30bda 100644 --- a/config/crd/bases/fluentd.fluent.io_clusterfilters.yaml +++ b/config/crd/bases/fluentd.fluent.io_clusterfilters.yaml @@ -180,7 +180,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -194,11 +194,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -338,7 +338,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -348,11 +348,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -392,7 +392,7 @@ spec: timeFormat: description: Process value according to the specified format. 
This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -402,11 +402,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/config/crd/bases/fluentd.fluent.io_clusteroutputs.yaml b/config/crd/bases/fluentd.fluent.io_clusteroutputs.yaml index 933e37e3a..4cb5f060c 100644 --- a/config/crd/bases/fluentd.fluent.io_clusteroutputs.yaml +++ b/config/crd/bases/fluentd.fluent.io_clusteroutputs.yaml @@ -200,7 +200,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -210,11 +210,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -413,6 +413,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. 
+ format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. + type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -549,7 +672,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -559,11 +682,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1241,7 +1364,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1251,11 +1374,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/config/crd/bases/fluentd.fluent.io_filters.yaml b/config/crd/bases/fluentd.fluent.io_filters.yaml index 02345496d..7892d81e4 100644 --- a/config/crd/bases/fluentd.fluent.io_filters.yaml +++ b/config/crd/bases/fluentd.fluent.io_filters.yaml @@ -180,7 +180,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -194,11 +194,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -338,7 +338,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -348,11 +348,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -392,7 +392,7 @@ spec: timeFormat: description: Process value according to the specified format. 
This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -402,11 +402,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/config/crd/bases/fluentd.fluent.io_fluentds.yaml b/config/crd/bases/fluentd.fluent.io_fluentds.yaml index 5716cdaf8..952415f70 100644 --- a/config/crd/bases/fluentd.fluent.io_fluentds.yaml +++ b/config/crd/bases/fluentd.fluent.io_fluentds.yaml @@ -1262,6 +1262,98 @@ spec: type: object type: object type: object + defaultFilterSelector: + description: Select cluster filter plugins used to filter for the + default cluster output + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + defaultOutputSelector: + description: Select cluster output plugins used to send all logs that + did not match a route to the matching outputs + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic disableService: description: By default will build the related service according to the globalinputs definition. @@ -1747,7 +1839,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. 
This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1761,11 +1853,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -1996,7 +2088,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -2010,11 +2102,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -2140,6 +2232,17 @@ spec: type: object x-kubernetes-map-type: atomic type: array + logLevel: + default: info + description: Global logging verbosity + enum: + - fatal + - error + - warn + - info + - debug + - trace + type: string nodeSelector: additionalProperties: type: string @@ -2253,6 +2356,179 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluentd + pods. + type: string + securityContext: + description: PodSecurityContext represents the security context for + the fluentd pods. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. 
The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. 
Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object service: description: Service represents configurations on the fluentd service. properties: diff --git a/config/crd/bases/fluentd.fluent.io_outputs.yaml b/config/crd/bases/fluentd.fluent.io_outputs.yaml index 43350b539..8a2e1cdad 100644 --- a/config/crd/bases/fluentd.fluent.io_outputs.yaml +++ b/config/crd/bases/fluentd.fluent.io_outputs.yaml @@ -200,7 +200,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -210,11 +210,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -413,6 +413,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -549,7 +672,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -559,11 +682,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1241,7 +1364,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1251,11 +1374,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/controllers/collector_controller.go b/controllers/collector_controller.go index 7e635866e..833b94331 100644 --- a/controllers/collector_controller.go +++ b/controllers/collector_controller.go @@ -23,6 +23,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -92,40 +93,30 @@ func (r *CollectorReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // Install RBAC resources for the filter plugin kubernetes - var rbacObj, saObj, bindingObj client.Object - rbacObj, saObj, 
bindingObj = operator.MakeRBACObjects(co.Name, co.Namespace, "collector", co.Spec.RBACRules, co.Spec.ServiceAccountAnnotations) - // Set ServiceAccount's owner to this fluentbit - if err := ctrl.SetControllerReference(&co, saObj, r.Scheme); err != nil { + cr, sa, crb := operator.MakeRBACObjects(co.Name, co.Namespace, "collector", co.Spec.RBACRules, co.Spec.ServiceAccountAnnotations) + // Deploy Fluent Bit Collector ClusterRole + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, cr, r.mutate(cr, &co)); err != nil { return ctrl.Result{}, err } - if err := r.Create(ctx, rbacObj); err != nil && !errors.IsAlreadyExists(err) { + // Deploy Fluent Bit Collector ClusterRoleBinding + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, crb, r.mutate(crb, &co)); err != nil { return ctrl.Result{}, err } - if err := r.Create(ctx, saObj); err != nil && !errors.IsAlreadyExists(err) { - return ctrl.Result{}, err - } - if err := r.Create(ctx, bindingObj); err != nil && !errors.IsAlreadyExists(err) { - return ctrl.Result{}, err - } - - // Deploy Fluent Bit Statefuset - sts := operator.MakefbStatefuset(co) - if err := ctrl.SetControllerReference(&co, sts, r.Scheme); err != nil { + // Deploy Fluent Bit Collector ServiceAccount + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sa, r.mutate(sa, &co)); err != nil { return ctrl.Result{}, err } - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sts, r.mutate(sts, co)); err != nil { + // Deploy Fluent Bit Collector Statefulset + sts := operator.MakefbStatefulset(co) + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sts, r.mutate(sts, &co)); err != nil { return ctrl.Result{}, err } - // Deploy collector Service + // Deploy Fluent Bit Collector Service if !co.Spec.DisableService { - svc := operator.MakeCollecotrService(co) - if err := ctrl.SetControllerReference(&co, svc, r.Scheme); err != nil { - return ctrl.Result{}, err - } - - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, svc, r.mutate(svc, 
co)); err != nil { + svc := operator.MakeCollectorService(co) + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, svc, r.mutate(svc, &co)); err != nil { return ctrl.Result{}, err } } @@ -133,35 +124,56 @@ func (r *CollectorReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } -func (r *CollectorReconciler) mutate(obj client.Object, co fluentbitv1alpha2.Collector) controllerutil.MutateFn { +func (r *CollectorReconciler) mutate(obj client.Object, co *fluentbitv1alpha2.Collector) controllerutil.MutateFn { switch o := obj.(type) { + case *rbacv1.ClusterRole: + expected, _, _ := operator.MakeRBACObjects(co.Name, co.Namespace, "collector", co.Spec.RBACRules, co.Spec.ServiceAccountAnnotations) + + return func() error { + o.Rules = expected.Rules + return nil + } + case *corev1.ServiceAccount: + _, expected, _ := operator.MakeRBACObjects(co.Name, co.Namespace, "collector", co.Spec.RBACRules, co.Spec.ServiceAccountAnnotations) + + return func() error { + o.Annotations = expected.Annotations + if err := ctrl.SetControllerReference(co, o, r.Scheme); err != nil { + return err + } + return nil + } + case *rbacv1.ClusterRoleBinding: + _, _, expected := operator.MakeRBACObjects(co.Name, co.Namespace, "collector", co.Spec.RBACRules, co.Spec.ServiceAccountAnnotations) + + return func() error { + o.RoleRef = expected.RoleRef + o.Subjects = expected.Subjects + return nil + } case *appsv1.StatefulSet: - expected := operator.MakefbStatefuset(co) + expected := operator.MakefbStatefulset(*co) return func() error { o.Labels = expected.Labels - o.Annotations = expected.Annotations o.Spec = expected.Spec - o.SetOwnerReferences(nil) - if err := ctrl.SetControllerReference(&co, o, r.Scheme); err != nil { + if err := ctrl.SetControllerReference(co, o, r.Scheme); err != nil { return err } return nil } case *corev1.Service: - expected := operator.MakeCollecotrService(co) + expected := operator.MakeCollectorService(*co) return func() error { o.Labels = 
expected.Labels o.Spec.Selector = expected.Spec.Selector o.Spec.Ports = expected.Spec.Ports - o.SetOwnerReferences(nil) - if err := ctrl.SetControllerReference(&co, o, r.Scheme); err != nil { + if err := ctrl.SetControllerReference(co, o, r.Scheme); err != nil { return err } return nil } - default: } @@ -169,18 +181,38 @@ func (r *CollectorReconciler) mutate(obj client.Object, co fluentbitv1alpha2.Col } func (r *CollectorReconciler) delete(ctx context.Context, co *fluentbitv1alpha2.Collector) error { - var sa corev1.ServiceAccount + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: co.Name, + Namespace: co.Namespace, + }, + } if err := r.Delete(ctx, &sa); err != nil && !errors.IsNotFound(err) { return err } + // TODO: clusterrole, clusterrolebinding - var svc corev1.Service - if err := r.Delete(ctx, &svc); err != nil && !errors.IsNotFound(err) { + sts := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: co.Name, + Namespace: co.Namespace, + }, + } + if err := r.Delete(ctx, &sts); err != nil && !errors.IsNotFound(err) { return err } - var sts appsv1.StatefulSet - if err := r.Delete(ctx, &sts); err != nil && !errors.IsNotFound(err) { + svcName := co.Name + if co.Spec.Service.Name != "" { + svcName = co.Spec.Service.Name + } + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: co.Namespace, + }, + } + if err := r.Delete(ctx, &svc); err != nil && !errors.IsNotFound(err) { return err } diff --git a/controllers/fluent_controller_finalizer.go b/controllers/fluent_controller_finalizer.go index 26756f9c7..f0ac6c84c 100644 --- a/controllers/fluent_controller_finalizer.go +++ b/controllers/fluent_controller_finalizer.go @@ -2,10 +2,12 @@ package controllers import ( "context" + rbacv1 "k8s.io/api/rbac/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -64,33 +66,34 @@ func (r *FluentdReconciler) handleFinalizer(ctx context.Context, instance *fluen } func (r *FluentdReconciler) delete(ctx context.Context, fd *fluentdv1alpha1.Fluentd) error { - var sa corev1.ServiceAccount - err := r.Get(ctx, client.ObjectKey{Namespace: fd.Namespace, Name: fd.Name}, &sa) - if err == nil { - if err := r.Delete(ctx, &sa); err != nil && !errors.IsNotFound(err) { - return err - } - } else if !errors.IsNotFound(err) { + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: fd.Name, + Namespace: fd.Namespace, + }, + } + if err := r.Delete(ctx, &sa); err != nil && !errors.IsNotFound(err) { return err } + // TODO: clusterrole, clusterrolebinding - var svc corev1.Service - err = r.Get(ctx, client.ObjectKey{Namespace: fd.Namespace, Name: fd.Name}, &svc) - if err == nil { - if err := r.Delete(ctx, &svc); err != nil && !errors.IsNotFound(err) { - return err - } - } else if !errors.IsNotFound(err) { + sts := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fd.Name, + Namespace: fd.Namespace, + }, + } + if err := r.Delete(ctx, &sts); err != nil && !errors.IsNotFound(err) { return err } - var sts appsv1.StatefulSet - err = r.Get(ctx, client.ObjectKey{Namespace: fd.Namespace, Name: fd.Name}, &sts) - if err == nil { - if err := r.Delete(ctx, &sts); err != nil && !errors.IsNotFound(err) { - return err - } - } else if !errors.IsNotFound(err) { + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fd.Name, + Namespace: fd.Namespace, + }, + } + if err := r.Delete(ctx, &svc); err != nil && !errors.IsNotFound(err) { return err } @@ -99,13 +102,38 @@ func (r *FluentdReconciler) delete(ctx context.Context, fd *fluentdv1alpha1.Flue func (r *FluentdReconciler) mutate(obj client.Object, fd *fluentdv1alpha1.Fluentd) controllerutil.MutateFn { switch o := obj.(type) { + case *rbacv1.ClusterRole: + expected, _, _ 
:= operator.MakeRBACObjects(fd.Name, fd.Namespace, "fluentd", fd.Spec.RBACRules, fd.Spec.ServiceAccountAnnotations) + + return func() error { + o.Rules = expected.Rules + return nil + } + case *corev1.ServiceAccount: + _, expected, _ := operator.MakeRBACObjects(fd.Name, fd.Namespace, "fluentd", fd.Spec.RBACRules, fd.Spec.ServiceAccountAnnotations) + + return func() error { + o.Labels = expected.Labels + o.Annotations = expected.Annotations + if err := ctrl.SetControllerReference(fd, o, r.Scheme); err != nil { + return err + } + return nil + } + case *rbacv1.ClusterRoleBinding: + _, _, expected := operator.MakeRBACObjects(fd.Name, fd.Namespace, "fluentd", fd.Spec.RBACRules, fd.Spec.ServiceAccountAnnotations) + + return func() error { + o.RoleRef = expected.RoleRef + o.Subjects = expected.Subjects + return nil + } case *appsv1.StatefulSet: expected := operator.MakeStatefulset(*fd) return func() error { o.Labels = expected.Labels o.Spec = expected.Spec - o.SetOwnerReferences(nil) if err := ctrl.SetControllerReference(fd, o, r.Scheme); err != nil { return err } @@ -118,13 +146,11 @@ func (r *FluentdReconciler) mutate(obj client.Object, fd *fluentdv1alpha1.Fluent o.Labels = expected.Labels o.Spec.Selector = expected.Spec.Selector o.Spec.Ports = expected.Spec.Ports - o.SetOwnerReferences(nil) if err := ctrl.SetControllerReference(fd, o, r.Scheme); err != nil { return err } return nil } - default: } diff --git a/controllers/fluentbit_controller.go b/controllers/fluentbit_controller.go index 368ebe844..9257c2b4d 100644 --- a/controllers/fluentbit_controller.go +++ b/controllers/fluentbit_controller.go @@ -97,45 +97,33 @@ func (r *FluentBitReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // Install RBAC resources for the filter plugin kubernetes - var rbacObj, saObj, bindingObj client.Object + var role, sa, binding client.Object if r.Namespaced { - rbacObj, saObj, bindingObj = operator.MakeScopedRBACObjects(fb.Name, fb.Namespace, 
fb.Spec.ServiceAccountAnnotations) + role, sa, binding = operator.MakeScopedRBACObjects(fb.Name, fb.Namespace, fb.Spec.ServiceAccountAnnotations) } else { - rbacObj, saObj, bindingObj = operator.MakeRBACObjects(fb.Name, fb.Namespace, "fluent-bit", fb.Spec.RBACRules, fb.Spec.ServiceAccountAnnotations) + role, sa, binding = operator.MakeRBACObjects(fb.Name, fb.Namespace, "fluent-bit", fb.Spec.RBACRules, fb.Spec.ServiceAccountAnnotations) } - // Set ServiceAccount's owner to this fluentbit - if err := ctrl.SetControllerReference(&fb, saObj, r.Scheme); err != nil { + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, role, r.mutate(role, &fb)); err != nil { return ctrl.Result{}, err } - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, rbacObj, r.mutate(rbacObj, fb)); err != nil { + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sa, r.mutate(sa, &fb)); err != nil { return ctrl.Result{}, err } - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, saObj, r.mutate(saObj, fb)); err != nil { - return ctrl.Result{}, err - } - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, bindingObj, r.mutate(bindingObj, fb)); err != nil { + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, binding, r.mutate(binding, &fb)); err != nil { return ctrl.Result{}, err } // Deploy Fluent Bit DaemonSet logPath := r.getContainerLogPath(fb) ds := operator.MakeDaemonSet(fb, logPath) - if err := ctrl.SetControllerReference(&fb, ds, r.Scheme); err != nil { - return ctrl.Result{}, err - } - - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, ds, r.mutate(ds, fb)); err != nil { + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, ds, r.mutate(ds, &fb)); err != nil { return ctrl.Result{}, err } // Deploy FluentBit Service if !fb.Spec.DisableService { svc := operator.MakeFluentbitService(fb) - if err := ctrl.SetControllerReference(&fb, svc, r.Scheme); err != nil { - return ctrl.Result{}, err - } - - if _, err := controllerutil.CreateOrPatch(ctx, 
r.Client, svc, r.mutate(svc, fb)); err != nil { + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, svc, r.mutate(svc, &fb)); err != nil { return ctrl.Result{}, err } } @@ -153,33 +141,31 @@ func (r *FluentBitReconciler) getContainerLogPath(fb fluentbitv1alpha2.FluentBit } } -func (r *FluentBitReconciler) mutate(obj client.Object, fb fluentbitv1alpha2.FluentBit) controllerutil.MutateFn { - logPath := r.getContainerLogPath(fb) +func (r *FluentBitReconciler) mutate(obj client.Object, fb *fluentbitv1alpha2.FluentBit) controllerutil.MutateFn { + logPath := r.getContainerLogPath(*fb) switch o := obj.(type) { case *appsv1.DaemonSet: - expected := operator.MakeDaemonSet(fb, logPath) + expected := operator.MakeDaemonSet(*fb, logPath) return func() error { o.Labels = expected.Labels o.Annotations = expected.Annotations o.Spec = expected.Spec - o.SetOwnerReferences(nil) - if err := ctrl.SetControllerReference(&fb, o, r.Scheme); err != nil { + if err := ctrl.SetControllerReference(fb, o, r.Scheme); err != nil { return err } return nil } case *corev1.Service: - expected := operator.MakeFluentbitService(fb) + expected := operator.MakeFluentbitService(*fb) return func() error { o.Labels = expected.Labels o.Spec.Selector = expected.Spec.Selector o.Spec.Ports = expected.Spec.Ports - o.SetOwnerReferences(nil) - if err := ctrl.SetControllerReference(&fb, o, r.Scheme); err != nil { + if err := ctrl.SetControllerReference(fb, o, r.Scheme); err != nil { return err } return nil @@ -188,25 +174,24 @@ func (r *FluentBitReconciler) mutate(obj client.Object, fb fluentbitv1alpha2.Flu expected, _, _ := operator.MakeScopedRBACObjects(fb.Name, fb.Namespace, fb.Spec.ServiceAccountAnnotations) return func() error { - o.Name = expected.Name o.Rules = expected.Rules + if err := ctrl.SetControllerReference(fb, o, r.Scheme); err != nil { + return err + } return nil } case *rbacv1.ClusterRole: expected, _, _ := operator.MakeRBACObjects(fb.Name, fb.Namespace, "fluent-bit", fb.Spec.RBACRules, 
fb.Spec.ServiceAccountAnnotations) return func() error { - o.Name = expected.Name o.Rules = expected.Rules return nil } - case *corev1.ServiceAccount: _, expected, _ := operator.MakeScopedRBACObjects(fb.Name, fb.Namespace, fb.Spec.ServiceAccountAnnotations) return func() error { - o.Name = expected.Name o.Annotations = expected.Annotations - if err := ctrl.SetControllerReference(&fb, o, r.Scheme); err != nil { + if err := ctrl.SetControllerReference(fb, o, r.Scheme); err != nil { return err } return nil @@ -214,20 +199,20 @@ func (r *FluentBitReconciler) mutate(obj client.Object, fb fluentbitv1alpha2.Flu case *rbacv1.RoleBinding: _, _, expected := operator.MakeScopedRBACObjects(fb.Name, fb.Namespace, fb.Spec.ServiceAccountAnnotations) return func() error { - o.Name = expected.Name o.Subjects = expected.Subjects o.RoleRef = expected.RoleRef + if err := ctrl.SetControllerReference(fb, o, r.Scheme); err != nil { + return err + } return nil } case *rbacv1.ClusterRoleBinding: _, _, expected := operator.MakeRBACObjects(fb.Name, fb.Namespace, "fluent-bit", fb.Spec.RBACRules, fb.Spec.ServiceAccountAnnotations) return func() error { - o.Name = expected.Name o.Subjects = expected.Subjects o.RoleRef = expected.RoleRef return nil } - default: } @@ -235,23 +220,57 @@ func (r *FluentBitReconciler) mutate(obj client.Object, fb fluentbitv1alpha2.Flu } func (r *FluentBitReconciler) delete(ctx context.Context, fb *fluentbitv1alpha2.FluentBit) error { - var sa corev1.ServiceAccount - err := r.Get(ctx, client.ObjectKey{Namespace: fb.Namespace, Name: fb.Name}, &sa) - if err == nil { - if err := r.Delete(ctx, &sa); err != nil && !errors.IsNotFound(err) { - return err - } - } else if !errors.IsNotFound(err) { + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: fb.Name, + Namespace: fb.Namespace, + }, + } + if err := r.Delete(ctx, &sa); err != nil && !errors.IsNotFound(err) { return err } - var ds appsv1.DaemonSet - err = r.Get(ctx, client.ObjectKey{Namespace: 
fb.Namespace, Name: fb.Name}, &ds) - if err == nil { - if err := r.Delete(ctx, &ds); err != nil && !errors.IsNotFound(err) { + if r.Namespaced { + roleName, _, roleBindingName := operator.MakeScopedRBACNames(fb.Name) + role := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: fb.Namespace, + }, + } + if err := r.Delete(ctx, &role); err != nil && !errors.IsNotFound(err) { + return err + } + + rolebinding := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleBindingName, + Namespace: fb.Namespace, + }, + } + if err := r.Delete(ctx, &rolebinding); err != nil && !errors.IsNotFound(err) { return err } - } else if !errors.IsNotFound(err) { + } + // TODO: clusterrole, clusterrolebinding + + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fb.Name, + Namespace: fb.Namespace, + }, + } + if err := r.Delete(ctx, &ds); err != nil && !errors.IsNotFound(err) { + return err + } + + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fb.Name, + Namespace: fb.Namespace, + }, + } + if err := r.Delete(ctx, &svc); err != nil && !errors.IsNotFound(err) { return err } diff --git a/controllers/fluentd_controller.go b/controllers/fluentd_controller.go index 99535898c..fada5638c 100644 --- a/controllers/fluentd_controller.go +++ b/controllers/fluentd_controller.go @@ -91,39 +91,29 @@ func (r *FluentdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } // Install RBAC resources for the filter plugin kubernetes - var rbacObj, saObj, bindingObj = operator.MakeRBACObjects(fd.Name, fd.Namespace, "fluentd", fd.Spec.RBACRules, fd.Spec.ServiceAccountAnnotations) - - // Set ServiceAccount's owner to this Fluentd - if err := ctrl.SetControllerReference(&fd, saObj, r.Scheme); err != nil { - return ctrl.Result{}, err - } - if err := r.Create(ctx, rbacObj); err != nil && !errors.IsAlreadyExists(err) { + cr, sa, crb := operator.MakeRBACObjects(fd.Name, fd.Namespace, "fluentd", fd.Spec.RBACRules, 
fd.Spec.ServiceAccountAnnotations) + // Deploy Fluentd ClusterRole + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, cr, r.mutate(cr, &fd)); err != nil { return ctrl.Result{}, err } - if err := r.Create(ctx, saObj); err != nil && !errors.IsAlreadyExists(err) { + // Deploy Fluentd ClusterRoleBinding + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, crb, r.mutate(crb, &fd)); err != nil { return ctrl.Result{}, err } - if err := r.Create(ctx, bindingObj); err != nil && !errors.IsAlreadyExists(err) { + // Deploy Fluentd ServiceAccount + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sa, r.mutate(sa, &fd)); err != nil { return ctrl.Result{}, err } // Deploy Fluentd Statefulset - dp := operator.MakeStatefulset(fd) - if err := ctrl.SetControllerReference(&fd, dp, r.Scheme); err != nil { - return ctrl.Result{}, err - } - - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, dp, r.mutate(dp, &fd)); err != nil { + sts := operator.MakeStatefulset(fd) + if _, err := controllerutil.CreateOrPatch(ctx, r.Client, sts, r.mutate(sts, &fd)); err != nil { return ctrl.Result{}, err } // Deploy Fluentd Service if !fd.Spec.DisableService { svc := operator.MakeFluentdService(fd) - if err := ctrl.SetControllerReference(&fd, svc, r.Scheme); err != nil { - return ctrl.Result{}, err - } - if _, err := controllerutil.CreateOrPatch(ctx, r.Client, svc, r.mutate(svc, &fd)); err != nil { return ctrl.Result{}, err } diff --git a/controllers/fluentdconfig_controller.go b/controllers/fluentdconfig_controller.go index 439d1a175..a8e19c451 100644 --- a/controllers/fluentdconfig_controller.go +++ b/controllers/fluentdconfig_controller.go @@ -55,7 +55,7 @@ const ( SYSTEM = `# Enable RPC endpoint rpc_endpoint 127.0.0.1:24444 - log_level info + log_level %s workers %d ` @@ -104,7 +104,7 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques _ = r.Log.WithValues("fluentdconfig", req.NamespacedName) var fluentdList fluentdv1alpha1.FluentdList - // 
List all fluentd instances to bind the generated runtime configuration to each fluentd. + // List all fluentd instances to bind the generated runtime configuration to each fluentd if err := r.List(ctx, &fluentdList); err != nil { if errors.IsNotFound(err) { r.Log.Info("can not find fluentd CR definition.") @@ -113,29 +113,54 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + // Loop over all Fluentd CRs for _, fd := range fluentdList.Items { // Get the config selector in this fluentd instance fdSelector, err := metav1.LabelSelectorAsSelector(&fd.Spec.FluentdCfgSelector) if err != nil { - // Patch this fluentd instance if the selectors exsit errors + // Patch this fluentd instance if the selectors exit with errors if err := r.PatchObjects(ctx, &fd, fluentdv1alpha1.InactiveState, err.Error()); err != nil { return ctrl.Result{}, err } continue } - - // A secret loader supports LoadSecret method to parse the targeted secret. + // A secret loader supports LoadSecret method to parse the targeted secret sl := plugins.NewSecretLoader(r.Client, fd.Namespace, r.Log) // gpr acts as a global resource to store the related plugin resources gpr := fluentdv1alpha1.NewGlobalPluginResources("main") // Each cluster/namespace scope fluentd configs will generate their own filters/outputs plugins with their own cfgId/cfgLabel, - // and they will finally be combined into one fluentd config file. + // and they will finally be combined into one fluentd config file gpr.CombineGlobalInputsPlugins(sl, fd.Spec.GlobalInputs) - // globalCfgLabels stores cfgLabels, the same cfg label is not allowed. 
+ // Default Output and filter + // list all namespaced CRs + filters, outputs, err := r.ListNamespacedLevelResources(ctx, fd.Namespace, fd.Spec.DefaultFilterSelector, fd.Spec.DefaultOutputSelector) + if err != nil { + r.Log.Info("List namespace level resources failed", "config", "default", "err", err.Error()) + return ctrl.Result{}, err + } + if len(filters) > 0 || len(outputs) > 0 { + // Combine the namespaced filter/output pluginstores in this fluentd config + cfgResouces, errs := gpr.PatchAndFilterNamespacedLevelResources(sl, fmt.Sprintf("%s-%s-%s", fd.Kind, fd.Namespace, fd.Name), filters, outputs) + if len(errs) > 0 { + r.Log.Info("Patch and filter namespace level resources failed", "config", "default", "err", strings.Join(errs, ",")) + return ctrl.Result{}, fmt.Errorf(strings.Join(errs, ",")) + } + + // WithCfgResources will collect all plugins to generate main config + err = gpr.WithCfgResources("@default", cfgResouces) + if err != nil { + r.Log.Info("Combine resources failed", "config", "default", "err", err.Error()) + return ctrl.Result{}, err + } + // Add the default route to the main routing plugin + gpr.MainRouterPlugins.InsertPairs("default_route", "@default") + } + + // globalCfgLabels stores cfgLabels, the same cfg label is not allowed globalCfgLabels := make(map[string]bool) // Combine the resources matching the FluentdClusterConfigs selector into gpr @@ -152,7 +177,7 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Combine the resources matching the FluentdConfigs selector into gpr var cfgs fluentdv1alpha1.FluentdConfigList - // Use fluentd selector to match the cluster config. 
+ // Use fluentd selector to match the cluster config if err := r.List(ctx, &cfgs, client.MatchingLabelsSelector{Selector: fdSelector}); err != nil { if !errors.IsNotFound(err) { return ctrl.Result{}, err @@ -180,13 +205,13 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques workers = *fd.Spec.Workers } - // Create or update the secret of the fluentd instance in its namespace. + // Create or update the secret of the fluentd instance in its namespace mainAppCfg, err = gpr.RenderMainConfig(bool(workers > 1)) if err != nil { return ctrl.Result{}, err } - systemCfg = fmt.Sprintf(SYSTEM, workers) + systemCfg = fmt.Sprintf(SYSTEM, fd.Spec.LogLevel, workers) } secName := fmt.Sprintf("%s-config", fd.Name) @@ -205,7 +230,7 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques FluentdSecretSystemKey: []byte(systemCfg), FluentdSecretLogKey: []byte(FLUENTD_LOG), } - // The current fd owns the namespaced secret. + // The current fd owns the namespaced secret sec.SetOwnerReferences(nil) if err := ctrl.SetControllerReference(&fd, sec, r.Scheme); err != nil { return err @@ -223,12 +248,16 @@ func (r *FluentdConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // ClusterCfgsForFluentd combines all cluster cfgs selected by this fd func (r *FluentdConfigReconciler) ClusterCfgsForFluentd( - ctx context.Context, clustercfgs fluentdv1alpha1.ClusterFluentdConfigList, sl plugins.SecretLoader, gpr *fluentdv1alpha1.PluginResources, - globalCfgLabels map[string]bool) error { + ctx context.Context, + clustercfgs fluentdv1alpha1.ClusterFluentdConfigList, + sl plugins.SecretLoader, + gpr *fluentdv1alpha1.PluginResources, + globalCfgLabels map[string]bool, +) error { for _, cfg := range clustercfgs.Items { - // Build the inner router for this cfg. + // Build the inner router for this cfg and append it to the MainRouter // Each cfg is a workflow. 
cfgRouter, err := gpr.BuildCfgRouter(&cfg) if err != nil { @@ -239,8 +268,9 @@ func (r *FluentdConfigReconciler) ClusterCfgsForFluentd( continue } - + // Set the route label which was calculated by a sub function of BuildCfgRoute cfgRouterLabel := fmt.Sprint(*cfgRouter.Label) + if err := r.registerCfgLabel(cfgRouterLabel, globalCfgLabels); err != nil { r.Log.Info("Register fluentd config label failed", "config", cfg.Name, "err", err.Error()) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, err.Error()); err != nil { @@ -251,7 +281,7 @@ func (r *FluentdConfigReconciler) ClusterCfgsForFluentd( } // list all cluster CRs - clusterfilters, clusteroutputs, err := r.ListClusterLevelResources(ctx, cfg.GetCfgId(), cfg.Spec.ClusterFilterSelector, cfg.Spec.ClusterOutputSelector) + clusterfilters, clusteroutputs, err := r.ListClusterLevelResources(ctx, cfg.Spec.ClusterFilterSelector, cfg.Spec.ClusterOutputSelector) if err != nil { r.Log.Info("List cluster level resources failed", "config", cfg.Name, "err", err.Error()) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, err.Error()); err != nil { @@ -261,10 +291,10 @@ func (r *FluentdConfigReconciler) ClusterCfgsForFluentd( continue } - // Combine the filter/output pluginstores in this fluentd config. + // Combine the filter/output pluginstores in this fluentd config cfgResouces, errs := gpr.PatchAndFilterClusterLevelResources(sl, cfg.GetCfgId(), clusterfilters, clusteroutputs) if len(errs) > 0 { - r.Log.Info("Patch and filter cluster level resources failed", "config", cfg.Name, "err", err.Error()) + r.Log.Info("Patch and filter cluster level resources failed", "config", cfg.Name, "err", strings.Join(errs, ",")) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, strings.Join(errs, ", ")); err != nil { return err } @@ -272,7 +302,7 @@ func (r *FluentdConfigReconciler) ClusterCfgsForFluentd( continue } - // WithCfgResources will collect all plugins to generate main config. 
+ // WithCfgResources will collect all plugins to generate main config var msg string err = gpr.WithCfgResources(cfgRouterLabel, cfgResouces) if err != nil { @@ -295,7 +325,7 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen gpr *fluentdv1alpha1.PluginResources, globalCfgLabels map[string]bool) error { for _, cfg := range cfgs.Items { - // build the inner router for this cfg. + // Build the inner router for this cfg and append it to the MainRouter cfgRouter, err := gpr.BuildCfgRouter(&cfg) if err != nil { r.Log.Info("Build router failed", "config", cfg.Name, "err", err.Error()) @@ -306,7 +336,7 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen continue } - // register routeLabel, the same routelabel is not allowed. + // register routeLabel and fail if it is already present cfgRouterLabel := fmt.Sprint(*cfgRouter.Label) if err := r.registerCfgLabel(cfgRouterLabel, globalCfgLabels); err != nil { r.Log.Info("Register fluentd config label failed", "config", cfg.Name, "err", err.Error()) @@ -318,7 +348,7 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen } // list all cluster CRs - clusterfilters, clusteroutputs, err := r.ListClusterLevelResources(ctx, cfg.GetCfgId(), cfg.Spec.ClusterFilterSelector, cfg.Spec.ClusterOutputSelector) + clusterfilters, clusteroutputs, err := r.ListClusterLevelResources(ctx, cfg.Spec.ClusterFilterSelector, cfg.Spec.ClusterOutputSelector) if err != nil { r.Log.Info("List cluster level resources failed", "config", cfg.Name, "err", err.Error()) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, err.Error()); err != nil { @@ -329,7 +359,7 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen } // list all namespaced CRs - filters, outputs, err := r.ListNamespacedLevelResources(ctx, cfg.Namespace, cfg.GetCfgId(), cfg.Spec.FilterSelector, cfg.Spec.OutputSelector) + filters, outputs, err := 
r.ListNamespacedLevelResources(ctx, cfg.Namespace, cfg.Spec.FilterSelector, cfg.Spec.OutputSelector) if err != nil { r.Log.Info("List namespace level resources failed", "config", cfg.Name, "err", err.Error()) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, err.Error()); err != nil { @@ -339,10 +369,10 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen continue } - // Combine the cluster filter/output pluginstores in this fluentd config. + // Combine the cluster filter/output pluginstores in this fluentd config clustercfgResouces, errs := gpr.PatchAndFilterClusterLevelResources(sl, cfg.GetCfgId(), clusterfilters, clusteroutputs) if len(errs) > 0 { - r.Log.Info("Patch and filter cluster level resources failed", "config", cfg.Name, "err", err.Error()) + r.Log.Info("Patch and filter cluster level resources failed", "config", cfg.Name, "err", strings.Join(errs, ",")) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, strings.Join(errs, ", ")); err != nil { return err } @@ -350,10 +380,10 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen continue } - // Combine the namespaced filter/output pluginstores in this fluentd config. + // Combine the namespaced filter/output pluginstores in this fluentd config cfgResouces, errs := gpr.PatchAndFilterNamespacedLevelResources(sl, cfg.GetCfgId(), filters, outputs) if len(errs) > 0 { - r.Log.Info("Patch and filter namespace level resources failed", "config", cfg.Name, "err", err.Error()) + r.Log.Info("Patch and filter namespace level resources failed", "config", cfg.Name, "err", strings.Join(errs, ",")) if err = r.PatchObjects(ctx, &cfg, fluentdv1alpha1.InvalidState, strings.Join(errs, ", ")); err != nil { return err } @@ -364,7 +394,7 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen cfgResouces.FilterPlugins = append(cfgResouces.FilterPlugins, clustercfgResouces.FilterPlugins...) 
cfgResouces.OutputPlugins = append(cfgResouces.OutputPlugins, clustercfgResouces.OutputPlugins...) - // WithCfgResources will collect all plugins to generate main config. + // WithCfgResources will collect all plugins to generate main config var msg string err = gpr.WithCfgResources(cfgRouterLabel, cfgResouces) if err != nil { @@ -383,7 +413,9 @@ func (r *FluentdConfigReconciler) CfgsForFluentd(ctx context.Context, cfgs fluen } func (r *FluentdConfigReconciler) CheckAllState( - cfgs fluentdv1alpha1.FluentdConfigList, clustercfgs fluentdv1alpha1.ClusterFluentdConfigList) (fluentdv1alpha1.StatusState, string) { + cfgs fluentdv1alpha1.FluentdConfigList, + clustercfgs fluentdv1alpha1.ClusterFluentdConfigList, +) (fluentdv1alpha1.StatusState, string) { invalidCfgIds := make([]string, 0, len(cfgs.Items)+len(cfgs.Items)) for _, cfg := range cfgs.Items { @@ -412,18 +444,22 @@ func (r *FluentdConfigReconciler) CheckAllState( // registerCfgLabel registers a cfglabel for this clustercfg/cfg func (r *FluentdConfigReconciler) registerCfgLabel(cfgLabel string, globalCfgLabels map[string]bool) error { // cfgRouterLabel contains the important information for this cfg. + // check if the calculated cfgLabel is already present if ok := globalCfgLabels[cfgLabel]; ok { return fmt.Errorf("the current configuration already exists: %s", cfgLabel) } - // register the cfg labels, the same cfg labels is not allowed. + // register the cfg labels, the same cfg labels is not allowed globalCfgLabels[cfgLabel] = true return nil } -func (r *FluentdConfigReconciler) ListClusterLevelResources(ctx context.Context, cfgId string, - filterSelector, outputSelector *metav1.LabelSelector) ([]fluentdv1alpha1.ClusterFilter, []fluentdv1alpha1.ClusterOutput, error) { - // List all filters matching the label selector. 
+func (r *FluentdConfigReconciler) ListClusterLevelResources( + ctx context.Context, + filterSelector, + outputSelector *metav1.LabelSelector, +) ([]fluentdv1alpha1.ClusterFilter, []fluentdv1alpha1.ClusterOutput, error) { + // List all filters matching the label selector var clusterfilters fluentdv1alpha1.ClusterFilterList if filterSelector != nil { selector, err := metav1.LabelSelectorAsSelector(filterSelector) @@ -435,7 +471,7 @@ func (r *FluentdConfigReconciler) ListClusterLevelResources(ctx context.Context, } } - // List all outputs matching the label selector. + // List all outputs matching the label selector var clusteroutputs fluentdv1alpha1.ClusterOutputList if outputSelector != nil { selector, err := metav1.LabelSelectorAsSelector(outputSelector) @@ -449,8 +485,12 @@ func (r *FluentdConfigReconciler) ListClusterLevelResources(ctx context.Context, return clusterfilters.Items, clusteroutputs.Items, nil } -func (r *FluentdConfigReconciler) ListNamespacedLevelResources(ctx context.Context, namespace, cfgId string, - filterSelector, outputSelector *metav1.LabelSelector) ([]fluentdv1alpha1.Filter, []fluentdv1alpha1.Output, error) { +func (r *FluentdConfigReconciler) ListNamespacedLevelResources( + ctx context.Context, + namespace string, + filterSelector, + outputSelector *metav1.LabelSelector, +) ([]fluentdv1alpha1.Filter, []fluentdv1alpha1.Output, error) { // List and patch the related cluster CRs var filters fluentdv1alpha1.FilterList if filterSelector != nil { @@ -463,7 +503,7 @@ func (r *FluentdConfigReconciler) ListNamespacedLevelResources(ctx context.Conte } } - // List all outputs matching the label selector. 
+ // List all outputs matching the label selector var outputs fluentdv1alpha1.OutputList if outputSelector != nil { selector, err := metav1.LabelSelectorAsSelector(outputSelector) @@ -505,16 +545,16 @@ func (r *FluentdConfigReconciler) PatchObjects(ctx context.Context, obj client.O return nil } -// SetupWithManager sets up the controller with the Manager. +// SetupWithManager sets up the controller with the Manager func (r *FluentdConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, fluentdOwnerKey, func(rawObj client.Object) []string { - // grab the job object, extract the owner. + // grab the job object, extract the owner sa := rawObj.(*corev1.ServiceAccount) owner := metav1.GetControllerOf(sa) if owner == nil { return nil } - // Make sure it's a Fluentd. If so, return it. + // Make sure it's a Fluentd. If so, return it if owner.APIVersion != fluentdApiGVStr || owner.Kind != "Fluentd" { return nil } diff --git a/docs/fluentbit.md b/docs/fluentbit.md index 5e5643f85..77addfd31 100644 --- a/docs/fluentbit.md +++ b/docs/fluentbit.md @@ -214,6 +214,7 @@ CollectorSpec defines the desired state of FluentBit | bufferPath | The path where buffer chunks are stored. | *string | | ports | Ports represents the pod's ports. | []corev1.ContainerPort | | service | Service represents configurations on the fluent-bit service. | CollectorService | +| schedulerName | SchedulerName represents the desired scheduler for the Fluentbit collector pods | string | [Back to TOC](#table-of-contents) # Decorder @@ -385,6 +386,7 @@ FluentBitSpec defines the desired state of FluentBit | serviceAccountAnnotations | Annotations to add to the Fluentbit service account | map[string]string | | labels | Labels to add to each FluentBit pod | map[string]string | | securityContext | SecurityContext holds pod-level security attributes and common container settings. 
| *corev1.PodSecurityContext | +| containerSecurityContext | ContainerSecurityContext holds container-level security attributes. | *corev1.SecurityContext | | hostNetwork | Host networking is requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. | bool | | envVars | EnvVars represent environment variables that can be passed to fluentbit pods. | []corev1.EnvVar | | livenessProbe | LivenessProbe represents the pod's liveness probe. | *corev1.Probe | @@ -395,6 +397,7 @@ FluentBitSpec defines the desired state of FluentBit | dnsPolicy | Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. | corev1.DNSPolicy | | metricsPort | MetricsPort is the port used by the metrics server. If this option is set, HttpPort from ClusterFluentBitConfig needs to match this value. Default is 2020. | int32 | | service | Service represents configurations on the fluent-bit service. | FluentBitService | +| schedulerName | SchedulerName represents the desired scheduler for fluent-bit pods. | string | [Back to TOC](#table-of-contents) # InputSpec @@ -478,11 +481,13 @@ OutputSpec defines the desired state of ClusterOutput | influxDB | InfluxDB defines InfluxDB Output configuration. | *[output.InfluxDB](plugins/output/influxdb.md) | | datadog | DataDog defines DataDog Output configuration. | *[output.DataDog](plugins/output/datadog.md) | | firehose | Firehose defines Firehose Output configuration. | *[output.Firehose](plugins/output/firehose.md) | +| kinesis | Kinesis defines Kinesis Output configuration. 
| *[output.Kinesis](plugins/output/kinesis.md) | | stackdriver | Stackdriver defines Stackdriver Output Configuration | *[output.Stackdriver](plugins/output/stackdriver.md) | | splunk | Splunk defines Splunk Output Configuration | *[output.Splunk](plugins/output/splunk.md) | | opensearch | OpenSearch defines OpenSearch Output configuration. | *[output.OpenSearch](plugins/output/opensearch.md) | | opentelemetry | OpenTelemetry defines OpenTelemetry Output configuration. | *[output.OpenTelemetry](plugins/output/opentelemetry.md) | | prometheusRemoteWrite | PrometheusRemoteWrite_types defines Prometheus Remote Write configuration. | *[output.PrometheusRemoteWrite](plugins/output/prometheusremotewrite.md) | +| s3 | S3 defines S3 Output configuration. | *[output.S3](plugins/output/s3.md) | | customPlugin | CustomPlugin defines Custom Output configuration. | *custom.CustomPlugin | [Back to TOC](#table-of-contents) diff --git a/docs/fluentd.md b/docs/fluentd.md index 74de9b1bb..298825f92 100644 --- a/docs/fluentd.md +++ b/docs/fluentd.md @@ -286,9 +286,12 @@ FluentdSpec defines the desired state of Fluentd | Field | Description | Scheme | | ----- | ----------- | ------ | | globalInputs | Fluentd global inputs. | [][[input.Input](plugins/input/input.md)](plugins/[input/input](plugins/input/input/md).md) | +| defaultFilterSelector | Select cluster filter plugins used to filter for the default cluster output | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta) | +| defaultOutputSelector | Select cluster output plugins used to send all logs that did not match a route to the matching outputs | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta) | | disableService | By default will build the related service according to the globalinputs definition. 
| bool | | replicas | Numbers of the Fluentd instance | *int32 | | workers | Numbers of the workers in Fluentd instance | *int32 | +| logLevel | Global logging verbosity | string | | image | Fluentd image. | string | | args | Fluentd Watcher command line arguments. | []string | | envVars | EnvVars represent environment variables that can be passed to fluentd pods. | []corev1.EnvVar | @@ -309,6 +312,8 @@ FluentdSpec defines the desired state of Fluentd | volumeMounts | Pod volumes to mount into the container's filesystem. Cannot be updated. | []corev1.VolumeMount | | volumeClaimTemplates | volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. | []corev1.PersistentVolumeClaim | | service | Service represents configurations on the fluentd service. | FluentDService | +| securityContext | PodSecurityContext represents the security context for the fluentd pods. | *corev1.PodSecurityContext | +| schedulerName | SchedulerName represents the desired scheduler for fluentd pods. | string | [Back to TOC](#table-of-contents) # FluentdStatus diff --git a/docs/plugins/fluentbit/custom/custom.md b/docs/plugins/fluentbit/custom/custom.md index c759a1123..8e4a2fc70 100644 --- a/docs/plugins/fluentbit/custom/custom.md +++ b/docs/plugins/fluentbit/custom/custom.md @@ -1,6 +1,6 @@ # CustomPlugin -CustomPlugin is used to support filter plugins that are not implemented yet.
**For example usage, refer to https://github.com/jjsiv/fluent-operator/blob/master/docs/best-practice/custom-plugin.md** +CustomPlugin is used to support filter plugins that are not implemented yet.
**For example usage, refer to https://github.com/fluent/fluent-operator/blob/master/docs/best-practice/custom-plugin.md** | Field | Description | Scheme | diff --git a/docs/plugins/fluentbit/index.md b/docs/plugins/fluentbit/index.md index a31dfdc51..c6bdcabb7 100644 --- a/docs/plugins/fluentbit/index.md +++ b/docs/plugins/fluentbit/index.md @@ -1,40 +1,40 @@ - [ClusterInput](../../fluentbit.md#ClusterInput) - - [dummy](clusterinput/dummy.md) - - [tail](clusterinput/tail.md) - - [systemd](clusterinput/systemd.md) - - [node exporter metrics](clusterinput/node-exporter-metrics.md) - - [fluentbit metrics](clusterinput/fluentbit-metrics.md) - - [prometheus scrape metrics](clusterinput/prometheus-scrape-metrics.md) + - [dummy](input/dummy.md) + - [tail](input/tail.md) + - [systemd](input/systemd.md) + - [node exporter metrics](input/node-exporter-metrics.md) + - [fluentbit metrics](input/fluentbit-metrics.md) + - [prometheus scrape metrics](input/prometheus-scrape-metrics.md) - [ClusterParser](../../fluentbit.md#ClusterParser) - - [json](clusterparser/json.md) - - [logfmt](clusterparser/logfmt.md) - - [lstv](clusterparser/lstv.md) - - [regex](clusterparser/regex.md) + - [json](parser/json.md) + - [logfmt](parser/logfmt.md) + - [lstv](parser/lstv.md) + - [regex](parser/regex.md) - [ClusterFilter](../../fluentbit.md#ClusterFilter) - - [kubernetes](clusterfilter/kubernetes.md) - - [modify](clusterfilter/modify.md) - - [nest](clusterfilter/nest.md) - - [parser](clusterfilter/parser.md) - - [grep](clusterfilter/grep.md) - - [record modifier](clusterfilter/recordmodifier.md) - - [lua](clusterfilter/lua.md) - - [throttle](clusterfilter/throttle.md) - - [aws](clusterfilter/aws.md) - - [multiline](clusterfilter/multiline.md) + - [kubernetes](filter/kubernetes.md) + - [modify](filter/modify.md) + - [nest](filter/nest.md) + - [parser](filter/parser.md) + - [grep](filter/grep.md) + - [record modifier](filter/recordmodifier.md) + - [lua](filter/lua.md) + - 
[throttle](filter/throttle.md) + - [aws](filter/aws.md) + - [multiline](filter/multiline.md) - [ClusterOutput](../../fluentbit.md#ClusterOutput) - - [elasticsearch](clusteroutput/elasticsearch.md) - - [file](clusteroutput/file.md) - - [forward](clusteroutput/forward.md) - - [http](clusteroutput/http.md) - - [kafka](clusteroutput/kafka.md) - - [null](clusteroutput/null.md) - - [stdout](clusteroutput/stdout.md) - - [tcp](clusteroutput/tcp.md) - - [loki](clusteroutput/loki.md) - - [syslog](clusteroutput/syslog.md) - - [datadog](clusteroutput/datadog.md) - - [firehose](clusteroutput/firehose.md) - - [opensearch](clusteroutput/opensearch.md) - - [opentelemetry](clusteroutput/opentelemetry.md) - - [prometheus remote write](clusteroutput/prometheus-remote-write.md) + - [elasticsearch](output/elasticsearch.md) + - [file](output/file.md) + - [forward](output/forward.md) + - [http](output/http.md) + - [kafka](output/kafka.md) + - [null](output/null.md) + - [stdout](output/stdout.md) + - [tcp](output/tcp.md) + - [loki](output/loki.md) + - [syslog](output/syslog.md) + - [datadog](output/datadog.md) + - [firehose](output/firehose.md) + - [opensearch](output/opensearch.md) + - [opentelemetry](output/opentelemetry.md) + - [prometheus remote write](output/prometheus-remote-write.md) diff --git a/docs/plugins/fluentbit/output/kinesis.md b/docs/plugins/fluentbit/output/kinesis.md new file mode 100644 index 000000000..4e7ceb7e3 --- /dev/null +++ b/docs/plugins/fluentbit/output/kinesis.md @@ -0,0 +1,17 @@ +# Kinesis + +The Kinesis output plugin, allows to ingest your records into AWS Kinesis.
It uses the new high performance and highly efficient kinesis plugin called kinesis_streams, instead of the older Golang Fluent Bit plugin released in 2019. https://docs.fluentbit.io/manual/pipeline/outputs/kinesis<br />
https://github.com/aws/amazon-kinesis-streams-for-fluent-bit
+ + +| Field | Description | Scheme | +| ----- | ----------- | ------ | +| region | The AWS region. | string | +| stream | The name of the Kinesis Streams Delivery stream that you want log records sent to. | string | +| timeKey | Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be added to records sent to Kinesis. | string | +| timeKeyFormat | strftime compliant format string for the timestamp; for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond precision with '%3N' and supports nanosecond precision with '%9N' and '%L'; for example, adding '%3N' to support millisecond '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. | string | +| logKey | By default, the whole log record will be sent to Kinesis. If you specify a key name with this option, then only the value of that key will be sent to Kinesis. For example, if you are using the Fluentd Docker log driver, you can specify log_key log and only the log message will be sent to Kinesis. | string | +| roleARN | ARN of an IAM role to assume (for cross account access). | string | +| endpoint | Specify a custom endpoint for the Kinesis API. | string | +| stsEndpoint | Custom endpoint for the STS API. | string | +| autoRetryRequests | Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit retry mechanism with backoff. Instead, it enables an immediate retry with no delay for networking errors, which may help improve throughput when there are transient/random networking issues. This option defaults to true. | *bool | +| externalID | Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID. 
| string | diff --git a/docs/plugins/fluentbit/output/loki.md b/docs/plugins/fluentbit/output/loki.md index cb3c40312..bba70c603 100644 --- a/docs/plugins/fluentbit/output/loki.md +++ b/docs/plugins/fluentbit/output/loki.md @@ -12,6 +12,10 @@ The loki output plugin, allows to ingest your records into a Loki service.
**For full documentation, refer to https://docs.fluentbit.io/manual/pipeline/outputs/s3** + + +| Field | Description | Scheme | +| ----- | ----------- | ------ | +| region | The AWS region of your S3 bucket | string | +| bucket | S3 Bucket name | string | +| json_date_key | Specify the name of the time key in the output record. To disable the time key just set the value to false. | string | +| json_date_format | Specify the format of the date. Supported formats are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681) | string | +| total_file_size | Specifies the size of files in S3. Minimum size is 1M. With use_put_object On the maximum size is 1G. With multipart upload mode, the maximum size is 50G. | string | +| upload_chunk_size | The size of each 'part' for multipart uploads. Max: 50M | string | +| upload_timeout | Whenever this amount of time has elapsed, Fluent Bit will complete an upload and create a new file in S3. For example, set this value to 60m and you will get a new file every hour. | string | +| store_dir | Directory to locally buffer data before sending. | string | +| store_dir_limit_size | The size of the limitation for disk usage in S3. | string | +| s3_key_format | Format string for keys in S3. | string | +| s3_key_format_tag_delimiters | A series of characters which will be used to split the tag into 'parts' for use with the s3_key_format option. | string | +| static_file_path | Disables behavior where UUID string is automatically appended to end of S3 key name when $UUID is not provided in s3_key_format. $UUID, time formatters, $TAG, and other dynamic key formatters all work as expected while this feature is set to true. | *bool | +| use_put_object | Use the S3 PutObject API, instead of the multipart upload API. | *bool | +| role_arn | ARN of an IAM role to assume | string | +| endpoint | Custom endpoint for the S3 API. | string | +| sts_endpoint | Custom endpoint for the STS API. 
| string | +| canned_acl | Predefined Canned ACL Policy for S3 objects. | string | +| compression | Compression type for S3 objects. | string | +| content_type | A standard MIME type for the S3 object; this will be set as the Content-Type HTTP header. | string | +| send_content_md5 | Send the Content-MD5 header with PutObject and UploadPart requests, as is required when Object Lock is enabled. | *bool | +| auto_retry_requests | Immediately retry failed requests to AWS services once. | *bool | +| log_key | By default, the whole log record will be sent to S3. If you specify a key name with this option, then only the value of that key will be sent to S3. | string | +| preserve_data_ordering | Normally, when an upload request fails, there is a high chance for the last received chunk to be swapped with a later chunk, resulting in data shuffling. This feature prevents this shuffling by using a queue logic for uploads. | *bool | +| storage_class | Specify the storage class for S3 objects. If this option is not specified, objects will be stored with the default 'STANDARD' storage class. | string | +| retry_limit | Integer value to set the maximum number of retries allowed. | *int32 | +| external_id | Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID. | string | diff --git a/docs/plugins/fluentd/common/common.md b/docs/plugins/fluentd/common/common.md index 2b3979b76..75f89cf46 100644 --- a/docs/plugins/fluentd/common/common.md +++ b/docs/plugins/fluentd/common/common.md @@ -15,8 +15,8 @@ Time defines the common parameters for the time plugin | Field | Description | Scheme | | ----- | ----------- | ------ | -| timeType | parses/formats value according to this type, default is *string | *string | -| timeFormat | Process value according to the specified format. 
This is available only when time_type is *string | *string | +| timeType | parses/formats value according to this type, default is string | *string | +| timeFormat | Process value according to the specified format. This is available only when time_type is string | *string | | localtime | If true, uses local time. | *bool | | utc | If true, uses UTC. | *bool | | timezone | Uses the specified timezone. | *string | diff --git a/docs/plugins/fluentd/output/datadog.md b/docs/plugins/fluentd/output/datadog.md new file mode 100644 index 000000000..ac0a460c5 --- /dev/null +++ b/docs/plugins/fluentd/output/datadog.md @@ -0,0 +1,28 @@ +# Datadog + +Datadog defines the parameters for out_datadog plugin + + +| Field | Description | Scheme | +| ----- | ----------- | ------ | +| apiKey | This parameter is required in order to authenticate your fluent agent. | *[plugins.Secret](../secret.md) | +| useJson | Event format, if true, the event is sent in json format. Othwerwise, in plain text. | *bool | +| includeTagKey | Automatically include the Fluentd tag in the record. | *bool | +| tagKey | Where to store the Fluentd tag. | *string | +| timestampKey | Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. | *string | +| useSSL | If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. | *bool | +| noSSLValidation | Disable SSL validation (useful for proxy forwarding) | *bool | +| sslPort | Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. | *uint32 | +| maxRetries | The number of retries before the output plugin stops. Set to -1 for unlimited retries | *uint32 | +| maxBackoff | The maximum time waited between each retry in seconds | *uint32 | +| useHTTP | Enable HTTP forwarding. 
If you disable it, make sure to change the port to 10514 or ssl_port to 10516 | *bool | +| useCompression | Enable log compression for HTTP | *bool | +| compressionLevel | Set the log compression level for HTTP (1 to 9, 9 being the best ratio) | *uint32 | +| ddSource | This tells Datadog what integration it is | *string | +| ddSourcecategory | Multiple value attribute. Can be used to refine the source attribute | *string | +| ddTags | Custom tags with the following format \"key1:value1, key2:value2\" | *string | +| ddHostname | Used by Datadog to identify the host submitting the logs. | *string | +| service | Used by Datadog to correlate between logs, traces and metrics. | *string | +| port | Proxy port when logs are not directly forwarded to Datadog and ssl is not used | *uint32 | +| host | Proxy endpoint when logs are not directly forwarded to Datadog | *string | +| httpProxy | HTTP proxy, only takes effect if HTTP forwarding is enabled (use_http). Defaults to HTTP_PROXY/http_proxy env vars. 
| *string | diff --git a/docs/plugins/fluentd/output/types.md b/docs/plugins/fluentd/output/types.md index 72c4aebcd..2ed549f23 100644 --- a/docs/plugins/fluentd/output/types.md +++ b/docs/plugins/fluentd/output/types.md @@ -24,3 +24,4 @@ Output defines all available output plugins and their parameters | loki | out_loki plugin | *Loki | | customPlugin | Custom plugin type | *custom.CustomPlugin | | cloudWatch | out_cloudwatch plugin | *CloudWatch | +| datadog | datadog plugin | *Datadog | diff --git a/go.mod b/go.mod index 122e33c01..b8ff43e8b 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,9 @@ require ( github.com/joho/godotenv v1.5.1 github.com/oklog/run v1.1.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.27.6 + github.com/onsi/gomega v1.27.7 k8s.io/api v0.26.3 - k8s.io/apimachinery v0.27.1 + k8s.io/apimachinery v0.27.2 k8s.io/client-go v0.26.3 k8s.io/klog/v2 v2.100.1 sigs.k8s.io/controller-runtime v0.14.6 @@ -58,14 +58,14 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/tools v0.9.1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect @@ -77,7 +77,7 @@ require ( k8s.io/code-generator v0.26.1 // indirect k8s.io/component-base v0.26.1 // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/kube-openapi 
v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index 609959a66..5268da82a 100644 --- a/go.sum +++ b/go.sum @@ -234,11 +234,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= +github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -350,8 +350,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -386,8 +386,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -451,12 +451,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -464,8 +464,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -516,8 +516,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -641,8 +641,8 @@ k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= -k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= +k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= +k8s.io/apimachinery v0.27.2/go.mod 
h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo= @@ -654,8 +654,8 @@ k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/manifests/kubeedge/fluentbit-fluentbit-edge.yaml b/manifests/kubeedge/fluentbit-fluentbit-edge.yaml index 90e0804b1..6cd76ee5a 100644 --- a/manifests/kubeedge/fluentbit-fluentbit-edge.yaml +++ b/manifests/kubeedge/fluentbit-fluentbit-edge.yaml @@ -6,7 +6,7 @@ metadata: labels: app.kubernetes.io/name: fluent-bit spec: - image: kubesphere/fluent-bit:v2.1.3 + image: kubesphere/fluent-bit:v2.1.4 positionDB: hostPath: path: /var/lib/fluent-bit/ diff --git a/manifests/logging-stack/fluentbit-fluentBit.yaml b/manifests/logging-stack/fluentbit-fluentBit.yaml index 606871053..19eca277e 100644 --- a/manifests/logging-stack/fluentbit-fluentBit.yaml +++ 
b/manifests/logging-stack/fluentbit-fluentBit.yaml @@ -6,7 +6,7 @@ metadata: labels: app.kubernetes.io/name: fluent-bit spec: - image: kubesphere/fluent-bit:v2.1.3 + image: kubesphere/fluent-bit:v2.1.4 positionDB: hostPath: path: /var/lib/fluent-bit/ diff --git a/manifests/quick-start/fluentbit.yaml b/manifests/quick-start/fluentbit.yaml index 8afa784da..0167c4c08 100644 --- a/manifests/quick-start/fluentbit.yaml +++ b/manifests/quick-start/fluentbit.yaml @@ -6,7 +6,7 @@ metadata: labels: app.kubernetes.io/name: fluent-bit spec: - image: kubesphere/fluent-bit:v2.1.3 + image: kubesphere/fluent-bit:v2.1.4 fluentBitConfigName: fluent-bit-config --- diff --git a/manifests/regex-parser/fluentbit-fluentBit.yaml b/manifests/regex-parser/fluentbit-fluentBit.yaml index af204b93f..0a79930ad 100644 --- a/manifests/regex-parser/fluentbit-fluentBit.yaml +++ b/manifests/regex-parser/fluentbit-fluentBit.yaml @@ -6,5 +6,5 @@ metadata: labels: app.kubernetes.io/name: fluent-bit spec: - image: kubesphere/fluent-bit:v2.1.3 + image: kubesphere/fluent-bit:v2.1.4 fluentBitConfigName: fluent-bit-config diff --git a/manifests/setup/fluent-operator-crd.yaml b/manifests/setup/fluent-operator-crd.yaml index ebaa5c81e..5ffe028e4 100644 --- a/manifests/setup/fluent-operator-crd.yaml +++ b/manifests/setup/fluent-operator-crd.yaml @@ -903,7 +903,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -917,11 +917,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -1061,7 +1061,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. 
This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1071,11 +1071,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1115,7 +1115,7 @@ spec: timeFormat: description: Process value according to the specified format. This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -1125,11 +1125,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -3389,6 +3389,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. 
For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -4252,6 +4307,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. 
+ type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. 
$UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: @@ -5079,7 +5248,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -5089,11 +5258,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -5292,6 +5461,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -5428,7 +5720,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -5438,11 +5730,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -6120,7 +6412,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -6130,11 +6422,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -8014,6 +8306,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for the + Fluentbit collector pods + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: @@ -10779,7 +11075,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -10793,11 +11089,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -10937,7 +11233,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -10947,11 +11243,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -10991,7 +11287,7 @@ spec: timeFormat: description: Process value according to the specified format. This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -11001,11 +11297,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -12146,6 +12442,166 @@ spec: containerLogRealPath: description: Container log path type: string + containerSecurityContext: + description: ContainerSecurityContext holds container-level security + attributes. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. Note that this field cannot be set when spec.os.name + is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. 
Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. 
All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object disableService: description: DisableService tells if the fluentbit service should be deployed. @@ -15565,6 +16021,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluent-bit + pods. + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: @@ -18951,24 +19411,116 @@ spec: type: object type: object type: object - disableService: - description: By default will build the related service according to - the globalinputs definition. - type: boolean - envVars: - description: EnvVars represent environment variables that can be passed - to fluentd pods. - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. 
If a variable cannot + defaultFilterSelector: + description: Select cluster filter plugins used to filter for the + default cluster output + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + defaultOutputSelector: + description: Select cluster output plugins used to send all logs that + did not match a route to the matching outputs + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + disableService: + description: By default will build the related service according to + the globalinputs definition. + type: boolean + envVars: + description: EnvVars represent environment variables that can be passed + to fluentd pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the @@ -19436,7 +19988,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -19450,11 +20002,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -19685,7 +20237,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -19699,11 +20251,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -19829,6 +20381,17 @@ spec: type: object x-kubernetes-map-type: atomic type: array + logLevel: + default: info + description: Global logging verbosity + enum: + - fatal + - error + - warn + - info + - debug + - trace + type: string nodeSelector: additionalProperties: type: string @@ -19942,6 +20505,179 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluentd + pods. + type: string + securityContext: + description: PodSecurityContext represents the security context for + the fluentd pods. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. 
The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. 
Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object service: description: Service represents configurations on the fluentd service. properties: @@ -23286,6 +24022,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. 
If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -24149,6 +24940,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. 
+ type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. 
+ type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: @@ -24976,7 +25881,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -24986,11 +25891,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -25189,6 +26094,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -25325,7 +26353,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -25335,11 +26363,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -26017,7 +27045,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -26027,11 +27055,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/manifests/setup/setup.yaml b/manifests/setup/setup.yaml index 9f1a20f56..cb3757a50 100644 --- a/manifests/setup/setup.yaml +++ b/manifests/setup/setup.yaml @@ -903,7 +903,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -917,11 +917,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -1061,7 +1061,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -1071,11 +1071,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -1115,7 +1115,7 @@ spec: timeFormat: description: Process value according to the specified format. This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -1125,11 +1125,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -3389,6 +3389,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. 
+ type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -4252,6 +4307,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. 
+ properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. + type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. 
+ type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. + type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. 
+ type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: @@ -5079,7 +5248,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -5089,11 +5258,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -5292,6 +5461,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine
+                          the source attribute
+                        type: string
+                      ddTags:
+                        description: Custom tags with the following format "key1:value1,
+                          key2:value2"
+                        type: string
+                      host:
+                        description: Proxy endpoint when logs are not directly forwarded
+                          to Datadog
+                        type: string
+                      httpProxy:
+                        description: HTTP proxy, only takes effect if HTTP forwarding
+                          is enabled (use_http). Defaults to HTTP_PROXY/http_proxy
+                          env vars.
+                        type: string
+                      includeTagKey:
+                        description: Automatically include the Fluentd tag in the
+                          record.
+                        type: boolean
+                      maxBackoff:
+                        description: The maximum time waited between each retry
+                          in seconds
+                        format: int32
+                        type: integer
+                      maxRetries:
+                        description: The number of retries before the output plugin
+                          stops. Set to -1 for unlimited retries
+                        format: int32
+                        type: integer
+                      noSSLValidation:
+                        description: Disable SSL validation (useful for proxy forwarding)
+                        type: boolean
+                      port:
+                        description: Proxy port when logs are not directly forwarded
+                          to Datadog and ssl is not used
+                        format: int32
+                        maximum: 65535
+                        minimum: 1
+                        type: integer
+                      service:
+                        description: Used by Datadog to correlate between logs,
+                          traces and metrics.
+                        type: string
+                      sslPort:
+                        description: Port used to send logs over a SSL encrypted
+                          connection to Datadog. If use_http is disabled, use 10516
+                          for the US region and 443 for the EU region.
+                        format: int32
+                        maximum: 65535
+                        minimum: 1
+                        type: integer
+                      tagKey:
+                        description: Where to store the Fluentd tag.
+                        type: string
+                      timestampKey:
+                        description: Name of the attribute which will contain timestamp
+                          of the log event. If nil, timestamp attribute is not added.
+                        type: string
+                      useCompression:
+                        description: Enable log compression for HTTP
+                        type: boolean
+                      useHTTP:
+                        description: Enable HTTP forwarding. If you disable it,
+                          make sure to change the port to 10514 or ssl_port to 10516
+                        type: boolean
+                      useJson:
+                        description: Event format, if true, the event is sent in
+                          json format. Otherwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -5428,7 +5720,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -5438,11 +5730,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -6120,7 +6412,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -6130,11 +6422,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -8014,6 +8306,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for the + Fluentbit collector pods + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: @@ -10779,7 +11075,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -10793,11 +11089,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -10937,7 +11233,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -10947,11 +11243,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -10991,7 +11287,7 @@ spec: timeFormat: description: Process value according to the specified format. This is available only when time_type - is *string + is string type: string timeFormatFallbacks: description: Uses the specified time format as a @@ -11001,11 +11297,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -12146,6 +12442,166 @@ spec: containerLogRealPath: description: Container log path type: string + containerSecurityContext: + description: ContainerSecurityContext holds container-level security + attributes. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. Note that this field cannot be set when spec.os.name + is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. 
Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. 
All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object disableService: description: DisableService tells if the fluentbit service should be deployed. @@ -15565,6 +16021,10 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluent-bit + pods. + type: string secrets: description: The Secrets are mounted into /fluent-bit/secrets/. items: @@ -18951,24 +19411,116 @@ spec: type: object type: object type: object - disableService: - description: By default will build the related service according to - the globalinputs definition. - type: boolean - envVars: - description: EnvVars represent environment variables that can be passed - to fluentd pods. - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. 
If a variable cannot + defaultFilterSelector: + description: Select cluster filter plugins used to filter for the + default cluster output + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + defaultOutputSelector: + description: Select cluster output plugins used to send all logs that + did not match a route to the matching outputs + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + disableService: + description: By default will build the related service according to + the globalinputs definition. + type: boolean + envVars: + description: EnvVars represent environment variables that can be passed + to fluentd pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the @@ -19436,7 +19988,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. 
This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -19450,11 +20002,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -19685,7 +20237,7 @@ spec: type: string timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -19699,11 +20251,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timeout: @@ -19829,6 +20381,17 @@ spec: type: object x-kubernetes-map-type: atomic type: array + logLevel: + default: info + description: Global logging verbosity + enum: + - fatal + - error + - warn + - info + - debug + - trace + type: string nodeSelector: additionalProperties: type: string @@ -19942,6 +20505,179 @@ spec: runtimeClassName: description: RuntimeClassName represents the container runtime configuration. type: string + schedulerName: + description: SchedulerName represents the desired scheduler for fluentd + pods. + type: string + securityContext: + description: PodSecurityContext represents the security context for + the fluentd pods. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. 
The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. 
Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object service: description: Service represents configurations on the fluentd service. properties: @@ -23286,6 +24022,61 @@ spec: will be used. type: string type: object + kinesis: + description: Kinesis defines Kinesis Output configuration. + properties: + autoRetryRequests: + description: Immediately retry failed requests to AWS services + once. This option does not affect the normal Fluent Bit retry + mechanism with backoff. Instead, it enables an immediate retry + with no delay for networking errors, which may help improve + throughput when there are transient/random networking issues. + This option defaults to true. + type: boolean + endpoint: + description: Specify a custom endpoint for the Kinesis API. + type: string + externalID: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + logKey: + description: By default, the whole log record will be sent to + Kinesis. 
If you specify a key name with this option, then only + the value of that key will be sent to Kinesis. For example, + if you are using the Fluentd Docker log driver, you can specify + log_key log and only the log message will be sent to Kinesis. + type: string + region: + description: The AWS region. + type: string + roleARN: + description: ARN of an IAM role to assume (for cross account access). + type: string + stream: + description: The name of the Kinesis Streams Delivery stream that + you want log records sent to. + type: string + stsEndpoint: + description: Custom endpoint for the STS API. + type: string + timeKey: + description: Add the timestamp to the record under this key. By + default the timestamp from Fluent Bit will not be added to records + sent to Kinesis. + type: string + timeKeyFormat: + description: strftime compliant format string for the timestamp; + for example, the default is '%Y-%m-%dT%H:%M:%S'. Supports millisecond + precision with '%3N' and supports nanosecond precision with + '%9N' and '%L'; for example, adding '%3N' to support millisecond + '%Y-%m-%dT%H:%M:%S.%3N'. This option is used with time_key. + type: string + required: + - region + - stream + type: object logLevel: description: 'Set the plugin''s logging verbosity level. Allowed values are: off, error, warn, info, debug and trace, Defaults to the SERVICE @@ -24149,6 +24940,120 @@ spec: allows to disable retries or impose a limit to try N times and then discard the data after reaching that limit. type: string + s3: + description: S3 defines S3 Output configuration. + properties: + auto_retry_requests: + description: Immediately retry failed requests to AWS services + once. + type: boolean + bucket: + description: S3 Bucket name + type: string + canned_acl: + description: Predefined Canned ACL Policy for S3 objects. + type: string + compression: + description: Compression type for S3 objects. 
+ type: string + content_type: + description: A standard MIME type for the S3 object; this will + be set as the Content-Type HTTP header. + type: string + endpoint: + description: Custom endpoint for the S3 API. + type: string + external_id: + description: Specify an external ID for the STS API, can be used + with the role_arn parameter if your role requires an external + ID. + type: string + json_date_format: + description: 'Specify the format of the date. Supported formats + are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z) + and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681)' + type: string + json_date_key: + description: Specify the name of the time key in the output record. + To disable the time key just set the value to false. + type: string + log_key: + description: By default, the whole log record will be sent to + S3. If you specify a key name with this option, then only the + value of that key will be sent to S3. + type: string + preserve_data_ordering: + description: Normally, when an upload request fails, there is + a high chance for the last received chunk to be swapped with + a later chunk, resulting in data shuffling. This feature prevents + this shuffling by using a queue logic for uploads. + type: boolean + region: + description: The AWS region of your S3 bucket + type: string + retry_limit: + description: Integer value to set the maximum number of retries + allowed. + format: int32 + type: integer + role_arn: + description: ARN of an IAM role to assume + type: string + s3_key_format: + description: Format string for keys in S3. + type: string + s3_key_format_tag_delimiters: + description: A series of characters which will be used to split + the tag into 'parts' for use with the s3_key_format option. + type: string + send_content_md5: + description: Send the Content-MD5 header with PutObject and UploadPart + requests, as is required when Object Lock is enabled. 
+ type: boolean + static_file_path: + description: Disables behavior where UUID string is automatically + appended to end of S3 key name when $UUID is not provided in + s3_key_format. $UUID, time formatters, $TAG, and other dynamic + key formatters all work as expected while this feature is set + to true. + type: boolean + storage_class: + description: Specify the storage class for S3 objects. If this + option is not specified, objects will be stored with the default + 'STANDARD' storage class. + type: string + store_dir: + description: Directory to locally buffer data before sending. + type: string + store_dir_limit_size: + description: The size of the limitation for disk usage in S3. + type: string + sts_endpoint: + description: Custom endpoint for the STS API. + type: string + total_file_size: + description: Specifies the size of files in S3. Minimum size is + 1M. With use_put_object On the maximum size is 1G. With multipart + upload mode, the maximum size is 50G. + type: string + upload_chunk_size: + description: 'The size of each ''part'' for multipart uploads. + Max: 50M' + type: string + upload_timeout: + description: Whenever this amount of time has elapsed, Fluent + Bit will complete an upload and create a new file in S3. For + example, set this value to 60m and you will get a new file every + hour. + type: string + use_put_object: + description: Use the S3 PutObject API, instead of the multipart + upload API. + type: boolean + required: + - bucket + - region + type: object splunk: description: Splunk defines Splunk Output Configuration properties: @@ -24976,7 +25881,7 @@ spec: type: string timeFormat: description: Process value according to the specified format. 
- This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -24986,11 +25891,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timekey: @@ -25189,6 +26094,129 @@ spec: required: - config type: object + datadog: + description: datadog plugin + properties: + apiKey: + description: This parameter is required in order to authenticate + your fluent agent. + properties: + valueFrom: + description: ValueSource defines how to find a value's + key. + properties: + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + compressionLevel: + description: Set the log compression level for HTTP (1 to + 9, 9 being the best ratio) + format: int32 + type: integer + ddHostname: + description: Used by Datadog to identify the host submitting + the logs. + type: string + ddSource: + description: This tells Datadog what integration it is + type: string + ddSourcecategory: + description: Multiple value attribute. 
Can be used to refine + the source attribute + type: string + ddTags: + description: Custom tags with the following format "key1:value1, + key2:value2" + type: string + host: + description: Proxy endpoint when logs are not directly forwarded + to Datadog + type: string + httpProxy: + description: HTTP proxy, only takes effect if HTTP forwarding + is enabled (use_http). Defaults to HTTP_PROXY/http_proxy + env vars. + type: string + includeTagKey: + description: Automatically include the Fluentd tag in the + record. + type: boolean + maxBackoff: + description: The maximum time waited between each retry + in seconds + format: int32 + type: integer + maxRetries: + description: The number of retries before the output plugin + stops. Set to -1 for unlimited retries + format: int32 + type: integer + noSSLValidation: + description: Disable SSL validation (useful for proxy forwarding) + type: boolean + port: + description: Proxy port when logs are not directly forwarded + to Datadog and ssl is not used + format: int32 + maximum: 65535 + minimum: 1 + type: integer + service: + description: Used by Datadog to correlate between logs, + traces and metrics. + type: string + sslPort: + description: Port used to send logs over a SSL encrypted + connection to Datadog. If use_http is disabled, use 10516 + for the US region and 443 for the EU region. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tagKey: + description: Where to store the Fluentd tag. + type: string + timestampKey: + description: Name of the attribute which will contain timestamp + of the log event. If nil, timestamp attribute is not added. + type: string + useCompression: + description: Enable log compression for HTTP + type: boolean + useHTTP: + description: Enable HTTP forwarding. If you disable it, + make sure to change the port to 10514 or ssl_port to 10516 + type: boolean + useJson: + description: Event format, if true, the event is sent in + json format. Othwerwise, in plain text. 
+ type: boolean + useSSL: + description: If true, the agent initializes a secure connection + to Datadog. In clear TCP otherwise. + type: boolean + type: object elasticsearch: description: out_es plugin properties: @@ -25325,7 +26353,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified format. - This is available only when time_type is *string + This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -25335,11 +26363,11 @@ spec: type: string timeType: description: parses/formats value according to this type, - default is *string + default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: @@ -26017,7 +27045,7 @@ spec: type: boolean timeFormat: description: Process value according to the specified - format. This is available only when time_type is *string + format. This is available only when time_type is string type: string timeFormatFallbacks: description: Uses the specified time format as a fallback @@ -26027,11 +27055,11 @@ spec: type: string timeType: description: parses/formats value according to this - type, default is *string + type, default is string enum: - float - unixtime - - '*string' + - string - mixed type: string timezone: diff --git a/pkg/operator/collector-service.go b/pkg/operator/collector-service.go index 0985eb05b..842eac5ec 100644 --- a/pkg/operator/collector-service.go +++ b/pkg/operator/collector-service.go @@ -9,12 +9,12 @@ import ( ) const ( - CollecotrMetricsPortName = "metrics" - CollecotrMetricsPort = 2020 - CollecotrTCPProtocolName = "TCP" + CollectorMetricsPortName = "metrics" + CollectorMetricsPort = 2020 + CollectorTCPProtocolName = "TCP" ) -func MakeCollecotrService(co fluentbitv1alpha2.Collector) *corev1.Service { +func MakeCollectorService(co fluentbitv1alpha2.Collector) *corev1.Service { var name string var labels map[string]string @@ -41,10 +41,10 @@ func 
MakeCollecotrService(co fluentbitv1alpha2.Collector) *corev1.Service { Type: corev1.ServiceTypeClusterIP, Ports: []corev1.ServicePort{ { - Name: CollecotrMetricsPortName, - Port: CollecotrMetricsPort, - Protocol: CollecotrTCPProtocolName, - TargetPort: intstr.FromInt(CollecotrMetricsPort), + Name: CollectorMetricsPortName, + Port: CollectorMetricsPort, + Protocol: CollectorTCPProtocolName, + TargetPort: intstr.FromInt(CollectorMetricsPort), }, }, }, diff --git a/pkg/operator/collector-statefulset.go b/pkg/operator/collector-statefulset.go index e3844bd34..79edaf1f7 100644 --- a/pkg/operator/collector-statefulset.go +++ b/pkg/operator/collector-statefulset.go @@ -16,7 +16,7 @@ var ( DefaultBufferPath = "/buffers/fluentbit/log" ) -func MakefbStatefuset(co fluentbitv1alpha2.Collector) *appsv1.StatefulSet { +func MakefbStatefulset(co fluentbitv1alpha2.Collector) *appsv1.StatefulSet { statefulset := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: co.Name, @@ -106,6 +106,10 @@ func MakefbStatefuset(co fluentbitv1alpha2.Collector) *appsv1.StatefulSet { statefulset.Spec.Template.Spec.PriorityClassName = co.Spec.PriorityClassName } + if co.Spec.SchedulerName != "" { + statefulset.Spec.Template.Spec.SchedulerName = co.Spec.SchedulerName + } + if co.Spec.Volumes != nil { statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, co.Spec.Volumes...) 
} @@ -134,7 +138,7 @@ func MakefbStatefuset(co fluentbitv1alpha2.Collector) *appsv1.StatefulSet { statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, MakeFluentbitPVC(co)) statefulset.Spec.Template.Spec.Containers[0].VolumeMounts = append(statefulset.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ Name: fmt.Sprintf("%s-buffer-pvc", co.Name), - MountPath: FlunetbitBufferMountPath(co), + MountPath: FluentbitBufferMountPath(co), }) return &statefulset @@ -185,7 +189,7 @@ func makeDefaultFluentbitPVC(co fluentbitv1alpha2.Collector) corev1.PersistentVo return pvc } -func FlunetbitBufferMountPath(co fluentbitv1alpha2.Collector) string { +func FluentbitBufferMountPath(co fluentbitv1alpha2.Collector) string { bufferPath := co.Spec.BufferPath if bufferPath != nil { return *bufferPath diff --git a/pkg/operator/daemonset.go b/pkg/operator/daemonset.go index 645dc6dcd..4d9eb75e8 100644 --- a/pkg/operator/daemonset.go +++ b/pkg/operator/daemonset.go @@ -135,7 +135,8 @@ func MakeDaemonSet(fb fluentbitv1alpha2.FluentBit, logPath string) *appsv1.Daemo MountPath: "/var/log/journal", }, }, - Resources: fb.Spec.Resources, + Resources: fb.Spec.Resources, + SecurityContext: fb.Spec.ContainerSecurityContext, }, }, NodeSelector: fb.Spec.NodeSelector, @@ -176,6 +177,10 @@ func MakeDaemonSet(fb fluentbitv1alpha2.FluentBit, logPath string) *appsv1.Daemo ds.Spec.Template.Spec.PriorityClassName = fb.Spec.PriorityClassName } + if fb.Spec.SchedulerName != "" { + ds.Spec.Template.Spec.SchedulerName = fb.Spec.SchedulerName + } + if fb.Spec.Volumes != nil { ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, fb.Spec.Volumes...) 
} diff --git a/pkg/operator/pvc.go b/pkg/operator/pvc.go index c25aaf909..89672eca8 100644 --- a/pkg/operator/pvc.go +++ b/pkg/operator/pvc.go @@ -30,9 +30,10 @@ func MakeFluentdPVC(fd fluentdv1alpha1.Fluentd) *corev1.PersistentVolumeClaim { Labels: labels, }, Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: bufferPvc.AccessModes, - Resources: bufferPvc.Resources, - VolumeMode: bufferPvc.VolumeMode, + AccessModes: bufferPvc.AccessModes, + Resources: bufferPvc.Resources, + VolumeMode: bufferPvc.VolumeMode, + StorageClassName: bufferPvc.StorageClassName, }, } return &pvc diff --git a/pkg/operator/rbac.go b/pkg/operator/rbac.go index 1321d84ee..144013ca9 100644 --- a/pkg/operator/rbac.go +++ b/pkg/operator/rbac.go @@ -9,10 +9,10 @@ import ( ) func MakeRBACObjects(name, namespace, component string, additionalRules []rbacv1.PolicyRule, saAnnotations map[string]string) (*rbacv1.ClusterRole, *corev1.ServiceAccount, *rbacv1.ClusterRoleBinding) { - rbacName := fmt.Sprintf("fluent-operator-%s", component) + crName, saName, crbName := MakeRBACNames(name, component) cr := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: rbacName, + Name: crName, }, Rules: []rbacv1.PolicyRule{ { @@ -29,7 +29,7 @@ func MakeRBACObjects(name, namespace, component string, additionalRules []rbacv1 sa := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: saName, Namespace: namespace, Annotations: saAnnotations, }, @@ -37,7 +37,7 @@ func MakeRBACObjects(name, namespace, component string, additionalRules []rbacv1 crb := rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("fluent-operator-%s-%s", component, name), + Name: crbName, }, Subjects: []rbacv1.Subject{ { @@ -49,18 +49,19 @@ func MakeRBACObjects(name, namespace, component string, additionalRules []rbacv1 RoleRef: rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, Kind: "ClusterRole", - Name: rbacName, + Name: crName, }, } return &cr, &sa, &crb } -func MakeScopedRBACObjects(fbName, 
fbNamespace string, saAnnotations map[string]string) (*rbacv1.Role, *corev1.ServiceAccount, *rbacv1.RoleBinding) { +func MakeScopedRBACObjects(name, namespace string, saAnnotations map[string]string) (*rbacv1.Role, *corev1.ServiceAccount, *rbacv1.RoleBinding) { + rName, saName, rbName := MakeScopedRBACNames(name) r := rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Name: "fluent:fluent-operator", - Namespace: fbNamespace, + Name: rName, + Namespace: namespace, }, Rules: []rbacv1.PolicyRule{ { @@ -73,30 +74,42 @@ func MakeScopedRBACObjects(fbName, fbNamespace string, saAnnotations map[string] sa := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: fbName, - Namespace: fbNamespace, + Name: saName, + Namespace: namespace, Annotations: saAnnotations, }, } rb := rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("fluent-operator-fluent-bit-%s", fbName), - Namespace: fbNamespace, + Name: rbName, + Namespace: namespace, }, Subjects: []rbacv1.Subject{ { Kind: rbacv1.ServiceAccountKind, - Name: fbName, - Namespace: fbNamespace, + Name: name, + Namespace: namespace, }, }, RoleRef: rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, Kind: "Role", - Name: "fluent:fluent-operator", + Name: rName, }, } return &r, &sa, &rb } + +func MakeRBACNames(name, component string) (string, string, string) { + cr := fmt.Sprintf("fluent-operator-%s", component) + crb := fmt.Sprintf("fluent-operator-%s-%s", component, name) + return cr, name, crb +} + +func MakeScopedRBACNames(name string) (string, string, string) { + r := "fluent:fluent-operator" + rb := fmt.Sprintf("fluent-operator-fluent-bit-%s", name) + return r, name, rb +} diff --git a/pkg/operator/sts.go b/pkg/operator/sts.go index 19cef2af6..6b8b00342 100644 --- a/pkg/operator/sts.go +++ b/pkg/operator/sts.go @@ -21,6 +21,8 @@ const ( DefaultForwardPort int32 = 24424 DefaultHttpPort int32 = 9880 + // 101 is the fsGroup that fluentd runs as in the kubesphere image + DefaultFsGroup int64 = 101 DefaultForwardName = 
"forward" DefaultHttpName = "http" @@ -51,6 +53,8 @@ func MakeStatefulset(fd fluentdv1alpha1.Fluentd) *appsv1.StatefulSet { } } + defaultFsGroup := DefaultFsGroup + sts := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: fd.Name, @@ -109,6 +113,9 @@ func MakeStatefulset(fd fluentdv1alpha1.Fluentd) *appsv1.StatefulSet { NodeSelector: fd.Spec.NodeSelector, Tolerations: fd.Spec.Tolerations, Affinity: fd.Spec.Affinity, + SecurityContext: &corev1.PodSecurityContext{ + FSGroup: &defaultFsGroup, + }, }, }, }, @@ -138,6 +145,14 @@ func MakeStatefulset(fd fluentdv1alpha1.Fluentd) *appsv1.StatefulSet { sts.Spec.Template.Spec.Containers[0].Env = append(sts.Spec.Template.Spec.Containers[0].Env, fd.Spec.EnvVars...) } + if fd.Spec.SecurityContext != nil { + sts.Spec.Template.Spec.SecurityContext = fd.Spec.SecurityContext + } + + if fd.Spec.SchedulerName != "" { + sts.Spec.Template.Spec.SchedulerName = fd.Spec.SchedulerName + } + // Mount host or emptydir VolumeSource if fd.Spec.BufferVolume != nil && !fd.Spec.BufferVolume.DisableBufferVolume { bufferVolName := fmt.Sprintf("%s-buffer", fd.Name)