diff --git a/.github/workflows/publish-version-4.6.yaml b/.github/workflows/publish-version-4.6.yaml new file mode 100644 index 000000000..049053f1d --- /dev/null +++ b/.github/workflows/publish-version-4.6.yaml @@ -0,0 +1,86 @@ +name: Publish version 4.6 + +env: + doc_versionnumber: "4.6" + +on: + push: + branches: + - release-4.6 + workflow_dispatch: + +jobs: + build: + name: Build + runs-on: ubuntu-latest + + permissions: + contents: write + pages: write + id-token: write + + concurrency: + group: "pages" + cancel-in-progress: false + + environment: + name: github-pages-test + url: ${{ steps.deployment.outputs.page_url }} + + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: release-4.6 + submodules: 'recursive' + + - name: Set up Pages + id: pages + uses: actions/configure-pages@1f0c5cde4bc74cd7e1254d0cb4de8d49e9068c7d # v4.0.0 + + - name: Set up Hugo + uses: peaceiris/actions-hugo@16361eb4acea8698b220b76c0d4e84e1fd22c61d # v2.6.0 + with: + hugo-version: '0.110.0' + extended: true + + - name: Set up Node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version: 18 + + - name: Install dependencies + run: | + cd themes/docsy + npm install + + - name: Set up PostCSS + run: npm install --save-dev autoprefixer postcss-cli postcss + + - name: Build + run: hugo --environment production --baseURL ${{ steps.pages.outputs.base_url }}/${{ env.doc_versionnumber }}/ + + # - name: Upload artifact + # uses: actions/upload-pages-artifact@64bcae551a7b18bcb9a09042ddf1960979799187 # v1.0.8 + # with: + # path: ./public/ + + - name: Checkout code to update + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: 'gh-pages-test' + path: 'tmp/gh-pages' + # - name: Display file structure + # run: ls -R + - name: Copy built site to GH pages + run: | + rm -rf tmp/gh-pages/${{ env.doc_versionnumber }} + mkdir -p tmp/gh-pages/${{ env.doc_versionnumber }} + mv public/* tmp/gh-pages/${{ env.doc_versionnumber }} + - name: Commit & Push changes + uses: actions-js/push@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + message: 'Publish updated docs for ${{ env.doc_versionnumber }}, ${{ github.event.repository.pushed_at}}' + branch: 'gh-pages-test' + directory: 'tmp/gh-pages' diff --git a/config/_default/config.toml b/config/_default/config.toml index 02edd83cd..8d80ccd5d 100644 --- a/config/_default/config.toml +++ b/config/_default/config.toml @@ -32,7 +32,7 @@ weight = 1 [social] # Used as twitter:site in page metadata -twitter = "calisti12" +twitter = "AxoflowIO" [markup] [markup.goldmark] @@ -69,7 +69,7 @@ twitter = "calisti12" # Used in the "version-banner" partial to display a version number for the # current doc set. 
- version = "4.6.0" + version = "4.7.0" version_menu = "Releases" version_menu_canonicallinks = true version_menu_pagelinks = true @@ -169,9 +169,13 @@ twitter = "calisti12" ####################### # Add your release versions here [[params.versions]] - version = "latest (4.6.0)" + version = "latest (4.7.0)" githubbranch = "master" url = "" +[[params.versions]] + version = "4.6" + githubbranch = "release-4.6" + url = "/4.6/" [[params.versions]] version = "4.5" githubbranch = "release-4.5" @@ -196,7 +200,7 @@ twitter = "calisti12" # Cascade version number to every doc page (needed to create sections for pagefind search) # Update this parameter when creating a new version [[cascade]] -body_attribute = 'data-pagefind-filter="section:4.6"' +body_attribute = 'data-pagefind-filter="section:4.7"' [cascade._target] path = '/docs/**' diff --git a/content/docs/configuration/crds/v1beta1/clusteroutput_types.md b/content/docs/configuration/crds/v1beta1/clusteroutput_types.md index 7f2991cf4..1b839f6f7 100644 --- a/content/docs/configuration/crds/v1beta1/clusteroutput_types.md +++ b/content/docs/configuration/crds/v1beta1/clusteroutput_types.md @@ -31,6 +31,9 @@ ClusterOutputSpec contains Kubernetes spec for ClusterOutput ### enabledNamespaces ([]string, optional) {#clusteroutputspec-enablednamespaces} +### protected (bool, optional) {#clusteroutputspec-protected} + + ## ClusterOutputList diff --git a/content/docs/configuration/crds/v1beta1/common_types.md b/content/docs/configuration/crds/v1beta1/common_types.md index 7b6847464..fa8647270 100644 --- a/content/docs/configuration/crds/v1beta1/common_types.md +++ b/content/docs/configuration/crds/v1beta1/common_types.md @@ -71,10 +71,10 @@ ServiceMonitorConfig defines the ServiceMonitor properties ### honorLabels (bool, optional) {#servicemonitorconfig-honorlabels} -### metricRelabelings ([]*v1.RelabelConfig, optional) {#servicemonitorconfig-metricrelabelings} +### metricRelabelings ([]v1.RelabelConfig, optional) {#servicemonitorconfig-metricrelabelings} -### relabelings ([]*v1.RelabelConfig, optional) {#servicemonitorconfig-relabelings} +### relabelings ([]v1.RelabelConfig, optional) {#servicemonitorconfig-relabelings} ### scheme (string, optional) {#servicemonitorconfig-scheme} @@ -95,6 +95,7 @@ Security defines Fluentd, FluentbitAgent deployment security properties Warning: this is not supported anymore and does nothing + ### roleBasedAccessControlCreate (*bool, optional) {#security-rolebasedaccesscontrolcreate} diff --git a/content/docs/configuration/crds/v1beta1/fluentbit_types.md b/content/docs/configuration/crds/v1beta1/fluentbit_types.md index 449231203..5ae490681 100644 --- a/content/docs/configuration/crds/v1beta1/fluentbit_types.md +++ b/content/docs/configuration/crds/v1beta1/fluentbit_types.md @@ -339,6 +339,12 @@ How many times a TCP keepalive connection can be used before being recycled Default: 0, disabled +### maxWorkerConnections (int, optional) {#fluentbitnetwork-maxworkerconnections} + +Set maximum number of TCP connections that can be established per worker. + +Default: 0, unlimited + ### sourceAddress (string, optional) {#fluentbitnetwork-sourceaddress} Specify network address (interface) to use for connection and data traffic. @@ -724,6 +730,21 @@ Default: On Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only. 
+### namespace_annotations (string, optional) {#filterkubernetes-namespace_annotations} + +Include Kubernetes namespace annotations on every record + + +### kube_meta_namespace_cache_ttl (string, optional) {#filterkubernetes-kube_meta_namespace_cache_ttl} + +Configurable TTL for K8s cached namespace metadata. (15m) + + +### namespace_labels (string, optional) {#filterkubernetes-namespace_labels} + +Include Kubernetes namespace labels on every record + + +### Regex_Parser (string, optional) {#filterkubernetes-regex_parser} + +Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example). diff --git a/content/docs/configuration/crds/v1beta1/fluentd_types.md b/content/docs/configuration/crds/v1beta1/fluentd_types.md index 7816d8b7c..828b4f584 100644 --- a/content/docs/configuration/crds/v1beta1/fluentd_types.md +++ b/content/docs/configuration/crds/v1beta1/fluentd_types.md @@ -34,6 +34,11 @@ BufferStorageVolume is by default configured as PVC using FluentdPvcSpec [volume ### compressConfigFile (bool, optional) {#fluentdspec-compressconfigfile} +### configCheck (*ConfigCheck, optional) {#fluentdspec-configcheck} + +Overrides the configCheck settings defined at the Logging resource level. This field is not used directly; if defined, its value is copied over the corresponding field of the Logging resource. + + ### configCheckAnnotations (map[string]string, optional) {#fluentdspec-configcheckannotations} @@ -107,6 +112,11 @@ Ignore same log lines [more info]( https://docs.fluentd.org/deployment/logging#i ### livenessProbe (*corev1.Probe, optional) {#fluentdspec-livenessprobe} +### logFormat (string, optional) {#fluentdspec-logformat} + +Set the logging format. Allowed values are: "text" (default) and "json". + + ### logLevel (string, optional) {#fluentdspec-loglevel} diff --git a/content/docs/configuration/crds/v1beta1/logging_types.md b/content/docs/configuration/crds/v1beta1/logging_types.md index 94e71d9cd..42a49bc08 100644 --- a/content/docs/configuration/crds/v1beta1/logging_types.md +++ b/content/docs/configuration/crds/v1beta1/logging_types.md @@ -21,7 +21,7 @@ Default: "cluster.local." ### configCheck (ConfigCheck, optional) {#loggingspec-configcheck} -ConfigCheck settings that apply to both fluentd and syslog-ng +ConfigCheck settings that apply to both fluentd and syslog-ng. Can be overridden at the fluentd / syslog-ng level. ### controlNamespace (string, required) {#loggingspec-controlnamespace} diff --git a/content/docs/configuration/crds/v1beta1/syslogng_types.md b/content/docs/configuration/crds/v1beta1/syslogng_types.md index 018d4f8fd..1c9d7a387 100644 --- a/content/docs/configuration/crds/v1beta1/syslogng_types.md +++ b/content/docs/configuration/crds/v1beta1/syslogng_types.md @@ -14,6 +14,11 @@ SyslogNGSpec defines the desired state of SyslogNG ### bufferVolumeMetricsService (*typeoverride.Service, optional) {#syslogngspec-buffervolumemetricsservice} +### configCheck (*ConfigCheck, optional) {#syslogngspec-configcheck} + +Overrides the configCheck settings defined at the Logging resource level. This field is not used directly; if defined, its value is copied over the corresponding field of the Logging resource.
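As an illustration of how this override is meant to be used (a sketch, not part of the generated reference), a `Logging` resource could set a global `configCheck` and override it for syslog-ng only. The strategy names and the `timeoutSeconds` field are assumptions based on the `ConfigCheck` type, so verify them against the Logging CRD reference:

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example
spec:
  controlNamespace: logging
  configCheck:
    strategy: DryRun              # global setting, applies to all aggregators
  syslogNG:
    configCheck:
      strategy: StartWithTimeout  # assumed strategy name, overrides the global setting for syslog-ng only
      timeoutSeconds: 10          # assumed field name for the timeout
```

The aggregator-level settings take precedence for that aggregator only.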
+ + ### configCheckPod (*typeoverride.PodSpec, optional) {#syslogngspec-configcheckpod} diff --git a/content/docs/configuration/plugins/outputs/buffer.md b/content/docs/configuration/plugins/outputs/buffer.md index 2a93cbec9..c4ab0be06 100644 --- a/content/docs/configuration/plugins/outputs/buffer.md +++ b/content/docs/configuration/plugins/outputs/buffer.md @@ -18,7 +18,7 @@ The max number of events that each chunks can store in it ### chunk_limit_size (string, optional) {#buffer-chunk_limit_size} -The max size of each chunks: events will be written into chunks until the size of chunks become this size (default: 8MB) +The max size of each chunks: events will be written into chunks until the size of chunks become this size Default: 8MB diff --git a/content/docs/configuration/plugins/outputs/elasticsearch.md b/content/docs/configuration/plugins/outputs/elasticsearch.md index 963662486..1e08a7be2 100644 --- a/content/docs/configuration/plugins/outputs/elasticsearch.md +++ b/content/docs/configuration/plugins/outputs/elasticsearch.md @@ -54,6 +54,12 @@ Configure bulk_message request splitting threshold size. Default value is 20MB. Default: 20MB +### compression_level (string, optional) {#elasticsearch-compression_level} + +Option for compressing the output data using gzip. Valid options: default_compression, best_compression, best_speed, no_compression. + +Default: no_compression + ### content_type (string, optional) {#elasticsearch-content_type} With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload. diff --git a/content/docs/configuration/plugins/outputs/forward.md b/content/docs/configuration/plugins/outputs/forward.md index 10a1d7ebe..6801d1899 100644 --- a/content/docs/configuration/plugins/outputs/forward.md +++ b/content/docs/configuration/plugins/outputs/forward.md @@ -8,9 +8,9 @@ generated_file: true ### ack_response_timeout (int, optional) {#forwardoutput-ack_response_timeout} -This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries. +This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries. -Default: 190 +Default: 190 ### buffer (*Buffer, optional) {#forwardoutput-buffer} @@ -29,21 +29,21 @@ Enable client-side DNS round robin. Uniform randomly pick an IP address to send ### expire_dns_cache (int, optional) {#forwardoutput-expire_dns_cache} -Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache. +Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache. -Default: 0 +Default: 0 ### hard_timeout (int, optional) {#forwardoutput-hard_timeout} -The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. +The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. -Default: 60 +Default: 60 ### heartbeat_interval (int, optional) {#forwardoutput-heartbeat_interval} -The interval of the heartbeat packer. +The interval of the heartbeat packer. -Default: 1 +Default: 1 ### heartbeat_type (string, optional) {#forwardoutput-heartbeat_type} @@ -57,33 +57,33 @@ Ignore DNS resolution and errors at startup time. ### keepalive (bool, optional) {#forwardoutput-keepalive} -Enable keepalive connection. +Enable keepalive connection. -Default: false +Default: false ### keepalive_timeout (int, optional) {#forwardoutput-keepalive_timeout} -Expired time of keepalive. 
Default value is nil, which means to keep connection as long as possible. +Expired time of keepalive. Default value is nil, which means to keep connection as long as possible. -Default: 0 +Default: 0 ### phi_failure_detector (bool, optional) {#forwardoutput-phi_failure_detector} Use the "Phi accrual failure detector" to detect server failure. -Default: true +Default: true ### phi_threshold (int, optional) {#forwardoutput-phi_threshold} -The threshold parameter used to detect server faults. `phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s. +The threshold parameter used to detect server faults. `phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s. -Default: 16 +Default: 16 ### recover_wait (int, optional) {#forwardoutput-recover_wait} -The wait time before accepting a server fault recovery. +The wait time before accepting a server fault recovery. -Default: 10 +Default: 10 ### require_ack_response (bool, optional) {#forwardoutput-require_ack_response} @@ -97,9 +97,9 @@ Change the protocol to at-least-once. The plugin waits the ack from destination' ### send_timeout (int, optional) {#forwardoutput-send_timeout} -The timeout time when sending event logs. +The timeout time when sending event logs. -Default: 60 +Default: 60 ### servers ([]FluentdServer, required) {#forwardoutput-servers} @@ -113,9 +113,9 @@ The threshold for chunk flush performance check. Parameter type is float, not ti ### tls_allow_self_signed_cert (bool, optional) {#forwardoutput-tls_allow_self_signed_cert} -Allow self signed certificates or not. +Allow self signed certificates or not. -Default: false +Default: false ### tls_cert_logical_store_name (string, optional) {#forwardoutput-tls_cert_logical_store_name} @@ -139,9 +139,9 @@ Enable to use certificate enterprise store on Windows system certstore. This par ### tls_ciphers (string, optional) {#forwardoutput-tls_ciphers} -The cipher configuration of TLS transport. +The cipher configuration of TLS transport. -Default: ALL:!aNULL:!eNULL:!SSLv2 +Default: ALL:!aNULL:!eNULL:!SSLv2 ### tls_client_cert_path (*secret.Secret, optional) {#forwardoutput-tls_client_cert_path} @@ -160,21 +160,21 @@ The client private key path for TLS. ### tls_insecure_mode (bool, optional) {#forwardoutput-tls_insecure_mode} -Skip all verification of certificates or not. +Skip all verification of certificates or not. -Default: false +Default: false ### tls_verify_hostname (bool, optional) {#forwardoutput-tls_verify_hostname} -Verify hostname of servers and certificates or not in TLS transport. +Verify hostname of servers and certificates or not in TLS transport. -Default: true +Default: true ### tls_version (string, optional) {#forwardoutput-tls_version} -The default version of TLS transport. [TLSv1_1, TLSv1_2] +The default version of TLS transport. 
[TLSv1_1, TLSv1_2] -Default: TLSv1_2 +Default: TLSv1_2 ### transport (string, optional) {#forwardoutput-transport} @@ -183,9 +183,9 @@ The transport protocol to use [ tcp, tls ] ### verify_connection_at_startup (bool, optional) {#forwardoutput-verify_connection_at_startup} -Verify that a connection can be made with one of out_forward nodes at the time of startup. +Verify that a connection can be made with one of out_forward nodes at the time of startup. -Default: false +Default: false ## Fluentd Server @@ -209,9 +209,9 @@ The password for authentication. ### port (int, optional) {#fluentd-server-port} -The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. +The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. -Default: 24224 +Default: 24224 ### shared_key (*secret.Secret, optional) {#fluentd-server-shared_key} @@ -230,8 +230,8 @@ The username for authentication. ### weight (int, optional) {#fluentd-server-weight} -The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. . +The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. -Default: 60 +Default: 60 diff --git a/content/docs/configuration/plugins/outputs/gelf.md b/content/docs/configuration/plugins/outputs/gelf.md index cc8f33338..3876c096a 100644 --- a/content/docs/configuration/plugins/outputs/gelf.md +++ b/content/docs/configuration/plugins/outputs/gelf.md @@ -11,7 +11,12 @@ generated_file: true ## Configuration ## Output Config -### host (string, required) {#output-config-host} +### buffer (*Buffer, optional) {#output config-buffer} + +Available since ghcr.io/kube-logging/fluentd:v1.16-full-build.139 [Buffer](../buffer/) + + +### host (string, required) {#output config-host} Destination host diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md b/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md index 476697c6b..e59860039 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md @@ -40,6 +40,12 @@ spec: The document ID. If no ID is specified, a document ID is automatically generated. +### disk_buffer (*DiskBuffer, optional) {#elasticsearchoutput-disk_buffer} + +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). + +Default: false + ### index (string, optional) {#elasticsearchoutput-index} Name of the data stream, index, or index alias to perform the action on. diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md b/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md index 8a9a2a8ca..37bb9cb5b 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md @@ -4,7 +4,6 @@ weight: 200 generated_file: true --- -# Sending messages over Openobserve ## Overview Send messages to [OpenObserve](https://openobserve.ai/docs/api/ingestion/logs/json/) using its [Logs Ingestion - JSON API](https://openobserve.ai/docs/api/ingestion/logs/json/). 
This API accepts multiple records in batch in JSON format. @@ -41,6 +40,12 @@ For details on the available options of the output, see the [documentation of th ### (HTTPOutput, required) {#openobserveoutput-} +### disk_buffer (*DiskBuffer, optional) {#openobserveoutput-disk_buffer} + +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). + +Default: false + ### organization (string, optional) {#openobserveoutput-organization} Name of the organization in OpenObserve. diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/s3.md b/content/docs/configuration/plugins/syslog-ng-outputs/s3.md index 8085cb35a..375faba09 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/s3.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/s3.md @@ -68,6 +68,12 @@ Enable or disable compression. Default: false +### disk_buffer (*DiskBuffer, optional) {#s3output-disk_buffer} + +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). + +Default: false + ### flush_grace_period (int, optional) {#s3output-flush_grace_period} Set the number of seconds for flush period. diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/splunk_hec.md b/content/docs/configuration/plugins/syslog-ng-outputs/splunk_hec.md index 432b98359..4b157408f 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/splunk_hec.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/splunk_hec.md @@ -52,6 +52,12 @@ Fallback option for source field. Fallback option for sourcetype field. +### disk_buffer (*DiskBuffer, optional) {#splunkhecoutput-disk_buffer} + +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). + +Default: false + ### event (string, optional) {#splunkhecoutput-event} event() accepts a template, which declares the content of the log message sent to Splunk. Default value: `${MSG}` diff --git a/content/docs/image-versions.md b/content/docs/image-versions.md index a33da722f..2103cf09a 100644 --- a/content/docs/image-versions.md +++ b/content/docs/image-versions.md @@ -5,6 +5,22 @@ weight: 750 Logging operator uses the following image versions. 
+## Logging operator version 4.7 + +| Image repository | GitHub repository | Version | +| -------- | --- | -- | +| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1 | +| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 | +| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1 | +| k8s.gcr.io/pause | | 3.2 | +| docker.io/busybox | https://github.com/docker-library/busybox | latest | +| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.7.1 | +| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 3.0.4 | +| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full | +| ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2 | +| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 | +| ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0 | + ## Logging operator version 4.6 | Image repository | GitHub repository | Version | diff --git a/content/docs/whats-new/_index.md b/content/docs/whats-new/_index.md index 20b341fcb..51a8458c2 100644 --- a/content/docs/whats-new/_index.md +++ b/content/docs/whats-new/_index.md @@ -3,11 +3,61 @@ title: What's new weight: 50 --- +## Version 4.7 + +The following are the highlights and main changes of Logging operator 4.7. For a complete list of changes and bugfixes, see the [Logging operator 4.7 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.7.0) and the [Logging operator 4.7 release blog post](https://axoflow.com/logging-operator-4.7-release-announcement). + +### Breaking change for Fluentd + +When using the Fluentd aggregator, Logging operator used to override the default `chunk_limit_size` of the Fluentd disk buffers. Since Fluentd has updated its default to a much saner value, Logging operator no longer overrides it, to avoid creating too many small buffer chunks. (Having too many small chunks can lead to `too many open files` errors.) + +This isn't an intrusive breaking change: it only affects your deployments if you intentionally or accidentally depended on this value. + +### JSON output format for Fluentd + +In addition to the default text format, Fluentd can now format the output as JSON: + +```yaml +spec: + fluentd: + logFormat: json +``` + + +### Disk buffer support for more outputs + +Enabling disk buffers wasn't available for some of the outputs. This has been fixed for: [Gelf]({{< relref "/docs/configuration/plugins/outputs/gelf.md" >}}), [Elasticsearch]({{< relref "/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md" >}}), [OpenObserve]({{< relref "/docs/configuration/plugins/syslog-ng-outputs/openobserve.md" >}}), [S3]({{< relref "/docs/configuration/plugins/syslog-ng-outputs/s3.md" >}}), [Splunk HEC]({{< relref "/docs/configuration/plugins/syslog-ng-outputs/splunk_hec.md" >}}). + +### Compression support for Elasticsearch + +The [Elasticsearch output of the Fluentd aggregator]({{< relref "/docs/configuration/plugins/outputs/elasticsearch.md#elasticsearch-compression_level" >}}) now supports compressing the output data using gzip. You can set the `compression_level` option to `default_compression`, `best_compression`, or `best_speed`. By default, compression is disabled.
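As a minimal sketch of where the option goes (the Output name, Elasticsearch endpoint, index, and buffer values below are placeholders, not part of the release notes):

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: es-compressed                                 # placeholder name
spec:
  elasticsearch:
    host: elasticsearch.logging.svc.cluster.local     # placeholder endpoint
    port: 9200
    scheme: https
    index_name: app-logs                              # placeholder index
    compression_level: best_compression               # or default_compression / best_speed
    buffer:
      timekey: 1m
      timekey_wait: 30s
```

Enabling compression trades some CPU on the Fluentd side for smaller bulk requests sent to Elasticsearch.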
+ +### Protected ClusterOutputs for Fluentd + +By default, ClusterOutputs can be referenced in any Flow. In certain scenarios, this means that users can send logs from Flows to the ClusterOutput, possibly spamming the output with user logs. From now on, you can set the `protected` flag on ClusterOutputs to prevent Flows from sending logs to the protected ClusterOutput. + +### ConfigCheck settings for aggregators + +You can now specify `configCheck` settings globally in the Logging CRD, and override them if needed at the aggregator level in the [Fluentd]({{< relref "/docs/configuration/crds/v1beta1/fluentd_types.md" >}}) or [SyslogNG]({{< relref "/docs/configuration/crds/v1beta1/syslogng_types.md" >}}) CRD. + +### Limit connections for Fluent Bit + +You can now limit the number of TCP connections that each Fluent Bit worker can open towards the aggregator endpoints. The `max_worker_connections` option is set to unlimited by default, and should be used together with the `Workers` option (which defaults to 2 according to the [Fluent Bit documentation](https://docs.fluentbit.io/manual/pipeline/outputs/tcp-and-tls#:~:text=double-,Workers,-Enables%20dedicated%20thread)). The following example uses a single worker with a single connection: + +```yaml +kind: FluentbitAgent +spec: + network: + maxWorkerConnections: 1 + syslogng_output: + Workers: 1 +``` + ## Version 4.6 -The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the [Logging operator 4.6 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.6.0) and the [Logging operator 4.6 release blog post](fluent-bit-hot-reload-kubernetes-namespace-labels-vmware-outputs-logging-operator-4-6). +The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the [Logging operator 4.6 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.6.0) and the [Logging operator 4.6 release blog post](https://axoflow.com/fluent-bit-hot-reload-kubernetes-namespace-labels-vmware-outputs-logging-operator-4-6). -## Fluent Bit hot reload +### Fluent Bit hot reload As a Fluent Bit restart can take a long time when there are many files to index, Logging operator now supports [hot reload for Fluent Bit](https://docs.fluentbit.io/manual/administration/hot-reload) to reload its configuration on the fly. @@ -39,7 +89,7 @@ spec: Many thanks to @aslafy-z for contributing this feature! -## VMware Aria Operations output for Fluentd +### VMware Aria Operations output for Fluentd When using the Fluentd aggregator with the Logging operator, you can now send your logs to [VMware Aria Operations for Logs](https://www.vmware.com/products/aria-operations-for-logs.html). This output uses the [vmwareLogInsight plugin](https://github.com/vmware/fluent-plugin-vmware-loginsight). @@ -62,7 +112,7 @@ spec: Many thanks to @logikone for contributing this feature! -## VMware Log Intelligence output for Fluentd +### VMware Log Intelligence output for Fluentd When using the Fluentd aggregator with the Logging operator, you can now send your logs to [VMware Log Intelligence](https://aria.vmware.com/t/vmware-log-intelligence/). This output uses the [vmware_log_intelligence plugin](https://github.com/vmware/fluent-plugin-vmware-log-intelligence). @@ -90,7 +140,7 @@ spec: Many thanks to @zrobisho for contributing this feature!
-## Kubernetes namespace labels and annotations +### Kubernetes namespace labels and annotations Logging operator 4.6 supports the new Fluent Bit Kubernetes filter options that will be released in Fluent Bit 3.0. That way you'll be able to enrich your logs with Kubernetes namespace labels and annotations right at the source of the log messages. @@ -110,7 +160,7 @@ spec: tag: 3.0.0 ``` -## Other changes +### Other changes - Enabling ServiceMonitor checks if Prometheus is already available. - You can now use a custom PVC without a template for the statefulset. @@ -123,7 +173,7 @@ spec: - The Elasticsearch output of the syslog-ng aggregator now supports the template option. - To avoid problems that might occur when a tenant has a faulty output and backpressure kicks in, Logging operator now creates a dedicated tail input for each tenant. -## Removed feature +### Removed feature We have removed support for [Pod Security Policies (PSPs)](https://kubernetes.io/docs/concepts/security/pod-security-policy/), which were deprecated in Kubernetes v1.21, and removed from Kubernetes in v1.25. @@ -133,7 +183,7 @@ Note that the API was left intact, it just doesn't do anything. The following are the highlights and main changes of Logging operator 4.5. For a complete list of changes and bugfixes, see the [Logging operator 4.5 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.5.0). -## Standalone FluentdConfig and SyslogNGConfig CRDs +### Standalone FluentdConfig and SyslogNGConfig CRDs Starting with Logging operator version 4.5, you can either configure Fluentd in the `Logging` CR, or you can use a standalone `FluentdConfig` CR. Similarly, you can use a standalone `SyslogNGConfig` CRD to configure syslog-ng. @@ -161,7 +211,7 @@ When using Fluentd as the log aggregator, you can now: - Set which [Azure Cloud to use]({{< relref "/docs/configuration/plugins/outputs/azurestore.md#output-config-azure_cloud" >}}) (for example, AzurePublicCloud), when using the Azure Storage output - Customize the `image` to use in [event and host tailers]({{< relref "/docs/configuration/crds/extensions/_index.md" >}}) -## Other changes +### Other changes - LoggingStatus now includes the number (problemsCount) and the related watchNamespaces to help troubleshooting @@ -211,6 +261,7 @@ without the dry-run or syntax-check flags, so output plugins or destination driv connections and will fail if there are any issues , for example, with the credentials. Add the following to you `Logging` resource spec: + ```yaml spec: configCheck: @@ -249,7 +300,7 @@ New +logging_buffer_size_bytes{entity="/buffers",host="all-to-file-fluentd-0"} 32253 ``` -## Other improvements +### Other improvements - You can now configure the resources of the buffer metrics sidecar. - You can now rerun failed configuration checks if there is no configcheck pod. diff --git a/content/headless/deploy-helm-intro.md b/content/headless/deploy-helm-intro.md index 7a9dcf1e8..b8ce6c13f 100644 --- a/content/headless/deploy-helm-intro.md +++ b/content/headless/deploy-helm-intro.md @@ -1,3 +1,3 @@ -To install the Logging operator using Helm, complete the following +To install the Logging operator using Helm, complete the following steps. > Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.
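For context, a typical OCI-based install could look like the following sketch; the chart path and namespace are assumptions based on the kube-logging registry layout, so verify them against the installation guide:

```bash
# Assumed chart location and namespace; adjust both for your environment.
helm upgrade --install --wait \
  --create-namespace --namespace logging \
  logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
```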